text stringlengths 8 4.13M |
|---|
use std::collections::HashMap;
use std::collections::HashSet;
use incrementalmerkletree::*;
use pedersen::PedersenDigest;
use base::*;
use c2p::*;
use p2c::*;
use std::collections::VecDeque;
use convert::*;
/// Public inputs a sender submits alongside a p2c ("plain to commitment")
/// zero-knowledge proof.
#[derive(Clone)]
pub struct SenderProof {
    /// Serialized zero-knowledge proof, checked by `p2c_verify`.
    pub proof: String,
    //hb:([u64;4],[u64;4]),
    /// Coin commitment; recorded in `coins` and appended to the Merkle tree.
    pub coin: String,
    /// Balance delta subtracted from the sender's balance via `ecc_sub`.
    pub delt_ba: String,
    /// Opaque encrypted payload passed to `p2c_verify` — exact contents not
    /// visible here; presumably the receiver's note ciphertext (TODO confirm).
    pub enc: String,
    /// Block number of this spend; must be strictly greater than the
    /// sender's previous one (replay protection in `send_verify`).
    pub block_number: u64,
}
/// Public inputs a receiver submits alongside a c2p ("commitment to plain")
/// zero-knowledge proof.
#[derive(Clone)]
pub struct ReceiverProof{
    /// Serialized zero-knowledge proof, checked by `c2p_verify`.
    pub proof: String,
    /// Spend nullifier; recorded to prevent claiming the same note twice.
    pub nullifier: String,
    /// Merkle root the proof was generated against; must equal the current tree root.
    pub root: String,
    /// Balance delta added to the receiver's balance via `ecc_add`.
    pub delt_ba: String
}
/// In-memory confidential-transfer ledger: per-address encoded balances plus
/// the replay-protection state and the Merkle tree of coin commitments.
pub struct PrivacyContract {
    balances: HashMap<String, String>,   // address -> encoded balance
    last_spent: HashMap<String, u64>,    // address -> block number of last accepted spend
    coins: HashSet<String>,              // coin commitments accepted so far
    nullifier_set: HashSet<String>,      // nullifiers consumed by receive_verify
    tree: IncrementalMerkleTree<PedersenDigest>, // accumulator of coin commitments
}
impl PrivacyContract {
/// Creates an empty contract with a fresh incremental Merkle tree of depth 60.
pub fn new() -> Self {
    PrivacyContract {
        balances: HashMap::new(),
        last_spent: HashMap::new(),
        coins: HashSet::new(),
        nullifier_set: HashSet::new(),
        // The `60 as usize` cast was redundant: the parameter position
        // already fixes the literal's type to usize.
        tree: IncrementalMerkleTree::new(60),
    }
}
/// Sets (or overwrites) the encoded balance recorded for `address`.
/// NOTE(review): "banlance" is a typo for "balance"; kept because callers
/// depend on the method name.
pub fn set_banlance(&mut self, address: String, balance: String) {
    self.balances.insert(address, balance);
}
/// Returns a copy of the encoded balance recorded for `address`.
/// NOTE(review): "banlance" is a typo for "balance"; kept because callers
/// depend on the method name.
///
/// # Panics
/// Panics if `address` has no recorded balance.
///
/// Takes `&self` instead of the original `&mut self`: the lookup never
/// mutates, and every caller holding a mutable reference still compiles.
pub fn get_banlance(&self, address: String) -> String {
    self.balances
        .get(&address)
        .expect("get_banlance: no balance recorded for address")
        .clone()
}
/// Verifies a sender (p2c) proof. On success it records the coin commitment,
/// bumps the sender's last-spent block, appends the commitment to the Merkle
/// tree, and subtracts the delta from the sender's balance.
///
/// Returns `(true, Some(path))` with the Merkle path after the append, or
/// `(false, None)` for a duplicate coin or a stale block number.
///
/// # Panics
/// Panics if `address` has no balance entry, or if proof verification
/// fails/errors (`assert!` + `unwrap`).
pub fn send_verify(&mut self, address: String, message: SenderProof) -> (bool, Option<MerklePath<PedersenDigest>>) {
    if self.coins.contains(&message.coin) {
        println!("Dup coin");
        return (false, None);
    }
    // compare block number — replay protection: each spend must cite a
    // strictly larger block number than the sender's previous spend.
    if let Some(block_number) = self.last_spent.get_mut(&address) {
        if *block_number >= message.block_number {
            println!("invalid block number");
            return (false, None);
        }
    }
    let balance = self.balances.get_mut(&address).unwrap();
    assert!(p2c_verify(balance.clone(),message.coin.clone(),message.delt_ba.clone(),message.enc,address.clone(),message.proof).unwrap());
    self.last_spent.insert(address, message.block_number);
    self.coins.insert(message.coin.clone());
    // Append the coin commitment as a new tree leaf.
    self.tree.append(PedersenDigest(str2u644(message.coin.clone())));
    // Debit the sender by the committed delta.
    *balance = ecc_sub(balance.clone(), message.delt_ba);
    println!("sender proof verify ok! root {:?} coin {:?}", self.tree.root(), message.coin);
    (true, Some(self.tree.path(VecDeque::new())))
}
/// Verifies a receiver (c2p) proof against the current Merkle root. On
/// success it records the nullifier and credits the receiver's balance.
///
/// Returns `false` when the cited root is stale or the nullifier was
/// already spent.
///
/// # Panics
/// Panics if `address` has no balance entry, or if proof verification
/// fails/errors (`assert!` + `unwrap`).
pub fn receive_verify(&mut self, address: String, message: ReceiverProof) -> bool {
    // The proof is only valid against the tree's current root.
    if str2u644(message.root.clone()) != self.tree.root().0 {
        println!("invalid root, message.root {:?}, tree.root {:?}", message.root, self.tree.root());
        return false;
    }
    if self.nullifier_set.contains(&message.nullifier) {
        println!("Dup nullifier");
        return false;
    }
    assert!(c2p_verify(message.nullifier.clone(),message.root,message.delt_ba.clone(),message.proof).unwrap());
    // Consume the nullifier so the same note cannot be claimed twice.
    self.nullifier_set.insert(message.nullifier);
    let balance = self.balances.get_mut(&address).unwrap();
    // Credit the receiver by the committed delta.
    *balance = ecc_add(balance.clone(), message.delt_ba);
    true
}
} |
use glam::Vec2;
use legion::prelude::Entity;
/// Movement component: a base speed plus the current movement target.
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct Moving {
    /// Base movement speed; units are project-defined (not visible here).
    pub base_speed: f32,
    /// What this entity is currently moving toward.
    pub target: MoveTarget,
}
/// Destination of a `Moving` entity: nothing, a fixed world location, or
/// another (possibly moving) entity.
#[derive(Clone, Copy, Debug, PartialEq)]
#[allow(dead_code)]
pub enum MoveTarget {
    /// Not moving anywhere.
    None,
    /// Move toward a fixed 2D position.
    Location(Vec2),
    /// Follow another entity.
    Entity(Entity),
}
|
/// Simple 2D point used to demonstrate struct destructuring below.
struct Point {
    x: i32,
    y: i32,
}
/// Demonstrates struct destructuring with matching and renamed bindings.
fn main() {
    let p = Point { x: 0, y: 7 };
    // A way of destructuring. The names of the fields must match the names of the fields in the
    // struct. Because both fields are Copy (i32), `p` stays usable afterwards.
    let Point { x, y } = p;
    println!("{}", x);
    println!("{}", y);
    // Destructure again, this time renaming the bindings with `field: name`.
    let Point { x: alter_x, y: alter_y } = p;
    println!("{}", alter_x);
    println!("{}", alter_y);
}
|
// This file is part of syslog2. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/syslog2/master/COPYRIGHT. No part of syslog2, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file.
// Copyright © 2016 The developers of syslog2. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/syslog2/master/COPYRIGHT.
extern crate libc;
use Priority;
use Severity;
use super::syslogSenders::Rfc3164Facility;
use self::libc::c_int;
/// Fill in for lack of this value in Android bionic's libc
/// On Windows, create a default for this value that matches Linux
#[cfg(any(target_os = "android", target_os = "windows"))] pub const LOG_NFACILITIES: c_int = 24;
/// Everywhere else, re-export libc's own count of defined syslog facilities.
#[cfg(not(any(target_os = "android", target_os = "windows")))] pub const LOG_NFACILITIES: c_int = self::libc::LOG_NFACILITIES;
/// `LOG_AUTHPRIV` and `LOG_FTP` are not available on Solaris for local logging
/// `LOG_NTP`, `LOG_SECURITY` and `LOG_CONSOLE` are only available on FreeBSD and DragonFlyBSD for local logging
/// `LOG_NETINFO`, `LOG_REMOTEAUTH`, `LOG_INSTALL`, `LOG_RAS` and `LOG_LAUNCHD` are only available on Mac OS X for local logging
/// `LOG_CRON` differs in value on Solaris, and _should not_ be used to send syslog2 messages on the wire
#[allow(non_camel_case_types)]
#[derive(Debug, Copy, Clone)]
#[repr(i32)] // We'd like to use c_int here, but the compiler won't let us
#[cfg(not(target_os = "windows"))]
pub enum Facility
{
    /// Do not use this value in calls to syslog2, as it is ignored
    /// If you really, really want to log as the kernel, use this value with openlog and then omit a facility whilst logging, eg
    /// use the Severity-only variants of the API
    LOG_KERN = self::libc::LOG_KERN,
    // Each variant mirrors the platform libc constant of the same name, so
    // the discriminants match whatever the local syslog implementation uses.
    LOG_USER = self::libc::LOG_USER,
    LOG_MAIL = self::libc::LOG_MAIL,
    LOG_DAEMON = self::libc::LOG_DAEMON,
    LOG_AUTH = self::libc::LOG_AUTH,
    LOG_SYSLOG = self::libc::LOG_SYSLOG,
    LOG_LPR = self::libc::LOG_LPR,
    LOG_NEWS = self::libc::LOG_NEWS,
    LOG_UUCP = self::libc::LOG_UUCP,
    LOG_CRON = self::libc::LOG_CRON,
    // Platform-specific facilities, gated to the platforms whose libc defines them.
    #[cfg(not(target_os = "solaris"))] LOG_AUTHPRIV = self::libc::LOG_AUTHPRIV,
    #[cfg(not(target_os = "solaris"))] LOG_FTP = self::libc::LOG_FTP,
    #[cfg(any(target_os = "freebsd", target_os = "dragonfly"))] LOG_NTP = self::libc::LOG_NTP,
    #[cfg(any(target_os = "freebsd", target_os = "dragonfly"))] LOG_SECURITY = self::libc::LOG_SECURITY,
    #[cfg(any(target_os = "freebsd", target_os = "dragonfly"))] LOG_CONSOLE = self::libc::LOG_CONSOLE,
    #[cfg(target_os = "macos")] LOG_NETINFO = self::libc::LOG_NETINFO,
    #[cfg(target_os = "macos")] LOG_REMOTEAUTH = self::libc::LOG_REMOTEAUTH,
    #[cfg(target_os = "macos")] LOG_INSTALL = self::libc::LOG_INSTALL,
    #[cfg(target_os = "macos")] LOG_RAS = self::libc::LOG_RAS,
    // Local-use facilities, available everywhere.
    LOG_LOCAL0 = self::libc::LOG_LOCAL0,
    LOG_LOCAL1 = self::libc::LOG_LOCAL1,
    LOG_LOCAL2 = self::libc::LOG_LOCAL2,
    LOG_LOCAL3 = self::libc::LOG_LOCAL3,
    LOG_LOCAL4 = self::libc::LOG_LOCAL4,
    LOG_LOCAL5 = self::libc::LOG_LOCAL5,
    LOG_LOCAL6 = self::libc::LOG_LOCAL6,
    LOG_LOCAL7 = self::libc::LOG_LOCAL7,
    #[cfg(target_os = "macos")] LOG_LAUNCHD = self::libc::LOG_LAUNCHD,
}
/// These values are 'fakes' to allow some measure of syslog2 compatibility on Windows
/// Values match those used on Linux
/// Facility codes occupy the bits above the 3-bit severity, hence `n << 3`.
/// Codes 12-15 (and anything above 23 other than the local range) are
/// deliberately not exposed; see `toRfc3164Facility` for the rationale.
#[allow(non_camel_case_types)]
#[derive(Debug, Copy, Clone)]
#[repr(i32)] // We'd like to use c_int here, but the compiler won't let us
#[cfg(target_os = "windows")]
pub enum Facility
{
    LOG_KERN = 0,
    LOG_USER = 1 << 3,
    LOG_MAIL = 2 << 3,
    LOG_DAEMON = 3 << 3,
    LOG_AUTH = 4 << 3,
    LOG_SYSLOG = 5 << 3,
    LOG_LPR = 6 << 3,
    LOG_NEWS = 7 << 3,
    LOG_UUCP = 8 << 3,
    LOG_CRON = 9 << 3,
    LOG_AUTHPRIV = 10 << 3,
    LOG_FTP = 11 << 3,
    LOG_LOCAL0 = 16 << 3,
    LOG_LOCAL1 = 17 << 3,
    LOG_LOCAL2 = 18 << 3,
    LOG_LOCAL3 = 19 << 3,
    LOG_LOCAL4 = 20 << 3,
    LOG_LOCAL5 = 21 << 3,
    LOG_LOCAL6 = 22 << 3,
    LOG_LOCAL7 = 23 << 3,
}
impl Facility
{
    /// Combines this facility with `severity` into a syslog `Priority`
    /// (delegates to `Severity::toPriority`).
    #[inline(always)]
    pub fn toPriority(self, severity: Severity) -> Priority
    {
        severity.toPriority(self)
    }
    /// Returns `LOG_AUTHPRIV` except on Solaris, where it returns `LOG_AUTH`
    #[inline(always)]
    #[cfg(not(target_os = "solaris"))]
    pub fn bestAuthenticationFacilityForPlatform() -> Facility
    {
        Facility::LOG_AUTHPRIV
    }
    /// Returns `LOG_AUTHPRIV` except on Solaris, where it returns `LOG_AUTH`
    #[inline(always)]
    #[cfg(target_os = "solaris")]
    pub fn bestAuthenticationFacilityForPlatform() -> Facility
    {
        Facility::LOG_AUTH
    }
    /// Will not match 1:1, as we will not (ab)use effectively private use RFC 3164 facility codes (12 - 15 inclusive and greater than 23)
    /// Solaris cron is mapped to clock (11), not cron (15), for maximum compatibility
    /// FreeBSD and DragonFlyBSD LOG_SECURITY and LOG_CONSOLE are mapped to authpriv, for maximum compatibility and secure handling
    /// FreeBSD and DragonFlyBSD LOG_NTP is mapped to daemon
    /// Mac OS X LOG_NETINFO is mapped to daemon
    /// Mac OS X LOG_REMOTEAUTH is mapped to authpriv
    /// Mac OS X LOG_INSTALL and LOG_RAS are mapped to authpriv (as they would seem to leak private or privleged information)
    /// Mac OS X LOG_LAUNCHD is mapped to daemon
    pub fn toRfc3164Facility(self) -> Rfc3164Facility
    {
        match self
        {
            Facility::LOG_KERN => Rfc3164Facility::kern,
            Facility::LOG_USER => Rfc3164Facility::user,
            Facility::LOG_MAIL => Rfc3164Facility::mail,
            Facility::LOG_DAEMON => Rfc3164Facility::daemon,
            Facility::LOG_AUTH => Rfc3164Facility::auth,
            Facility::LOG_SYSLOG => Rfc3164Facility::syslog2,
            Facility::LOG_LPR => Rfc3164Facility::lpr,
            Facility::LOG_NEWS => Rfc3164Facility::news,
            // NOTE(review): UUCP mapping to `news` looks like a copy-paste
            // slip — RFC 3164 defines a dedicated uucp facility (8), and the
            // doc comment above does not mention this remapping. Confirm.
            Facility::LOG_UUCP => Rfc3164Facility::news,
            Facility::LOG_CRON => Rfc3164Facility::clock,
            #[cfg(not(target_os = "solaris"))] Facility::LOG_AUTHPRIV => Rfc3164Facility::authpriv,
            #[cfg(not(target_os = "solaris"))] Facility::LOG_FTP => Rfc3164Facility::ftp,
            #[cfg(any(target_os = "freebsd", target_os = "dragonfly"))] Facility::LOG_NTP => Rfc3164Facility::daemon,
            #[cfg(any(target_os = "freebsd", target_os = "dragonfly"))] Facility::LOG_SECURITY => Rfc3164Facility::authpriv,
            #[cfg(any(target_os = "freebsd", target_os = "dragonfly"))] Facility::LOG_CONSOLE => Rfc3164Facility::authpriv,
            #[cfg(target_os = "macos")] Facility::LOG_NETINFO => Rfc3164Facility::daemon,
            #[cfg(target_os = "macos")] Facility::LOG_REMOTEAUTH => Rfc3164Facility::authpriv,
            #[cfg(target_os = "macos")] Facility::LOG_INSTALL => Rfc3164Facility::authpriv,
            #[cfg(target_os = "macos")] Facility::LOG_RAS => Rfc3164Facility::authpriv,
            Facility::LOG_LOCAL0 => Rfc3164Facility::local0,
            Facility::LOG_LOCAL1 => Rfc3164Facility::local1,
            Facility::LOG_LOCAL2 => Rfc3164Facility::local2,
            Facility::LOG_LOCAL3 => Rfc3164Facility::local3,
            Facility::LOG_LOCAL4 => Rfc3164Facility::local4,
            Facility::LOG_LOCAL5 => Rfc3164Facility::local5,
            Facility::LOG_LOCAL6 => Rfc3164Facility::local6,
            Facility::LOG_LOCAL7 => Rfc3164Facility::local7,
            #[cfg(target_os = "macos")] Facility::LOG_LAUNCHD => Rfc3164Facility::daemon,
        }
    }
}
impl Default for Facility
{
/// Defaults to `LOG_USER`, as used in Musl libc
#[inline(always)]
fn default() -> Facility
{
Facility::LOG_USER
}
}
|
use std::collections::BTreeMap;
use model::business::Ticker;
/// Records of all stocks values
/// Records of all stocks values
/// Maps a ticker symbol to the full history of observed values, in push order.
pub struct Stocks {
    values : BTreeMap<String, Vec<f32>>
}
impl Stocks {
    /// Creates an empty record set.
    pub fn new() -> Stocks {
        Stocks { values: BTreeMap::new() }
    }

    /// Appends `value` to the history for `ticker`, creating the history on
    /// first sight. Uses the entry API instead of the original
    /// get_mut-then-insert pair (one lookup, no early return).
    pub fn push(&mut self, ticker: &Ticker, value: f32) {
        self.values
            .entry(ticker.to_string())
            .or_insert_with(Vec::new)
            .push(value);
    }

    /// Returns the value recorded for `ticker` at index `tick`.
    ///
    /// # Panics
    /// Panics if the ticker is unknown or `tick` is out of range.
    pub fn get(&self, ticker: &Ticker, tick: usize) -> f32 {
        self.values.get(ticker).unwrap()[tick]
    }

    /// Returns a copy of the full value history for `ticker`.
    ///
    /// # Panics
    /// Panics if the ticker is unknown.
    pub fn get_all(&self, ticker: &Ticker) -> Vec<f32> {
        self.values.get(ticker).unwrap().clone()
    }
}
|
/// Human-readable rendering of nanosecond durations.
pub trait TimeFormatter {
    /// Converts an integer representing nanoseconds to a human-readable string.
    fn format_as_time(&self) -> String;
}

/// Nanoseconds per microsecond.
const MICROSECOND: u64 = 1_000;
/// Nanoseconds per millisecond.
const MILLISECOND: u64 = MICROSECOND * 1_000;
/// Nanoseconds per second.
const SECOND: u64 = MILLISECOND * 1_000;
/// Nanoseconds per minute.
const MINUTE: u64 = SECOND * 60;

#[allow(clippy::cast_precision_loss)]
#[allow(clippy::non_ascii_literal)]
impl TimeFormatter for u64 {
    /// Picks the largest fitting unit; minutes recurse so the remainder is
    /// itself formatted in the next unit down (e.g. "2 min 15.543 sec").
    fn format_as_time(&self) -> String {
        let ns = *self;
        match ns {
            n if n >= MINUTE => format!(
                "{} min {}",
                n / MINUTE,                       // whole minutes
                (n % MINUTE).format_as_time()     // remainder, recursively formatted
            ),
            n if n >= SECOND => format!("{:.3} sec", n as f64 / SECOND as f64),
            n if n >= MILLISECOND => format!("{:.3} ms", n as f64 / MILLISECOND as f64),
            n if n >= MICROSECOND => format!("{:.3} μs", n as f64 / MICROSECOND as f64),
            n => format!("{} ns", n),
        }
    }
}
#[cfg(test)]
#[allow(clippy::non_ascii_literal)]
mod time_formatter_tests {
    use super::{TimeFormatter, MICROSECOND, MILLISECOND, MINUTE, SECOND};
    /// One assertion per magnitude band of `format_as_time`.
    #[test]
    fn time_formatter() {
        // nanoseconds
        let mut span: u64 = 123;
        assert_eq!(span.format_as_time(), "123 ns");
        // microseconds
        span = MICROSECOND + 234;
        assert_eq!(span.format_as_time(), "1.234 μs");
        // milliseconds
        span = 7 * MILLISECOND + 654 * MICROSECOND;
        assert_eq!(span.format_as_time(), "7.654 ms");
        // seconds
        span = 9 * SECOND + 877 * MILLISECOND;
        assert_eq!(span.format_as_time(), "9.877 sec");
        // minutes — remainder is formatted recursively as seconds
        span = 2 * MINUTE + 15 * SECOND + 543 * MILLISECOND;
        assert_eq!(span.format_as_time(), "2 min 15.543 sec");
    }
}
|
use prelude::*;
/// Command that reacts to the triggering message with keycap emoji 1-3.
struct ReactCommand;
impl Command for ReactCommand {
    fn execute(&self, _ctx: &mut Context, msg: &Message, _args: Args) -> CommandResult {
        // Reaction failures (missing permissions, deleted message, …) are
        // deliberately ignored: `let _ =` discards each Result.
        let _ = msg.react("1⃣");
        let _ = msg.react("2⃣");
        let _ = msg.react("3⃣");
        Ok(())
    }
}
/// Registers the `react` command on the framework and returns it (builder style).
pub fn register_command(sf: StandardFramework) -> StandardFramework {
    sf.cmd("react", ReactCommand)
}
|
// Copyright 2020. The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use crate::transaction_service::{
error::{TransactionServiceError, TransactionServiceProtocolError},
handle::TransactionEvent,
service::TransactionServiceResources,
storage::database::{TransactionBackend, TransactionStatus},
};
use futures::{channel::mpsc::Receiver, FutureExt, StreamExt};
use log::*;
use std::{convert::TryFrom, sync::Arc, time::Duration};
use tari_comms::types::CommsPublicKey;
use tari_comms_dht::{domain_message::OutboundDomainMessage, outbound::OutboundEncryption};
use tari_core::{
base_node::proto::{
base_node as BaseNodeProto,
base_node::{
base_node_service_request::Request as BaseNodeRequestProto,
base_node_service_response::Response as BaseNodeResponseProto,
},
},
mempool::{
proto::mempool as MempoolProto,
service::{MempoolResponse, MempoolServiceResponse},
TxStorageResponse,
},
transactions::transaction::TransactionOutput,
};
use tari_crypto::tari_utilities::{hex::Hex, Hashable};
use tari_p2p::tari_message::TariMessageType;
use tokio::time::delay_for;
const LOG_TARGET: &str = "wallet::transaction_service::protocols::broadcast_protocol";
/// This protocol defines the process of monitoring a mempool and base node to detect when a Completed transaction is
/// Broadcast to the mempool or potentially Mined
pub struct TransactionBroadcastProtocol<TBackend>
where TBackend: TransactionBackend + Clone + 'static
{
    /// TxId of the completed transaction being monitored; also used as the
    /// request key for base node queries.
    id: u64,
    resources: TransactionServiceResources<TBackend>,
    /// How long to wait for a response before re-broadcasting.
    timeout: Duration,
    base_node_public_key: CommsPublicKey,
    // Receivers are wrapped in Option so `execute` can `take()` them out of
    // `self` exactly once.
    mempool_response_receiver: Option<Receiver<MempoolServiceResponse>>,
    base_node_response_receiver: Option<Receiver<BaseNodeProto::BaseNodeServiceResponse>>,
}
impl<TBackend> TransactionBroadcastProtocol<TBackend>
where TBackend: TransactionBackend + Clone + 'static
{
/// Builds a new protocol instance. The two receivers are stored as
/// `Some(..)` and consumed exactly once by `execute`.
pub fn new(
    id: u64,
    resources: TransactionServiceResources<TBackend>,
    timeout: Duration,
    base_node_public_key: CommsPublicKey,
    mempool_response_receiver: Receiver<MempoolServiceResponse>,
    base_node_response_receiver: Receiver<BaseNodeProto::BaseNodeServiceResponse>,
) -> Self
{
    Self {
        id,
        resources,
        timeout,
        base_node_public_key,
        mempool_response_receiver: Some(mempool_response_receiver),
        base_node_response_receiver: Some(base_node_response_receiver),
    }
}
/// The task that defines the execution of the protocol.
///
/// Returns `Ok(id)` when the transaction leaves the Completed state or a
/// handled response ends the protocol; errors if the transaction cannot be
/// found, sending fails, or `execute` is called twice (receivers gone).
pub async fn execute(mut self) -> Result<u64, TransactionServiceProtocolError> {
    // Take ownership of the receivers; they exist only for the first call.
    let mut mempool_response_receiver = self
        .mempool_response_receiver
        .take()
        .ok_or_else(|| TransactionServiceProtocolError::new(self.id, TransactionServiceError::InvalidStateError))?;
    let mut base_node_response_receiver = self
        .base_node_response_receiver
        .take()
        .ok_or_else(|| TransactionServiceProtocolError::new(self.id, TransactionServiceError::InvalidStateError))?;
    // This is the main loop of the protocol and following the following steps
    // 1) Check transaction being monitored is still in the Completed state and needs to be monitored
    // 2) Send a MempoolRequest::SubmitTransaction to Mempool and a Mined? Request to base node
    // 3) Wait for a either a Mempool response, Base Node response for the correct Id OR a Timeout
    // a) A Mempool response for this Id is received > update the Tx status and end the protocol
    // b) A Basenode response for this Id is received showing it is mined > Update Tx status and end protocol
    // c) Timeout is reached > Start again
    loop {
        let completed_tx = match self.resources.db.get_completed_transaction(self.id).await {
            Ok(tx) => tx,
            Err(e) => {
                error!(
                    target: LOG_TARGET,
                    "Cannot find Completed Transaction (TxId: {}) referred to by this Broadcast protocol: {:?}",
                    self.id,
                    e
                );
                return Err(TransactionServiceProtocolError::new(
                    self.id,
                    TransactionServiceError::TransactionDoesNotExistError,
                ));
            },
        };
        // Another actor may have advanced the status (e.g. Broadcast/Mined);
        // stop monitoring in that case.
        if completed_tx.status != TransactionStatus::Completed {
            debug!(
                target: LOG_TARGET,
                "Transaction (TxId: {}) no longer in Completed state and will stop being broadcast", self.id
            );
            return Ok(self.id);
        }
        info!(
            target: LOG_TARGET,
            "Attempting to Broadcast Transaction (TxId: {} and Kernel Signature: {}) to Mempool",
            self.id,
            completed_tx.transaction.body.kernels()[0]
                .excess_sig
                .get_signature()
                .to_hex()
        );
        trace!(target: LOG_TARGET, "{}", completed_tx.transaction);
        // Send Mempool Request
        let mempool_request = MempoolProto::MempoolServiceRequest {
            request_key: completed_tx.tx_id,
            request: Some(MempoolProto::mempool_service_request::Request::SubmitTransaction(
                completed_tx.transaction.clone().into(),
            )),
        };
        self.resources
            .outbound_message_service
            .send_direct(
                self.base_node_public_key.clone(),
                OutboundEncryption::None,
                OutboundDomainMessage::new(TariMessageType::MempoolRequest, mempool_request.clone()),
            )
            .await
            .map_err(|e| TransactionServiceProtocolError::new(self.id, TransactionServiceError::from(e)))?;
        // Send Base Node query: ask whether this transaction's outputs are
        // already UTXOs (i.e. it was mined).
        let mut hashes = Vec::new();
        for o in completed_tx.transaction.body.outputs() {
            hashes.push(o.hash());
        }
        let request = BaseNodeRequestProto::FetchUtxos(BaseNodeProto::HashOutputs { outputs: hashes });
        let service_request = BaseNodeProto::BaseNodeServiceRequest {
            request_key: self.id,
            request: Some(request),
        };
        self.resources
            .outbound_message_service
            .send_direct(
                self.base_node_public_key.clone(),
                OutboundEncryption::None,
                OutboundDomainMessage::new(TariMessageType::BaseNodeRequest, service_request),
            )
            .await
            .map_err(|e| TransactionServiceProtocolError::new(self.id, TransactionServiceError::from(e)))?;
        let mut delay = delay_for(self.timeout).fuse();
        // A handled (`true`) response breaks out of the loop; an unmatched
        // response or a timeout falls through to the retry logging below.
        futures::select! {
            mempool_response = mempool_response_receiver.select_next_some() => {
                if self.handle_mempool_response(mempool_response).await? {
                    break;
                }
            },
            base_node_response = base_node_response_receiver.select_next_some() => {
                if self.handle_base_node_response(base_node_response).await? {
                    break;
                }
            },
            () = delay => {
            },
        }
        info!(
            target: LOG_TARGET,
            "Mempool broadcast timed out for Transaction with TX_ID: {}", self.id
        );
        let _ = self
            .resources
            .event_publisher
            .send(Arc::new(TransactionEvent::MempoolBroadcastTimedOut(self.id)))
            .map_err(|e| {
                trace!(
                    target: LOG_TARGET,
                    "Error sending event, usually because there are no subscribers: {:?}",
                    e
                );
                e
            });
    }
    Ok(self.id)
}
async fn handle_mempool_response(
&mut self,
response: MempoolServiceResponse,
) -> Result<bool, TransactionServiceProtocolError>
{
if response.request_key != self.id {
trace!(
target: LOG_TARGET,
"Mempool response key does not match this Broadcast Protocol Id"
);
return Ok(false);
}
// Handle a receive Mempool Response
match response.response {
MempoolResponse::Stats(_) => {
error!(target: LOG_TARGET, "Invalid Mempool response variant");
},
MempoolResponse::State(_) => {
error!(target: LOG_TARGET, "Invalid Mempool response variant");
},
MempoolResponse::TxStorage(ts) => {
let completed_tx = match self
.resources
.db
.get_completed_transaction(response.request_key.clone())
.await
{
Ok(tx) => tx,
Err(e) => {
error!(
target: LOG_TARGET,
"Cannot find Completed Transaction (TxId: {}) referred to by this Broadcast protocol: {:?}",
self.id,
e
);
return Err(TransactionServiceProtocolError::new(
self.id,
TransactionServiceError::TransactionDoesNotExistError,
));
},
};
match completed_tx.status {
TransactionStatus::Completed => match ts {
// Getting this response means the Mempool Rejected this transaction so it will be
// cancelled.
TxStorageResponse::NotStored => {
error!(
target: LOG_TARGET,
"Mempool response received for TxId: {:?}. Transaction was REJECTED. Cancelling \
transaction.",
self.id
);
if let Err(e) = self
.resources
.output_manager_service
.cancel_transaction(completed_tx.tx_id)
.await
{
error!(
target: LOG_TARGET,
"Failed to Cancel outputs for TX_ID: {} after failed sending attempt with error \
{:?}",
completed_tx.tx_id,
e
);
}
if let Err(e) = self.resources.db.cancel_completed_transaction(completed_tx.tx_id).await {
error!(
target: LOG_TARGET,
"Failed to Cancel TX_ID: {} after failed sending attempt with error {:?}",
completed_tx.tx_id,
e
);
}
let _ = self
.resources
.event_publisher
.send(Arc::new(TransactionEvent::TransactionCancelled(self.id)))
.map_err(|e| {
trace!(
target: LOG_TARGET,
"Error sending event, usually because there are no subscribers: {:?}",
e
);
e
});
return Err(TransactionServiceProtocolError::new(
self.id,
TransactionServiceError::MempoolRejection,
));
},
// Any other variant of this enum means the transaction has been received by the
// base_node and is in one of the various mempools
_ => {
// If this transaction is still in the Completed State it should be upgraded to the
// Broadcast state
info!(
target: LOG_TARGET,
"Completed Transaction (TxId: {} and Kernel Excess Sig: {}) detected as Broadcast to \
Base Node Mempool in {:?}",
self.id,
completed_tx.transaction.body.kernels()[0]
.excess_sig
.get_signature()
.to_hex(),
ts
);
self.resources
.db
.broadcast_completed_transaction(self.id)
.await
.map_err(|e| {
TransactionServiceProtocolError::new(self.id, TransactionServiceError::from(e))
})?;
let _ = self
.resources
.event_publisher
.send(Arc::new(TransactionEvent::TransactionBroadcast(self.id)))
.map_err(|e| {
trace!(
target: LOG_TARGET,
"Error sending event, usually because there are no subscribers: {:?}",
e
);
e
});
return Ok(true);
},
},
_ => (),
}
},
}
Ok(false)
}
/// Handles a base node `FetchUtxos` response: if every one of this
/// transaction's outputs is reported as a UTXO, the transaction is marked
/// mined and the protocol ends (`Ok(true)`); otherwise `Ok(false)`.
///
/// Fix: removed the redundant inner `!response.is_empty()` check — the
/// enclosing `if` already guarantees the response is non-empty.
async fn handle_base_node_response(
    &mut self,
    response: BaseNodeProto::BaseNodeServiceResponse,
) -> Result<bool, TransactionServiceProtocolError>
{
    if response.request_key != self.id {
        trace!(
            target: LOG_TARGET,
            "Base Node response key does not match this Broadcast Protocol Id"
        );
        return Ok(false);
    }
    // Only a TransactionOutputs payload is actionable here.
    let response: Vec<tari_core::transactions::proto::types::TransactionOutput> = match response.response {
        Some(BaseNodeResponseProto::TransactionOutputs(outputs)) => outputs.outputs,
        _ => {
            return Ok(false);
        },
    };
    let completed_tx = match self.resources.db.get_completed_transaction(self.id).await {
        Ok(tx) => tx,
        Err(_) => {
            error!(
                target: LOG_TARGET,
                "Cannot find Completed Transaction (TxId: {}) referred to by this Broadcast protocol", self.id
            );
            return Err(TransactionServiceProtocolError::new(
                self.id,
                TransactionServiceError::TransactionDoesNotExistError,
            ));
        },
    };
    if !response.is_empty() &&
        (completed_tx.status == TransactionStatus::Broadcast ||
            completed_tx.status == TransactionStatus::Completed)
    {
        // Verify that every returned output belongs to this transaction.
        // Each output is converted (conversion errors always propagate)
        // before the membership check.
        let mut check = true;
        for output in response.iter() {
            let transaction_output = TransactionOutput::try_from(output.clone()).map_err(|_| {
                TransactionServiceProtocolError::new(
                    self.id,
                    TransactionServiceError::ConversionError("Could not convert Transaction Output".to_string()),
                )
            })?;
            check = check &&
                completed_tx
                    .transaction
                    .body
                    .outputs()
                    .iter()
                    .any(|item| item == &transaction_output);
        }
        // If all outputs are present then mark this transaction as mined.
        if check {
            self.resources
                .output_manager_service
                .confirm_transaction(
                    self.id,
                    completed_tx.transaction.body.inputs().clone(),
                    completed_tx.transaction.body.outputs().clone(),
                )
                .await
                .map_err(|e| TransactionServiceProtocolError::new(self.id, TransactionServiceError::from(e)))?;
            self.resources
                .db
                .mine_completed_transaction(self.id)
                .await
                .map_err(|e| TransactionServiceProtocolError::new(self.id, TransactionServiceError::from(e)))?;
            let _ = self
                .resources
                .event_publisher
                .send(Arc::new(TransactionEvent::TransactionMined(self.id)))
                .map_err(|e| {
                    trace!(
                        target: LOG_TARGET,
                        "Error sending event, usually because there are no subscribers: {:?}",
                        e
                    );
                    e
                });
            info!(
                target: LOG_TARGET,
                "Transaction (TxId: {:?}) detected as mined on the Base Layer", self.id
            );
            return Ok(true);
        }
    }
    Ok(false)
}
}
|
/// Prints a greeting, then a date built from named format arguments.
fn main() {
    // hello world.
    println!("Hello World");
    // print format: integers render identically to their to_string() forms,
    // so passing them directly produces the same output.
    println!("Today is {month}/{day}", month = 4, day = 22);
}
|
#![doc(html_root_url = "https://docs.rs/tokio-sync/0.1.6")]
#![deny(missing_debug_implementations, missing_docs, unreachable_pub)]
#![cfg_attr(test, deny(warnings))]
//! Asynchronous synchronization primitives.
//!
//! This crate provides primitives for synchronizing asynchronous tasks.
extern crate fnv;
#[macro_use]
extern crate futures;
/// Crate-internal debug logging macro. The `if false` makes it a no-op at
/// runtime while still type-checking the format arguments at compile time.
macro_rules! debug {
    ($($t:tt)*) => {
        if false {
            println!($($t)*);
        }
    }
}
/// Fuzz-only hook: the body is compiled (type-checked) but never executed
/// in this build configuration.
macro_rules! if_fuzz {
    ($($t:tt)*) => {{
        if false { $($t)* }
    }}
}
pub mod lock;
mod loom;
pub mod mpsc;
pub mod oneshot;
pub mod semaphore;
pub mod task;
pub mod watch;
|
use rocket::response::Response;
use rocket_contrib::json::Json;
/// JSON body for POST /new_challenge.
#[derive(Deserialize)]
pub struct NewChallengeRequest{
    title: String,
    topic: String,
    image: String,        // stored in the `picture` column
    description: String,
}
#[post("/new_challenge", data = "<request>")]
pub fn new_Challenge(request: Json<NewChallengeRequest>, con: MainDbCon,
) -> Response {
con.0.query("INSERT INTO Challenge(title, topic, description, picture)
values($1, $2, $3, $4)",
&[&request.title], $[$request.topic], $[$request.description], $[$request.image],
);
let mut res = Response::new();
res.set_status(Status::new(204, "challenge added"));
res
}
/// JSON body for POST /change_challenge.
#[derive(Deserialize)]
pub struct ChangeChallengeRequest{
    id: i32,              // primary key of the row to update
    title: String,
    topic: String,
    image: String,        // stored in the `picture` column
    description: String,
}
#[post("/change_challenge", data = "<request>")]
pub fn change_Challenge(request: Json<ChangeChallengeRequest>, con: MainDbCon,
) -> Response {
con.0.query("UPDATE Challenge SET title = $1 AND topic = $2 AND description = $3 AND picture = $4
WHERE id = $5",
&[&request.title], $[$request.topic], $[$request.description], $[$request.image], $[$request.id],
);
let mut res = Response::new();
res.set_status(Status::new(204, "challenge changed"));
res
}
/// JSON body for POST /delete_challenge.
#[derive(Deserialize)]
pub struct DeleteChallengeRequest{
    id: i32,              // primary key of the row to delete
}
#[post("/delete_challenge", data = "<request>")]
pub fn delete_Challenge(request: Json<DeleteChallengeRequest>, con: MainDbCon,
) -> Response {
con.0.query("DELETE FROM Challenge
WHERE id = $1",
$[$request.id],
);
let mut res = Response::new();
res.set_status(Status::new(204, "challenge deleted"));
res
} |
use std::cmp::Reverse;
use std::cmp::{max, min};
use std::collections::{BinaryHeap, HashMap, HashSet};
use itertools::Itertools;
use whiteread::parse_line;
// 1e9 + 7, the usual competitive-programming prime modulus.
// NOTE(review): unused in this file, and conventionally it would be named
// in SCREAMING_SNAKE_CASE; kept as-is since other snippets may reference it.
const ten97: usize = 1000000007;
/// Maps an ASCII letter to its 0-based alphabet index
/// ('a'/'A' => 0 … 'z'/'Z' => 25), panicking on anything else.
fn alphabet2idx(c: char) -> usize {
    match c {
        'a'..='z' => (c as u8 - b'a') as usize,
        'A'..='Z' => (c as u8 - b'A') as usize,
        _ => panic!("wtf"),
    }
}
// Euler tour
// (original note: "apparently this is called an Euler tour")
// Reads a tree of n nodes (n-1 edges) and prints the DFS walk from node 1,
// listing every node each time it is entered or backtracked to.
fn main() {
    let n: usize = parse_line().unwrap();
    // Min-heaps (via Reverse) so neighbours are taken in ascending order.
    let mut paths: Vec<BinaryHeap<Reverse<usize>>> = vec![BinaryHeap::new(); n + 1];
    for _ in 0..n - 1 {
        let (a, b): (usize, usize) = parse_line().unwrap();
        paths[a].push(Reverse(b));
        paths[b].push(Reverse(a));
    }
    let mut pre = vec![0; n + 1];          // DFS parent of each node
    let mut already = vec![false; n + 1];  // visited markers
    already[1] = true;
    let mut michisuji = vec![1];           // "michisuji" = the route taken
    let mut now = 1;
    print!("1");
    loop {
        if let Some(next) = paths[now].pop() {
            // Reverse edges back to visited nodes are silently discarded.
            if !already[next.0] {
                pre[next.0] = now;
                now = next.0;
                already[now] = true;
                michisuji.push(now);
                continue;
            }
        } else {
            if now == 1 {
                // Root has no unvisited edges left: the tour is complete.
                break;
            } else {
                // Dead end: backtrack to the parent, recording the revisit.
                now = pre[now];
                michisuji.push(now);
            }
        }
    }
    // Node 1 was already printed; print the rest space-separated.
    for i in michisuji.iter().skip(1) {
        print!(" {}", i);
    }
    println!("");
}
|
use crate::H256;
/// Error type for operations that look up nodes by `H256` key.
#[derive(Debug, Clone)]
pub enum Error {
    /// No entry exists for this key.
    MissingKey(H256),
}
/// Crate-local alias binding the error type.
pub type Result<T> = ::std::result::Result<T, Error>;
|
// See LICENSE file for copyright and license details.
use std::f32::consts::{PI, FRAC_PI_2};
use std::num::{pow, abs};
use cgmath::{Vector2, Vector3, Vector};
use core::types::{MInt, MapPos};
use core::misc::{rad_to_deg};
use visualizer::types::{WorldPos, MFloat, VertexCoord};
// Circumradius (center to vertex) of a map hex.
pub const HEX_EX_RADIUS: MFloat = 1.0;
// (pow(1.0, 2) - pow(0.5, 2)).sqrt()
// Inradius (center to edge midpoint) = circumradius * sqrt(3)/2.
pub const HEX_IN_RADIUS: MFloat = 0.866025403784 * HEX_EX_RADIUS;
// Small z offset applied by `lift` below.
pub const MINIMAL_LIFT_HEIGHT: MFloat = 0.01;
/// Returns `v` raised along z by `MINIMAL_LIFT_HEIGHT`.
pub fn lift(v: Vector3<MFloat>) -> Vector3<MFloat> {
    let mut v = v;
    v.z += MINIMAL_LIFT_HEIGHT;
    v
}
/// Converts map-grid coordinates to a world position (z = 0) for an
/// offset hex layout: even rows are shifted right by one inradius.
pub fn map_pos_to_world_pos(i: MapPos) -> WorldPos {
    let v = Vector2 {
        x: (i.v.x as MFloat) * HEX_IN_RADIUS * 2.0,  // column spacing = 2 * inradius
        y: (i.v.y as MFloat) * HEX_EX_RADIUS * 1.5,  // row spacing = 1.5 * circumradius
    };
    if i.v.y % 2 == 0 {
        WorldPos{v: Vector3{
            x: v.x + HEX_IN_RADIUS,
            y: v.y,
            z: 0.0,
        }}
    } else {
        WorldPos{v: v.extend(0.0)}
    }
}
/// Returns the i-th of `count` vertices evenly spaced on a circle of radius
/// `HEX_EX_RADIUS`, starting at angle PI/2 (the top) and going counter-clockwise.
pub fn index_to_circle_vertex(count: MInt, i: MInt) -> VertexCoord {
    let n = FRAC_PI_2 + 2.0 * PI * (i as MFloat) / (count as MFloat);
    VertexCoord{
        v: Vector3{
            x: n.cos(),
            y: n.sin(),
            z: 0.0
        }.mul_s(HEX_EX_RADIUS)
    }
}
/// Returns the i-th corner of a unit hexagon (circle with 6 vertices).
pub fn index_to_hex_vertex(i: MInt) -> VertexCoord {
    index_to_circle_vertex(6, i)
}
/// Returns the i-th hexagon corner scaled by `scale`.
pub fn index_to_hex_vertex_s(scale: MFloat, i: MInt) -> VertexCoord {
    let v = index_to_hex_vertex(i).v.mul_s(scale);
    VertexCoord{v: v}
}
/// Euclidean distance between two world positions.
pub fn dist(a: WorldPos, b: WorldPos) -> MFloat {
    let dx = abs(b.v.x - a.v.x);
    let dy = abs(b.v.y - a.v.y);
    let dz = abs(b.v.z - a.v.z);
    (pow(dx, 2) + pow(dy, 2) + pow(dz, 2)).sqrt()
}
/// Rotation angle (degrees) from `a` toward `b`: asin of the x-offset over
/// the distance, mirrored into the opposite half-turn when `b` lies at
/// greater y than `a`.
pub fn get_rot_angle(a: WorldPos, b: WorldPos) -> MFloat {
    let mut angle = rad_to_deg(((b.v.x - a.v.x) / dist(a, b)).asin());
    if b.v.y - a.v.y > 0.0 {
        angle = -(180.0 + angle);
    }
    angle
}
// vim: set tabstop=4 shiftwidth=4 softtabstop=4 expandtab:
|
// Copyright (c) 2016 <daggerbot@gmail.com>
// This software is available under the terms of the zlib license.
// See README.md for more information.
mod app;
mod module;
mod os;
pub use app::AppContext;
pub use module::{ModuleInfo, ModuleQuery, ModuleQueryFn};
/// Machine-readable game identifier (exact use not visible in this file).
pub const GAME_ID: &'static str = "monster-battle";
/// Human-readable game title.
pub const GAME_TITLE: &'static str = "Monster Battle";
|
use super::errors::Error;
use super::eval::eval_term;
use super::Validator;
use crate::flat::PrimitiveSubtype::*;
use crate::flat::*;
use crate::lexer::Span;
use crate::raw::{IntLiteral, Spanned};
use std::cmp::Ordering;
use std::collections::HashMap;
use std::convert::TryFrom;
/// Type-check a term, resolving identifiers through `scope` and memoizing
/// resolved types in `cache`. Entry point: starts the recursive walk with an
/// empty chain of in-progress identifiers.
pub fn type_check(
    term: Spanned<&Term>,
    scope: &Libraries,
    cache: &mut HashMap<Name, Type>,
) -> Result<Type, Error> {
    _type_check(term, scope, cache, Vec::new())
}
/// Recursive worker for [`type_check`]. `seen` carries the chain of
/// identifiers currently being resolved so definition cycles can be detected.
pub fn _type_check(
    term: Spanned<&Term>,
    scope: &Libraries,
    cache: &mut HashMap<Name, Type>,
    mut seen: Vec<(Name, Span)>,
) -> Result<Type, Error> {
    match &term.value {
        Term::Identifier(name) => {
            // Already resolved earlier in this pass.
            if let Some(ty) = cache.get(name) {
                return Ok(ty.clone());
            }
            // Revisiting a name mid-resolution means a definition cycle.
            if seen.iter().any(|(n, _)| n == name) {
                return Err(Error::VarCycle(seen).into());
            }
            // NOTE: once Name is refactored just contain IDs, it can implement Copy and we won't
            // need to clone explicitly here
            seen.push((name.clone(), term.span));
            let ty = _type_check(
                scope.get_term(term.span.wrap(name))?.into(),
                scope,
                cache,
                seen,
            );
            // Cache Type::Any on failure so later uses don't cascade errors.
            // Matching on a reference clones only the Ok value, avoiding the
            // previous needless clone of the whole Result (and its Error).
            let cached = match &ty {
                Ok(t) => t.clone(),
                Err(_) => Type::Any,
            };
            cache.insert(name.clone(), cached);
            ty
        }
        Term::Str(_) => Ok(Type::Str(Str { bounds: None })),
        // TODO: think about this some more
        Term::Int(_) => Ok(Type::Int),
        Term::Float(_) => Ok(Type::Primitive(PrimitiveSubtype::Float64)),
        Term::True | Term::False => Ok(Type::Primitive(PrimitiveSubtype::Bool)),
    }
}
pub fn term_can_have_type(
term: Spanned<&Term>,
expected_ty: &Type,
actual_ty: &Type,
validator: &mut Validator,
) -> Result<(), Error> {
match (&term.value, expected_ty) {
(Term::Identifier(_), _) => panic!("only pass evaled terms"),
(_, Type::Identifier(_)) => panic!("only pass evaled types"),
(_, Type::Int) => panic!("users can't specify untyped Ints"),
(_, Type::Any) => Ok(()),
(Term::Str(string), Type::Str(Str { bounds })) => match bounds {
None => Ok(()),
Some(bounds) => {
let bounds_ty = validator.type_check(bounds.into());
if let Type::Any = bounds_ty {
return Ok(());
}
let bounds = eval_term(bounds.into(), validator.scope)?;
term_can_have_type(bounds, &Type::Primitive(UInt64), &bounds_ty, validator)?;
match bounds.value {
Term::Int(bounds_literal) => {
if bounds_literal.value < string.len() as u64 {
Err(Error::StringBoundsError {
length: term.span.wrap(string.len()),
bounds: bounds.span.wrap(bounds_literal.value),
})
} else {
Ok(())
}
}
_ => panic!("only Ints should have been let through here"),
}
}
},
(Term::Int(val), Type::Primitive(Int8)) => {
i8::try_from(*val)
.map(|_| ())
.map_err(|_| Error::IntCoercionError {
span: term.span,
ty: Type::Primitive(Int8),
})
}
(Term::Int(val), Type::Primitive(Int16)) => {
i16::try_from(*val)
.map(|_| ())
.map_err(|_| Error::IntCoercionError {
span: term.span,
ty: Type::Primitive(Int16),
})
}
(Term::Int(val), Type::Primitive(Int32)) => {
i32::try_from(*val)
.map(|_| ())
.map_err(|_| Error::IntCoercionError {
span: term.span,
ty: Type::Primitive(Int32),
})
}
(Term::Int(val), Type::Primitive(Int64)) => {
i64::try_from(*val)
.map(|_| ())
.map_err(|_| Error::IntCoercionError {
span: term.span,
ty: Type::Primitive(Int64),
})
}
(Term::Int(val), Type::Primitive(UInt8)) => {
u8::try_from(*val)
.map(|_| ())
.map_err(|_| Error::IntCoercionError {
span: term.span,
ty: Type::Primitive(UInt8),
})
}
(Term::Int(val), Type::Primitive(UInt16)) => {
u16::try_from(*val)
.map(|_| ())
.map_err(|_| Error::IntCoercionError {
span: term.span,
ty: Type::Primitive(UInt16),
})
}
(Term::Int(val), Type::Primitive(UInt32)) => {
u32::try_from(*val)
.map(|_| ())
.map_err(|_| Error::IntCoercionError {
span: term.span,
ty: Type::Primitive(UInt32),
})
}
(Term::Int(val), Type::Primitive(UInt64)) => {
u64::try_from(*val)
.map(|_| ())
.map_err(|_| Error::IntCoercionError {
span: term.span,
ty: Type::Primitive(UInt64),
})
}
(Term::Float(_), Type::Primitive(Float32))
| (Term::Float(_), Type::Primitive(Float64))
| (Term::True, Type::Primitive(Bool))
| (Term::False, Type::Primitive(Bool)) => Ok(()),
_ => Err(Error::TypeError {
actual: term.span.wrap(actual_ty.clone()),
expected: expected_ty.clone(),
}),
}
}
impl TryFrom<IntLiteral> for i64 {
    type Error = ();
    /// Convert a sign/magnitude literal to `i64`, rejecting anything outside
    /// `[i64::MIN, i64::MAX]`.
    fn try_from(value: IntLiteral) -> Result<Self, Self::Error> {
        let magnitude = value.value;
        if value.is_negative {
            match magnitude.cmp(&(std::i64::MIN as u64)) {
                // Strictly inside range: negate the magnitude.
                Ordering::Less => Ok(-(magnitude as i64)),
                // Exactly |i64::MIN|: the raw cast already yields i64::MIN.
                Ordering::Equal => Ok(magnitude as i64),
                Ordering::Greater => Err(()),
            }
        } else if magnitude <= std::i64::MAX as u64 {
            Ok(magnitude as i64)
        } else {
            Err(())
        }
    }
}
impl TryFrom<IntLiteral> for i32 {
    type Error = ();
    /// Narrow via the `i64` conversion.
    fn try_from(value: IntLiteral) -> Result<Self, Self::Error> {
        let wide = i64::try_from(value)?;
        i32::try_from(wide).map_err(|_| ())
    }
}
impl TryFrom<IntLiteral> for i16 {
    type Error = ();
    /// Narrow via the `i64` conversion.
    fn try_from(value: IntLiteral) -> Result<Self, Self::Error> {
        let wide = i64::try_from(value)?;
        i16::try_from(wide).map_err(|_| ())
    }
}
impl TryFrom<IntLiteral> for i8 {
    type Error = ();
    /// Narrow via the `i64` conversion.
    fn try_from(value: IntLiteral) -> Result<Self, Self::Error> {
        let wide = i64::try_from(value)?;
        i8::try_from(wide).map_err(|_| ())
    }
}
impl TryFrom<IntLiteral> for u64 {
    type Error = ();
    /// A negative literal can never be unsigned; otherwise the magnitude is
    /// already the answer.
    fn try_from(value: IntLiteral) -> Result<Self, Self::Error> {
        match value.is_negative {
            true => Err(()),
            false => Ok(value.value),
        }
    }
}
impl TryFrom<IntLiteral> for u32 {
    type Error = ();
    /// Narrow via the `u64` conversion.
    fn try_from(value: IntLiteral) -> Result<Self, Self::Error> {
        let wide = u64::try_from(value)?;
        u32::try_from(wide).map_err(|_| ())
    }
}
impl TryFrom<IntLiteral> for u16 {
    type Error = ();
    /// Narrow via the `u64` conversion.
    fn try_from(value: IntLiteral) -> Result<Self, Self::Error> {
        let wide = u64::try_from(value)?;
        u16::try_from(wide).map_err(|_| ())
    }
}
impl TryFrom<IntLiteral> for u8 {
    type Error = ();
    /// Narrow via the `u64` conversion.
    fn try_from(value: IntLiteral) -> Result<Self, Self::Error> {
        let wide = u64::try_from(value)?;
        u8::try_from(wide).map_err(|_| ())
    }
}
#[cfg(test)]
mod test {
    use super::*;
    /// Boundary tests for the `IntLiteral` -> signed-integer conversions.
    /// Uses `assert!(..is_ok())` instead of the old `assert_eq!(.., true)`
    /// anti-pattern (clippy: bool_assert_comparison).
    #[test]
    fn int_literal_try_from() {
        // i64::MAX is the largest representable positive literal.
        assert!(i64::try_from(IntLiteral {
            value: std::i64::MAX as u64,
            is_negative: false
        })
        .is_ok());
        // |i64::MIN| does not fit as a positive value.
        assert!(i64::try_from(IntLiteral {
            value: std::i64::MIN as u64,
            is_negative: false
        })
        .is_err());
        // -(|i64::MIN|) is exactly i64::MIN.
        assert!(i64::try_from(IntLiteral {
            value: std::i64::MIN as u64,
            is_negative: true
        })
        .is_ok());
        // Anything more negative than i64::MIN overflows.
        assert!(i64::try_from(IntLiteral {
            value: (std::i64::MIN as u64) + 1,
            is_negative: true
        })
        .is_err());
        // i32 funnels through the i64 conversion; same boundaries narrowed.
        // (i32::MIN as u64 sign-extends to a huge positive value -> error.)
        assert!(i32::try_from(IntLiteral {
            value: std::i32::MIN as u64,
            is_negative: false,
        })
        .is_err());
        assert!(i32::try_from(IntLiteral {
            value: std::i32::MAX as u64,
            is_negative: false,
        })
        .is_ok());
        assert!(i32::try_from(IntLiteral {
            value: std::i32::MIN as u32 as u64,
            is_negative: true,
        })
        .is_ok());
        assert!(i32::try_from(IntLiteral {
            value: (std::i32::MIN as u32 as u64) + 1,
            is_negative: true,
        })
        .is_err());
    }
}
|
use std::{fmt::Display, sync::Arc};
use datafusion::{
config::ConfigOptions, execution::runtime_env::RuntimeEnv, prelude::SessionConfig,
};
use object_store::ObjectStore;
use url::Url;
/// The default catalog name - this impacts what SQL queries use if not specified
pub const DEFAULT_CATALOG: &str = "public";
/// The default schema name - this impacts what SQL queries use if not specified
pub const DEFAULT_SCHEMA: &str = "iox";
/// The maximum number of rows that DataFusion should create in each RecordBatch
pub const BATCH_SIZE: usize = 8 * 1024;
/// Return a SessionConfig object configured for IOx
pub fn iox_session_config() -> SessionConfig {
    // Enable parquet predicate pushdown optimization
    let mut options = ConfigOptions::new();
    options.execution.parquet.pushdown_filters = true;
    options.execution.parquet.reorder_filters = true;
    // Preserve sort order across repartitioning where possible.
    options.optimizer.repartition_sorts = true;
    SessionConfig::from(options)
        .with_batch_size(BATCH_SIZE)
        .with_create_default_catalog_and_schema(true)
        // Expose information_schema tables for SQL introspection.
        .with_information_schema(true)
        .with_default_catalog_and_schema(DEFAULT_CATALOG, DEFAULT_SCHEMA)
}
/// Register the "IOx" object store provider for URLs of the form "iox://{id}"
///
/// Return the previous registered store, if any
pub fn register_iox_object_store<D: Display>(
    runtime: impl AsRef<RuntimeEnv>,
    id: D,
    object_store: Arc<dyn ObjectStore>,
) -> Option<Arc<dyn ObjectStore>> {
    // `id` is embedded directly in the URL; parsing only fails on a malformed id.
    let url = Url::parse(&format!("iox://{id}")).unwrap();
    runtime.as_ref().register_object_store(&url, object_store)
}
|
use legion::*;
/// A simple buffer of events of type `T`, stored oldest-first.
pub struct Events<T> {
    // All events received since the buffer was last cleared.
    pub events: Vec<T>,
}
impl<T> Default for Events<T> {
    /// Start with an empty buffer. Implemented by hand so that no
    /// `T: Default` bound is required, unlike `#[derive(Default)]`.
    fn default() -> Self {
        Events {
            events: Vec::default(),
        }
    }
}
impl<T> Events<T> {
    /// Append `event` to the end of the buffer.
    pub fn send(&mut self, event: T) {
        self.events.push(event);
    }
}
/// Legion system that drops every buffered event of type `T`.
/// Schedule once per frame, after all readers have run.
#[system]
pub fn clear_events<T>(#[resource] events: &mut Events::<T>)
where
    T: 'static,
{
    events.events.clear();
}
|
pub mod dirs_index;
pub mod http;
|
use nrf51;
use cortex_m;
use cortex_m::interrupt::{Mutex};
use cortex_m_semihosting::hio::{HStdout};
use core::cell::RefCell;
//mod peripherals;
use boards::board::Board;
use boards::peripherals::leds::Led;
use boards::peripherals::buttons::Button;
use boards::peripherals::timers::Timer;
// Semihosting stdout handle, shared with interrupt handlers via a cortex-m Mutex.
pub static HSTDOUT: Mutex<RefCell<Option<HStdout>>> = Mutex::new(RefCell::new(None));
// Device peripherals, moved here once taken in Nrf51dk::init.
pub static PERIPH: Mutex<RefCell<Option<nrf51::Peripherals>>> = Mutex::new(RefCell::new(None));
/*
TODO Genericise these so that they get passed
to the boards instead of being a global static
variable
*/
// GPIO pins 21-24 drive the four user LEDs on this board.
pub static LEDS: [Led; 4] = [
    Led { i: 21 },
    Led { i: 22 },
    Led { i: 23 },
    Led { i: 24 }
];
// GPIO pins 17-20 read the four user buttons.
pub static BUTTONS: [Button; 4] = [
    Button { i: 17 },
    Button { i: 18 },
    Button { i: 19 },
    Button { i: 20 }
];
/// nRF51-DK board abstraction. Stateless; all hardware handles live in the
/// module-level statics above.
pub struct Nrf51dk {
    //Leds: [Led_S; 4],
}
impl Nrf51dk {
    // What to do when the board comes up
    pub fn new() -> Nrf51dk {
        Nrf51dk { }
    }
    /// One-time board bring-up: NVIC interrupt setup, peripheral capture,
    /// systick-style timer, LED and button GPIO init.
    // NOTE(review): both take() calls unwrap — calling init() twice will panic
    // because Peripherals::take() returns None the second time. Confirm init
    // is only ever called once.
    pub fn init(&self) {
        cortex_m::interrupt::free(|cs| {
            /* Initilize the interrupts on cpu*/
            // TODO should the device structs handle these?
            let mut cp = cortex_m::Peripherals::take().unwrap();
            cp.NVIC.enable(nrf51::Interrupt::GPIOTE);
            cp.NVIC.clear_pending(nrf51::Interrupt::GPIOTE);
            cp.NVIC.enable(nrf51::Interrupt::TIMER0);
            cp.NVIC.clear_pending(nrf51::Interrupt::TIMER0);
            let p = nrf51::Peripherals::take().unwrap(); // todo don't unwrap
            // lets borrow Peripherals
            *PERIPH.borrow(cs).borrow_mut() = Some(p);
        });
        // timer0 with a frequency of 1000000
        let timer0 = Timer::new(0, 1000000);
        //Lets use timer0 as a systick
        //let delay: u32 = 5 * 1000000; // five second delay
        let delay: u32 = 1000;
        timer0.init(delay);
        timer0.start();
        Led::init();
        Button::init();
    }
}
impl Board for Nrf51dk {
    fn new( ) -> Nrf51dk {
        Nrf51dk {}
    }
    // The led_* methods index the LEDS static directly: i must be 0..4 or
    // the indexing panics.
    fn led_on(&self, i: usize) {
        LEDS[i].on();
    }
    fn led_off(&self, i: usize) {
        LEDS[i].off();
    }
    fn led_toggle(&self, i: usize) {
        LEDS[i].toggle();
    }
}
|
#![cfg_attr(not(feature = "std"), no_std)]
#![no_implicit_prelude]
#[cfg(feature = "alloc")]
extern crate alloc;
#[allow(clippy::eq_op)]
// Tests for pretty_assertions::assert_eq!. The should_panic strings pin the
// exact ANSI-colored diff output; the raw escape bytes are load-bearing.
mod assert_eq {
    #[cfg(feature = "alloc")]
    use ::alloc::string::{String, ToString};
    #[cfg(feature = "std")]
    use ::std::string::{String, ToString};
    #[test]
    fn passes() {
        let a = "some value";
        ::pretty_assertions::assert_eq!(a, a);
    }
    #[test]
    fn passes_unsized() {
        let a: &[u8] = b"e";
        ::pretty_assertions::assert_eq!(*a, *a);
    }
    #[test]
    fn passes_comparable_types() {
        let s0: &'static str = "foo";
        let s1: String = "foo".to_string();
        ::pretty_assertions::assert_eq!(s0, s1);
    }
    #[test]
    #[should_panic(expected = r#"assertion failed: `(left == right)`
[1mDiff[0m [31m< left[0m / [32mright >[0m :
[31m<[0m[1;48;5;52;31m666[0m
[32m>[0m[1;48;5;22;32m999[0m
"#)]
    fn fails() {
        ::pretty_assertions::assert_eq!(666, 999);
    }
    #[test]
    #[should_panic(expected = r#"assertion failed: `(left == right)`
[1mDiff[0m [31m< left[0m / [32mright >[0m :
[31m<[0m[1;48;5;52;31m666[0m
[32m>[0m[1;48;5;22;32m999[0m
"#)]
    fn fails_trailing_comma() {
        ::pretty_assertions::assert_eq!(666, 999,);
    }
    #[test]
    #[should_panic(expected = r#"assertion failed: `(left == right)`
[1mDiff[0m [31m< left[0m / [32mright >[0m :
 [
     101,
[32m>    101,[0m
 ]
"#)]
    fn fails_unsized() {
        let a: &[u8] = b"e";
        let b: &[u8] = b"ee";
        ::pretty_assertions::assert_eq!(*a, *b);
    }
    #[test]
    #[should_panic(
        expected = r#"assertion failed: `(left == right)`: custom panic message
[1mDiff[0m [31m< left[0m / [32mright >[0m :
[31m<[0m[1;48;5;52;31m666[0m
[32m>[0m[1;48;5;22;32m999[0m
"#
    )]
    fn fails_custom() {
        ::pretty_assertions::assert_eq!(666, 999, "custom panic message");
    }
    #[test]
    #[should_panic(
        expected = r#"assertion failed: `(left == right)`: custom panic message
[1mDiff[0m [31m< left[0m / [32mright >[0m :
[31m<[0m[1;48;5;52;31m666[0m
[32m>[0m[1;48;5;22;32m999[0m
"#
    )]
    fn fails_custom_trailing_comma() {
        ::pretty_assertions::assert_eq!(666, 999, "custom panic message",);
    }
}
// Tests for pretty_assertions::assert_ne!; the should_panic fixtures pin the
// exact "Both sides" output, escape bytes included.
mod assert_ne {
    #[cfg(feature = "alloc")]
    use ::alloc::string::{String, ToString};
    #[cfg(feature = "std")]
    use ::std::string::{String, ToString};
    #[test]
    fn passes() {
        let a = "a";
        let b = "b";
        ::pretty_assertions::assert_ne!(a, b);
    }
    #[test]
    fn passes_unsized() {
        let a: &[u8] = b"e";
        let b: &[u8] = b"ee";
        ::pretty_assertions::assert_ne!(*a, *b);
    }
    #[test]
    fn passes_comparable_types() {
        let s0: &'static str = "foo";
        let s1: String = "bar".to_string();
        ::pretty_assertions::assert_ne!(s0, s1);
    }
    #[test]
    #[should_panic(expected = r#"assertion failed: `(left != right)`
[1mBoth sides[0m:
666
"#)]
    fn fails() {
        ::pretty_assertions::assert_ne!(666, 666);
    }
    #[test]
    #[should_panic(expected = r#"assertion failed: `(left != right)`
[1mBoth sides[0m:
666
"#)]
    fn fails_trailing_comma() {
        ::pretty_assertions::assert_ne!(666, 666,);
    }
    #[test]
    #[should_panic(expected = r#"assertion failed: `(left != right)`
[1mBoth sides[0m:
[
    101,
]
"#)]
    fn fails_unsized() {
        let a: &[u8] = b"e";
        ::pretty_assertions::assert_ne!(*a, *a);
    }
    #[test]
    #[should_panic(
        expected = r#"assertion failed: `(left != right)`: custom panic message
[1mBoth sides[0m:
666
"#
    )]
    fn fails_custom() {
        ::pretty_assertions::assert_ne!(666, 666, "custom panic message");
    }
    #[test]
    #[should_panic(
        expected = r#"assertion failed: `(left != right)`: custom panic message
[1mBoth sides[0m:
666
"#
    )]
    fn fails_custom_trailing_comma() {
        ::pretty_assertions::assert_ne!(666, 666, "custom panic message",);
    }
    // If the values are equal but their debug outputs are not
    // show a specific warning
    // Regression tests
    #[test]
    #[should_panic]
    fn assert_ne_non_empty_return() {
        fn not_zero(x: u32) -> u32 {
            ::pretty_assertions::assert_ne!(x, 0);
            x
        }
        not_zero(0);
    }
}
#[cfg(feature = "unstable")]
// Tests for the unstable pretty_assertions::assert_matches!; fixtures pin the
// exact colored pattern-diff output, escape bytes included.
mod assert_matches {
    use ::core::option::Option::{None, Some};
    #[test]
    fn passes() {
        let a = Some("some value");
        ::pretty_assertions::assert_matches!(a, Some(_));
    }
    #[test]
    fn passes_unsized() {
        let a: &[u8] = b"e";
        ::pretty_assertions::assert_matches!(*a, _);
    }
    #[test]
    #[should_panic(expected = r#"assertion failed: `(left matches right)`
[1mDiff[0m [31m< left[0m / [32mright >[0m :
[31m<[0m[1;48;5;52;31mN[0m[31mo[0m[1;48;5;52;31mn[0m[31me[0m
[32m>[0m[1;48;5;22;32mS[0m[32mo[0m[1;48;5;22;32mm[0m[32me[0m[1;48;5;22;32m(_)[0m
"#)]
    fn fails() {
        ::pretty_assertions::assert_matches!(None::<usize>, Some(_));
    }
    #[test]
    #[should_panic(expected = r#"assertion failed: `(left matches right)`
[1mDiff[0m [31m< left[0m / [32mright >[0m :
[31m<Some([0m
[31m<    3,[0m
[31m<)[0m
[32m>Some(3) if 0 > 0[0m
"#)]
    fn fails_guard() {
        ::pretty_assertions::assert_matches!(Some(3), Some(3) if 0 > 0,);
    }
    #[test]
    #[should_panic(expected = r#"assertion failed: `(left matches right)`
[1mDiff[0m [31m< left[0m / [32mright >[0m :
[31m<[[0m
[31m<    101,[0m
[31m<][0m
[32m>ref b if b == b"ee"[0m
"#)]
    fn fails_unsized() {
        let a: &[u8] = b"e";
        ::pretty_assertions::assert_matches!(*a, ref b if b == b"ee");
    }
    #[test]
    #[should_panic(
        expected = r#"assertion failed: `(left matches right)`: custom panic message
[1mDiff[0m [31m< left[0m / [32mright >[0m :
[31m<[0m[1;48;5;52;31m666[0m
[32m>[0m[1;48;5;22;32m999[0m
"#
    )]
    fn fails_custom() {
        ::pretty_assertions::assert_matches!(666, 999, "custom panic message");
    }
    #[test]
    #[should_panic(
        expected = r#"assertion failed: `(left matches right)`: custom panic message
[1mDiff[0m [31m< left[0m / [32mright >[0m :
[31m<[0m[1;48;5;52;31m666[0m
[32m>[0m[1;48;5;22;32m999[0m
"#
    )]
    fn fails_custom_trailing_comma() {
        ::pretty_assertions::assert_matches!(666, 999, "custom panic message",);
    }
}
|
//! The HTTP implementation serves the frontend
use futures::{Async::*, Future, Poll, future};
use http::response::Builder as ResponseBuilder;
use http::{Request, Response, StatusCode, header};
use hyper::{Body, service::Service, header::{HeaderValue, CONTENT_TYPE}};
use hyper_staticfile::{Static, StaticFuture};
use std::path::Path;
use std::io::Error;
use std::net::SocketAddr;
use std::path::PathBuf;
/// Future returned from `MainService`.
enum MainFuture {
    // Request for "/" — respond with a redirect to /index.html.
    Root,
    // Static file lookup in flight, plus the requested path (kept to inspect
    // the extension when the response arrives).
    Static((StaticFuture<Body>, PathBuf)),
}
// futures-0.1 style Future: poll() drives either the fixed redirect or the
// underlying static-file future to completion.
impl Future for MainFuture {
    type Item = Response<Body>;
    type Error = Error;
    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
        match *self {
            MainFuture::Root => {
                // Permanent redirect of "/" to the frontend entry point.
                let res = ResponseBuilder::new()
                    .status(StatusCode::MOVED_PERMANENTLY)
                    .header(header::LOCATION, "/index.html")
                    .body(Body::empty())
                    .expect("unable to build response");
                Ok(Ready(res))
            },
            MainFuture::Static((ref mut future, ref path)) => {
                // try_ready! returns NotReady/Err early; x is the final response.
                let mut x = try_ready!(future.poll());
                // hyper_staticfile doesn't know the wasm MIME type; patch it in.
                if let Some(ext) = path.extension() {
                    if let Some("wasm") = ext.to_str() {
                        x.headers_mut().insert(CONTENT_TYPE, HeaderValue::from_static("application/wasm"));
                    }
                }
                Ok(Ready(x))
            }
        }
    }
}
/// The service should just offer all fields in a single directory
struct MainService {
    // Serves the frontend files.
    static_: Static,
    // NOTE(review): constructed in new() but never used by call() below —
    // confirm whether download routing was intended and lost.
    download: Static
}
impl MainService {
    /// Create a new service
    fn new(path: &Path, data_path: &Path) -> MainService {
        MainService {
            static_: Static::new(path),
            // Serve from the parent so the data directory name is part of the URL.
            download: Static::new(data_path.parent().unwrap())
        }
    }
}
impl Service for MainService {
    type ReqBody = Body;
    type ResBody = Body;
    type Error = Error;
    type Future = MainFuture;
    // Route: "/" redirects, everything else is looked up in the static dir.
    // NOTE(review): the `download` Static is never consulted here.
    fn call(&mut self, req: Request<Body>) -> MainFuture {
        let path = PathBuf::from(req.uri().path());
        //println!("Path: {:?}", path);
        if req.uri().path() == "/" {
            MainFuture::Root
        } else {
            MainFuture::Static((self.static_.serve(req), path))
        }
    }
}
/// Create the webserver
///
/// * `addr` - Listen to this address
/// * `path` - Serve this directory
/// * `data_path` - Serve the data from this directory
///
/// Blocks the current thread running the hyper event loop.
pub fn create_webserver(addr: SocketAddr, path: PathBuf, data_path: PathBuf) {
    // A fresh MainService is built per connection; the closure owns the paths.
    let server = hyper::Server::bind(&addr)
        .serve(move || future::ok::<_, Error>(MainService::new(&path, &data_path)))
        .map_err(|e| eprintln!("server error: {}", e));
    println!("Web server running on http://{}", addr);
    hyper::rt::run(server);
}
|
// Copyright 2022 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_exception::Result;
use crate::plans::AggregateFunction;
use crate::plans::AndExpr;
use crate::plans::CastExpr;
use crate::plans::ComparisonExpr;
use crate::plans::FunctionCall;
use crate::plans::NotExpr;
use crate::plans::OrExpr;
use crate::plans::ScalarExpr;
use crate::plans::WindowFunc;
/// Controls how the visitor recursion should proceed.
pub enum Recursion<V: ScalarVisitor> {
    /// Attempt to visit all the children, recursively, of this expression.
    Continue(V),
    /// Do not visit the children of this expression, though the walk
    /// of parents of this expression will not be affected
    Stop(V),
}
/// Encode the traversal of an scalar tree. When passed to
/// `Scalar::accept`, `ScalarVisitor::visit` is invoked
/// recursively on all nodes of an scalar tree. See the comments
/// on `Scalar::accept` for details on its use
pub trait ScalarVisitor: Sized {
    /// Invoked before any children of `expr` are visited.
    fn pre_visit(self, scalar: &ScalarExpr) -> Result<Recursion<Self>>;
    /// Iterative depth-first walk. Each node is pushed twice: a `Ret` marker
    /// first (popped last, triggering `post_visit`) and then its children as
    /// `Call` markers — so pre_visit runs on the way down and post_visit on
    /// the way back up, without native recursion.
    fn visit(mut self, predecessor_scalar: &ScalarExpr) -> Result<Self> {
        let mut stack = vec![RecursionProcessing::Call(predecessor_scalar)];
        while let Some(element) = stack.pop() {
            match element {
                RecursionProcessing::Ret(scalar) => {
                    self = self.post_visit(scalar)?;
                }
                RecursionProcessing::Call(scalar) => {
                    stack.push(RecursionProcessing::Ret(scalar));
                    self = match self.pre_visit(scalar)? {
                        Recursion::Stop(visitor) => visitor,
                        Recursion::Continue(visitor) => {
                            // Push the children of this node; leaves push nothing.
                            match scalar {
                                ScalarExpr::AggregateFunction(AggregateFunction {
                                    args, ..
                                }) => {
                                    for arg in args {
                                        stack.push(RecursionProcessing::Call(arg));
                                    }
                                }
                                ScalarExpr::WindowFunction(WindowFunc { agg_func, .. }) => {
                                    for arg in &agg_func.args {
                                        stack.push(RecursionProcessing::Call(arg));
                                    }
                                }
                                ScalarExpr::ComparisonExpr(ComparisonExpr {
                                    left, right, ..
                                }) => {
                                    stack.push(RecursionProcessing::Call(left));
                                    stack.push(RecursionProcessing::Call(right));
                                }
                                ScalarExpr::AndExpr(AndExpr { left, right, .. }) => {
                                    stack.push(RecursionProcessing::Call(left));
                                    stack.push(RecursionProcessing::Call(right));
                                }
                                ScalarExpr::OrExpr(OrExpr { left, right, .. }) => {
                                    stack.push(RecursionProcessing::Call(left));
                                    stack.push(RecursionProcessing::Call(right));
                                }
                                ScalarExpr::NotExpr(NotExpr { argument, .. }) => {
                                    stack.push(RecursionProcessing::Call(argument));
                                }
                                ScalarExpr::FunctionCall(FunctionCall { arguments, .. }) => {
                                    for arg in arguments.iter() {
                                        stack.push(RecursionProcessing::Call(arg));
                                    }
                                }
                                ScalarExpr::BoundColumnRef(_)
                                | ScalarExpr::BoundInternalColumnRef(_)
                                | ScalarExpr::ConstantExpr(_) => {}
                                ScalarExpr::CastExpr(CastExpr { argument, .. }) => {
                                    stack.push(RecursionProcessing::Call(argument))
                                }
                                // Subqueries are deliberately not descended into.
                                ScalarExpr::SubqueryExpr(_) => {}
                            }
                            visitor
                        }
                    }
                }
            }
        }
        Ok(self)
    }
    /// Invoked after all children of `expr` are visited. Default
    /// implementation does nothing.
    fn post_visit(self, _expr: &ScalarExpr) -> Result<Self> {
        Ok(self)
    }
}
impl ScalarExpr {
    /// Performs a depth first walk of an scalar expression and
    /// its children, calling [`ScalarVisitor::pre_visit`] and
    /// `visitor.post_visit`.
    ///
    /// Implements the [visitor pattern](https://en.wikipedia.org/wiki/Visitor_pattern) to
    /// separate scalar expression algorithms from the structure of the
    /// `Scalar` tree and make it easier to add new types of scalar expressions
    /// and algorithms that walk the tree.
    ///
    /// For a scalar expression tree such as
    /// ```text
    /// BinaryExpr (GT)
    ///    left: Column("foo")
    ///    right: Column("bar")
    /// ```
    ///
    /// The nodes are visited using the following order
    /// ```text
    /// pre_visit(ScalarFunction(GT))
    /// pre_visit(Column("foo"))
    /// post_visit(Column("foo"))
    /// pre_visit(Column("bar"))
    /// post_visit(Column("bar"))
    /// post_visit(ScalarFunction(GT))
    /// ```
    ///
    /// If an Err result is returned, recursion is stopped immediately
    pub fn accept<V: ScalarVisitor>(&self, visitor: V) -> Result<V> {
        let visitor = match visitor.pre_visit(self)? {
            Recursion::Continue(visitor) => visitor,
            // If the recursion should stop, do not visit children
            Recursion::Stop(visitor) => return Ok(visitor),
        };
        // visit() handles the children; the root's post_visit runs last.
        let visitor = visitor.visit(self)?;
        visitor.post_visit(self)
    }
}
// Work-stack markers for the iterative walk in ScalarVisitor::visit:
// Call = run pre_visit and expand children; Ret = run post_visit.
enum RecursionProcessing<'a> {
    Call(&'a ScalarExpr),
    Ret(&'a ScalarExpr),
}
|
use failure::Error;
use yew::{html, Callback, Component, ComponentLink, Html, Renderable, ShouldRender};
use yew::services::fetch::StatusCode;
use yew::services::fetch::FetchTask;
use crate::services::froovie_service::{FroovieService, MovieSearch};
/// State for the movie-search page.
pub struct MovieSearchModel {
    froovie: FroovieService,
    // Invoked when a search request completes.
    callback: Callback<Result<Vec<MovieSearch>, Error>>,
    // Invoked when posting a user selection completes.
    add_selection_callback: Callback<Result<StatusCode, Error>>,
    /// Current search results rendered by the view.
    pub result: Vec<MovieSearch>,
    // Most recent in-flight request; dropping a FetchTask cancels it.
    task: Option<FetchTask>,
    error: Option<String>,
}
/// Messages driving the search page.
pub enum Msg {
    /// User typed a query; kick off a search.
    SearchResult(String),
    /// User picked a movie by its moviedb id.
    PickSelection(i32),
    /// Outcome of posting a selection to the backend.
    SelectionResult(Result<StatusCode, Error>),
    /// Outcome of a search request.
    FroovieReady(Result<Vec<MovieSearch>, Error>),
}
impl Component for MovieSearchModel {
    type Message = Msg;
    type Properties = ();
    /// Build the model and wire the fetch callbacks into the component link.
    fn create(_: Self::Properties, mut link: ComponentLink<Self>) -> Self {
        MovieSearchModel {
            froovie: FroovieService::new(),
            callback: link.send_back(Msg::FroovieReady),
            add_selection_callback: link.send_back(Msg::SelectionResult),
            result: vec![],
            task: None,
            error: None,
        }
    }
    /// Handle a message; always requests a re-render.
    fn update(&mut self, msg: Self::Message) -> ShouldRender {
        match msg {
            Msg::SearchResult(query) => {
                // Keep the task handle alive: dropping a FetchTask cancels it.
                let task = self.froovie.search_movie(&query, self.callback.clone());
                self.task = Some(task);
            }
            Msg::FroovieReady(Ok(movies)) => {
                self.result = movies;
            }
            // Fix: the previous `SelectionResult(Ok(OK))` / `SelectionResult(_)` /
            // `SelectionResult(Err(..))` sequence made the Err arm unreachable
            // (the `_` arm matched first). All three arms were no-ops, so they
            // collapse into a single arm with identical behavior.
            Msg::SelectionResult(_) => {}
            Msg::FroovieReady(Err(error)) => {
                self.result = vec![];
                self.error = Some(error.to_string());
            }
            Msg::PickSelection(moviedb_id) => {
                // User id is hard-coded to 1 for now.
                let task = self.froovie.post_user_selection(moviedb_id, 1, self.add_selection_callback.clone());
                self.task = Some(task);
            }
        }
        true
    }
}
impl Renderable<MovieSearchModel> for MovieSearchModel {
    /// Render the search box, the result list, and the current error status.
    fn view(&self) -> Html<Self> {
        // One card per search hit; `id` is captured by value so the Save
        // button's closure can emit PickSelection later.
        let view_movie = |movie: &MovieSearch, id: i32| html! {
            <div>
                <p> { &movie.title.clone() } </p>
                <img src={ &movie.image_url.clone().unwrap_or_else(|| "".to_string()) },
                     style="width: 200px",/>
                <button onclick=|_| Msg::PickSelection(id),> { "Save"} </button>
            </div>
        };
        html! {
            <div>
                <textarea class=("search-movie", "input"),
                    placeholder="Search",
                    value="brute",
                    oninput=|query| Msg::SearchResult(query.value),
                    />
                <ul> { for self.result.iter()
                    .map(|movie| (movie, movie.moviedb_id))
                    .map(|(movie, id)| view_movie(movie, id)) } </ul>
                <p> { &format!("Error status: {:?}", &self.error) } </p>
            </div>
        }
    }
}
use std::fmt;
use std::rc::Rc;
use crate::platform::traits::*;
use crate::platform::Iterator as PlatformIterator;
use crate::platform::Manager as PlatformManager;
use crate::{Batteries, Battery, Result};
/// Manager for batteries available in system.
///
/// Allows fetching and updating [batteries] information.
///
/// # Example
///
/// ```edition2018
/// # use battery::{Result, Manager};
/// # fn main() -> Result<()> {
/// for battery in Manager::new()?.batteries()? {
///     println!("{:#?}", battery?);
/// }
/// # Ok(())
/// # }
/// ```
///
/// [batteries]: struct.Battery.html
pub struct Manager {
    // Rc (not Arc) — Manager is therefore not Send/Sync.
    // NOTE(review): confirm single-threaded use is intended.
    inner: Rc<PlatformManager>,
}
impl Manager {
    /// Creates new manager value.
    pub fn new() -> Result<Manager> {
        let inner = PlatformManager::new()?;
        Ok(Manager {
            inner: Rc::new(inner),
        })
    }
    /// Returns an iterator over available batteries.
    ///
    /// There are no guarantees provided for [batteries] ordering,
    /// multiple calls to this method might result in any particular order
    /// depending on the underlying OS implementation.
    ///
    /// [batteries]: struct.Battery.html
    pub fn batteries(&self) -> Result<Batteries> {
        // Each iterator shares the platform handle via an Rc clone.
        let inner = PlatformIterator::new(self.inner.clone())?;
        Ok(Batteries::from(inner))
    }
    /// Refresh battery information in-place.
    pub fn refresh(&self, battery: &mut Battery) -> Result<()> {
        self.inner.refresh(battery)
    }
}
impl fmt::Debug for Manager {
    // Delegates the interesting output to the platform implementation's Debug.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Manager").field("impl", &self.inner).finish()
    }
}
|
// Copyright 2020 - 2021 Alex Dukhno
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::*;
use postgre_sql::query_response::QueryEvent;
use std::{
io,
ops::DerefMut,
sync::{Arc, Mutex},
};
#[cfg(test)]
mod delete;
#[cfg(test)]
mod extended_query_flow;
#[cfg(test)]
mod insert;
#[cfg(test)]
mod predicate;
#[cfg(test)]
mod schema;
#[cfg(test)]
mod select;
#[cfg(test)]
mod simple_prepared_statement;
#[cfg(test)]
mod table;
#[cfg(test)]
mod type_constraints;
#[cfg(test)]
mod update;
// Engine implementation under test.
type InMemory = QueryEngineOld;
// Shared handle passed to fixtures; the engine keeps its own clone.
type ResultCollector = Arc<Mutex<Collector>>;
/// Records every outbound wire message (raw bytes) for later assertions.
// NOTE(review): Collector is itself Arc<Mutex<..>> and is additionally wrapped
// in Arc<Mutex<..>> by ResultCollector — double locking; confirm intentional.
#[derive(Clone)]
pub struct Collector(Arc<Mutex<Vec<Vec<u8>>>>);
impl Sender for Collector {
    /// Nothing is buffered, so flushing is a no-op.
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
    /// Capture the outgoing message instead of writing it to a socket.
    fn send(&mut self, message: &[u8]) -> io::Result<()> {
        self.0.lock().unwrap().push(message.to_vec());
        Ok(())
    }
}
impl Collector {
    /// Build a fresh, empty collector already wrapped for sharing.
    fn new() -> ResultCollector {
        Arc::new(Mutex::new(Collector(Arc::new(Mutex::new(vec![])))))
    }
    /// Assert that exactly `expected` (encoded) has been captured so far,
    /// draining the buffer in the process.
    #[allow(dead_code)]
    fn assert_receive_till_this_moment(&self, expected: Vec<Result<QueryEvent, QueryError>>) {
        let result = self.0.lock().expect("locked").drain(0..).collect::<Vec<_>>();
        assert_eq!(
            result,
            expected
                .into_iter()
                .map(|r| match r {
                    Ok(ok) => ok.into(),
                    Err(err) => err.into(),
                })
                .collect::<Vec<Vec<u8>>>()
        )
    }
    /// Assert the most recent message equals `expected`, removing it.
    #[allow(dead_code)]
    fn assert_receive_intermediate(&self, expected: Result<QueryEvent, QueryError>) {
        let mut actual = self.0.lock().expect("locked");
        assert_eq!(
            actual.deref_mut().pop(),
            Some(expected).map(|r| match r {
                Ok(ok) => ok.into(),
                Err(err) => err.into(),
            })
        );
    }
    /// Assert the captured sequence ends with QueryComplete preceded by
    /// `expected`; both are removed.
    fn assert_receive_single(&self, expected: Result<QueryEvent, QueryError>) {
        self.assert_query_complete();
        let mut actual = self.0.lock().expect("locked");
        assert_eq!(
            actual.deref_mut().pop(),
            Some(expected).map(|r| match r {
                Ok(ok) => ok.into(),
                Err(err) => err.into(),
            })
        );
    }
    /// Assert the oldest `expected.len()` messages match `expected` in order,
    /// then that a QueryComplete follows; all checked messages are drained.
    fn assert_receive_many(&self, expected: Vec<Result<QueryEvent, QueryError>>) {
        let actual = self
            .0
            .lock()
            .expect("locked")
            .drain(0..expected.len())
            .collect::<Vec<_>>();
        assert_eq!(
            actual,
            expected
                .into_iter()
                .map(|r| match r {
                    Ok(ok) => ok.into(),
                    Err(err) => err.into(),
                })
                .collect::<Vec<Vec<u8>>>()
        );
        self.assert_query_complete();
    }
    /// Pop the most recent message and assert it is QueryComplete.
    fn assert_query_complete(&self) {
        let mut actual = self.0.lock().expect("locked");
        assert_eq!(actual.deref_mut().pop(), Some(QueryEvent::QueryComplete.into()));
    }
}
/// rstest fixture: an engine over an empty in-memory database plus the
/// collector that captures its output.
#[rstest::fixture]
fn empty_database() -> (InMemory, ResultCollector) {
    setup_logger();
    let collector = Collector::new();
    (InMemory::new(collector.clone(), Database::new("")), collector)
}
/// rstest fixture: empty database plus `schema_name` already created
/// (and the creation acknowledgement consumed from the collector).
#[rstest::fixture]
fn database_with_schema(empty_database: (InMemory, ResultCollector)) -> (InMemory, ResultCollector) {
    let (mut engine, collector) = empty_database;
    engine
        .execute(Inbound::Query {
            sql: "create schema schema_name;".to_string(),
        })
        .expect("query expected");
    collector
        .lock()
        .unwrap()
        .assert_receive_single(Ok(QueryEvent::SchemaCreated));
    (engine, collector)
}
/// rstest fixture: schema fixture plus a three-smallint-column
/// `schema_name.table_name` already created.
#[rstest::fixture]
fn database_with_table(database_with_schema: (InMemory, ResultCollector)) -> (InMemory, ResultCollector) {
    let (mut engine, collector) = database_with_schema;
    engine
        .execute(Inbound::Query {
            sql: "create table schema_name.table_name (col1 smallint, col2 smallint, col3 smallint);".to_string(),
        })
        .expect("query expected");
    collector
        .lock()
        .unwrap()
        .assert_receive_single(Ok(QueryEvent::TableCreated));
    (engine, collector)
}
/// Install the global test logger; the error returned when a logger is
/// already installed is deliberately ignored so every fixture can call this.
fn setup_logger() {
    let _ = simple_logger::SimpleLogger::new().init();
}
|
/// One row of the demo table.
struct TableRow {
    id: i32,
    name: String,
    // Only rows with admin == true appear in the "admin name" table.
    admin: bool,
}
/// Root component demonstrating three iterator-rendering patterns in yew.
pub struct App {
    // Static demo data rendered by view().
    table: Vec<TableRow>,
}
/// This demo component handles no messages.
pub enum Msg {}
impl yew::Component for App {
    type Message = Msg;
    type Properties = ();
    /// Seed the component with three fixed demo rows.
    fn create(_: &yew::Context<Self>) -> Self {
        App {
            table: vec![
                TableRow {
                    id: 0,
                    name: "Alice".into(),
                    admin: true,
                },
                TableRow {
                    id: 1,
                    name: "Bob".into(),
                    admin: true,
                },
                TableRow {
                    id: 2,
                    name: "Charles".into(),
                    admin: false,
                },
            ],
        }
    }
    /// No messages exist, so any update simply re-renders.
    fn update(&mut self, _: &yew::Context<Self>, _: Self::Message) -> bool {
        true
    }
    /// Render the same table three ways: collect::<Html>(), the `for` keyword,
    /// and `for` with a per-row condition.
    fn view(&self, _: &yew::Context<Self>) -> yew::Html {
        use yew::html;
        html! {
            <>
                <p>{ "Iterator Type 1" }</p>
                <table frame="box" rules="all">
                    <tr><th>{ "ID" }</th><th>{ "name" }</th></tr>{
                        self.table.iter().map(|row| {
                            html! {<tr><td>{row.id}</td><td>{&row.name}</td></tr>}
                        }).collect::<yew::Html>()
                    }
                </table>
                <p>{ "Iterator Type 2" }</p>
                <table frame="box" rules="all">
                    <tr><th>{ "ID" }</th><th>{ "name" }</th></tr>{
                        for self.table.iter().map(|row| {
                            html! {<tr><td>{row.id}</td><td>{&row.name}</td></tr>}
                        })
                    }
                </table>
                <p>{ "Iterator with condition" }</p>
                <table frame="box" rules="all">
                    <tr><th>{ "ID" }</th><th>{ "admin name" }</th></tr>{
                        for self.table.iter().map(|row| {
                            if row.admin {
                                html! {<tr><td>{row.id}</td><td>{&row.name}</td></tr>}
                            }else{
                                html! {}
                            }
                        })
                    }
                </table>
            </>
        }
    }
}
|
pub mod ptr;
pub mod place;
pub mod value;
pub mod analyze;
pub mod pass;
pub mod trans;
pub mod error;
use error::Error;
use lowlang_syntax as syntax;
use lowlang_syntax::layout::TyLayout;
pub use cranelift_module::{Backend, Module, FuncId, DataId};
use cranelift_frontend::FunctionBuilder;
use cranelift_codegen::ir::{self, types};
use cranelift_codegen::settings;
use std::collections::BTreeMap;
/// Parse a target-triple string (e.g. "x86_64-unknown-linux-gnu") into a
/// `target_lexicon::Triple`.
///
/// NOTE(review): the `triple!` macro panics on a malformed triple — callers
/// must pass a well-formed target string. The `FromStr` import is required
/// by the macro expansion.
pub fn triple(target: &str) -> target_lexicon::Triple {
    use std::str::FromStr as _;
    target_lexicon::triple!(target)
}
pub fn compile<'t>(
package: &syntax::Package<'t>,
types: &'t syntax::ty::TyCtx<'t>,
target: &str,
optimize: bool,
out_file: std::path::PathBuf
) -> Result<(), Error> {
use settings::Configurable as _;
use std::str::FromStr as _;
let mut flags_builder = settings::builder();
if optimize {
flags_builder.set("opt_level", "speed").unwrap();
}
let isa = cranelift_codegen::isa::lookup(target_lexicon::triple!(target)).unwrap()
.finish(settings::Flags::new(flags_builder));
let builder = cranelift_object::ObjectBuilder::new(
isa,
package.name.clone(),
cranelift_object::ObjectTrapCollection::Enabled,
cranelift_module::default_libcall_names(),
).unwrap();
let module = Module::<cranelift_object::ObjectBackend>::new(builder);
let layout_interner = syntax::layout::LayoutInterner::new();
let layouts = syntax::layout::LayoutCtx::new(types, &layout_interner, module.isa().triple());
let product = trans::translate(module, &layouts, package)?;
let mut tmp_name = out_file.clone();
tmp_name.set_extension("o");
assemble(product, tmp_name.as_ref());
link(tmp_name.as_ref(), out_file.as_ref());
Ok(())
}
/// Emit the finished object product's machine code to `out_file`.
///
/// Panics if emission or the file write fails.
pub fn assemble(product: cranelift_object::ObjectProduct, out_file: &std::path::Path) {
    let object_bytes = product.emit().unwrap();
    // `fs::write` creates (or truncates) the file and writes all bytes.
    std::fs::write(out_file, &object_bytes).unwrap();
}
/// Link the object file into the final executable using the system C
/// compiler driver (`cc obj_file -o out_file`).
///
/// # Panics
/// Panics if `cc` cannot be spawned or exits with a non-zero status.
/// (Previously the exit status was silently discarded, so a failed link
/// still looked like success to the caller.)
pub fn link(obj_file: &std::path::Path, out_file: &std::path::Path) {
    let status = std::process::Command::new("cc")
        .args(&[obj_file, std::path::Path::new("-o"), out_file])
        .status()
        .unwrap();
    assert!(status.success(), "linker `cc` failed with {}", status);
}
/// Per-function translation state threaded through codegen.
pub struct FunctionCtx<'a, 't, 'l, B: Backend> {
    // Type-layout context shared across the package.
    pub layouts: &'a syntax::layout::LayoutCtx<'t, 'l>,
    // Backend module functions and data are defined into.
    pub module: &'a mut Module<B>,
    // Cranelift IR builder for the function being translated.
    pub builder: FunctionBuilder<'a>,
    // Native pointer type for the current target.
    pub pointer_type: types::Type,
    // Raw pointer back to the package; dereferenced via the unsafe
    // `package()` accessor below.
    pub package: *const syntax::Package<'t>,
    // Body of the function currently being translated.
    pub body: &'a syntax::Body<'t>,
    // Declared function ids with their signatures and param layouts.
    pub func_ids: &'a BTreeMap<syntax::ItemId, (FuncId, ir::Signature, Vec<TyLayout<'t, 'l>>)>,
    // Declared data ids with their layouts.
    pub data_ids: &'a BTreeMap<syntax::ItemId, (DataId, TyLayout<'t, 'l>)>,
    // Source block id -> Cranelift extended basic block.
    pub blocks: BTreeMap<syntax::BlockId, ir::Ebb>,
    // Source local id -> stack/register place.
    pub locals: BTreeMap<syntax::LocalId, place::Place<'t, 'l>>,
    // Running count of emitted constant-data bytes (used for naming).
    bytes_count: &'a mut usize,
}
impl<'a, 't, 'l, B: Backend> FunctionCtx<'a, 't, 'l, B> {
    /// Convenience wrapper over the free function `clif_type` using this
    /// context's module.
    pub fn clif_type(&self, layout: TyLayout<'t, 'l>) -> Option<types::Type> {
        self::clif_type(self.module, layout)
    }

    /// Dereference the raw package pointer.
    ///
    /// # Safety
    /// Caller must ensure the `Package` this context was built from is still
    /// alive and not mutably aliased for the returned borrow's lifetime.
    pub unsafe fn package(&self) -> &syntax::Package<'t> {
        &*self.package
    }
}
pub fn clif_type<'t, 'l>(module: &Module<impl Backend>, layout: TyLayout<'t, 'l>) -> Option<types::Type> {
use syntax::{Type, IntSize, FloatSize};
match &*layout.ty {
Type::Unit => None,
Type::Bool => Some(types::I8),
Type::Char => Some(types::I32),
Type::Int(size) | Type::UInt(size) => match size {
IntSize::Bits8 => Some(types::I8),
IntSize::Bits16 => Some(types::I16),
IntSize::Bits32 => Some(types::I32),
IntSize::Bits64 => Some(types::I64),
IntSize::Bits128 => Some(types::I128),
IntSize::Size => Some(match module.target_config().pointer_width {
target_lexicon::PointerWidth::U16 => types::I16,
target_lexicon::PointerWidth::U32 => types::I32,
target_lexicon::PointerWidth::U64 => types::I64,
}),
},
Type::Float(size) => match size {
FloatSize::Bits32 => Some(types::F32),
FloatSize::Bits64 => Some(types::F64),
FloatSize::Size => Some(match module.target_config().pointer_width {
target_lexicon::PointerWidth::U16 => types::F32,
target_lexicon::PointerWidth::U32 => types::F32,
target_lexicon::PointerWidth::U64 => types::F64,
}),
},
Type::Ref(_) => Some(module.target_config().pointer_type()),
Type::Proc(_) => Some(module.target_config().pointer_type()),
_ => None,
}
}
|
//! Checker tests, that require a Solver instance, so they cannot be unit tests of the
//! varisat-checker crate.
use anyhow::Error;
use proptest::prelude::*;
use varisat::{
checker::{Checker, ProofTranscriptProcessor, ProofTranscriptStep},
dimacs::write_dimacs,
CnfFormula, ExtendFormula, Lit, ProofFormat, Solver, Var,
};
use varisat_formula::test::{conditional_pigeon_hole, sgen_unsat_formula};
proptest! {
    // Round-trip: solve an unsatisfiable formula while recording a proof,
    // then replay DIMACS + proof through a fresh Checker.
    #[test]
    fn checked_unsat_via_dimacs(formula in sgen_unsat_formula(1..7usize)) {
        let mut dimacs = vec![];
        let mut proof = vec![];
        let mut solver = Solver::new();
        write_dimacs(&mut dimacs, &formula).unwrap();
        // Proof recording must be enabled before any clauses are added.
        solver.write_proof(&mut proof, ProofFormat::Varisat);
        solver.add_dimacs_cnf(&mut &dimacs[..]).unwrap();
        prop_assert_eq!(solver.solve().ok(), Some(false));
        solver.close_proof().map_err(|e| TestCaseError::fail(e.to_string()))?;
        drop(solver);
        let mut checker = Checker::new();
        checker.add_dimacs_cnf(&mut &dimacs[..]).unwrap();
        checker.check_proof(&mut &proof[..]).unwrap();
    }
    // Adds clauses one at a time, counting SAT results, then verifies the
    // proof transcript reports the same number of models and a final UNSAT.
    #[test]
    fn sgen_checked_unsat_incremental_clauses(formula in sgen_unsat_formula(1..7usize)) {
        let mut proof = vec![];
        let mut solver = Solver::new();
        solver.write_proof(&mut proof, ProofFormat::Varisat);
        let mut expected_models = 0;
        // Add all clauses incrementally so they are recorded in the proof
        solver.solve().unwrap();
        expected_models += 1;
        let mut last_state = Some(true);
        for clause in formula.iter() {
            solver.add_clause(clause);
            let state = solver.solve().ok();
            // The solver may flip SAT -> UNSAT exactly once and never back.
            if state != last_state {
                prop_assert_eq!(state, Some(false));
                prop_assert_eq!(last_state, Some(true));
                last_state = state;
            }
            if state == Some(true) {
                expected_models += 1;
            }
        }
        prop_assert_eq!(last_state, Some(false));
        drop(solver);
        // Transcript processor that tallies models and notes the UNSAT step.
        #[derive(Default)]
        struct FoundModels {
            counter: usize,
            unsat: bool,
        }
        impl ProofTranscriptProcessor for FoundModels {
            fn process_step(
                &mut self,
                step: &ProofTranscriptStep,
            ) -> Result<(), Error> {
                if let ProofTranscriptStep::Model { .. } = step {
                    self.counter += 1;
                } else if let ProofTranscriptStep::Unsat = step {
                    self.unsat = true;
                }
                Ok(())
            }
        }
        let mut found_models = FoundModels::default();
        let mut checker = Checker::new();
        checker.add_transcript(&mut found_models);
        checker.check_proof(&mut &proof[..]).unwrap();
        prop_assert_eq!(found_models.counter, expected_models);
        prop_assert!(found_models.unsat);
    }
    // Shrinks a failed-assumption core one literal at a time and checks the
    // transcript's SAT/UNSAT counts match the solver-side bookkeeping.
    #[test]
    fn pigeon_hole_checked_unsat_assumption_core(
        (enable_row, columns, formula) in conditional_pigeon_hole(1..5usize, 1..5usize),
    ) {
        let mut proof = vec![];
        let mut solver = Solver::new();
        solver.write_proof(&mut proof, ProofFormat::Varisat);
        let mut expected_sat = 0;
        let mut expected_unsat = 0;
        solver.solve().unwrap();
        expected_sat += 1;
        solver.add_formula(&formula);
        prop_assert_eq!(solver.solve().ok(), Some(true));
        expected_sat += 1;
        let mut assumptions = enable_row;
        // An assumption over a variable not in the formula must still be
        // handled (it can never be part of the core).
        assumptions.push(Lit::positive(Var::from_index(formula.var_count() + 10)));
        solver.assume(&assumptions);
        prop_assert_eq!(solver.solve().ok(), Some(false));
        expected_unsat += 1;
        let mut candidates = solver.failed_core().unwrap().to_owned();
        let mut core: Vec<Lit> = vec![];
        while !candidates.is_empty() {
            // Drop the last candidate; if the rest become SAT it is
            // essential and joins the core (asserted as a unit clause).
            solver.assume(&candidates[0..candidates.len() - 1]);
            match solver.solve() {
                Err(_) => unreachable!(),
                Ok(true) => {
                    expected_sat += 1;
                    let skipped = *candidates.last().unwrap();
                    core.push(skipped);
                    let single_clause = CnfFormula::from(Some([skipped]));
                    solver.add_formula(&single_clause);
                },
                Ok(false) => {
                    expected_unsat += 1;
                    candidates = solver.failed_core().unwrap().to_owned();
                }
            }
        }
        // Pigeon-hole: exactly columns + 1 essential assumptions.
        prop_assert_eq!(core.len(), columns + 1);
        drop(solver);
        // Transcript processor tallying model and unsat/failed-assumption
        // steps for comparison with the solver-side counters.
        #[derive(Default)]
        struct CountResults {
            sat: usize,
            unsat: usize,
        }
        impl ProofTranscriptProcessor for CountResults {
            fn process_step(
                &mut self,
                step: &ProofTranscriptStep,
            ) -> Result<(), Error> {
                match step {
                    ProofTranscriptStep::Model { .. } => {
                        self.sat += 1;
                    }
                    ProofTranscriptStep::Unsat
                    | ProofTranscriptStep::FailedAssumptions { .. } => {
                        self.unsat += 1;
                    }
                    _ => (),
                }
                Ok(())
            }
        }
        let mut count_results = CountResults::default();
        let mut checker = Checker::new();
        checker.add_transcript(&mut count_results);
        checker.check_proof(&mut &proof[..]).unwrap();
        prop_assert_eq!(count_results.sat, expected_sat);
        prop_assert_eq!(count_results.unsat, expected_unsat);
    }
}
|
use std::collections::{HashMap, BinaryHeap, HashSet};
use std::cmp::Ordering;
use std::fmt::Debug;
use rand::distributions::{Exp, Distribution};
use ordered_float::OrderedFloat;
pub type State = HashMap<Place, PlaceState>;
pub type Place = usize;
pub type Time = f64;
/// Marking of a single place: its current token count.
#[derive(Debug, Copy, Clone)]
pub struct PlaceState {
    // May go negative; no invariant is enforced here.
    pub tokens: i32,
}
/// A delta applied to one place's token count when an event fires.
#[derive(Debug, Copy, Clone)]
pub struct StateChange {
    // Target place.
    pub place: Place,
    // Signed token delta (added to the place's count).
    pub value: i32,
}
/// A stochastic transition in the simulation, in the style of a stochastic
/// Petri net: enablement and rate are functions of designated input places.
pub trait Event: Debug {
    /// Places whose states gate whether this event may fire.
    fn enablement_inputs(&self) -> Vec<Place>;
    /// Places whose states feed into the firing rate.
    fn rate_inputs(&self) -> Vec<Place>;
    /// Places whose token counts this event modifies when it fires.
    fn outputs(&self) -> Vec<Place>;
    /// Whether the event may currently fire, given the states of
    /// `enablement_inputs()` in order. Defaults to always-enabled; the
    /// parameter is underscore-prefixed so the default body does not emit
    /// an unused-variable warning.
    fn enabled(&self, _inputs: &[PlaceState]) -> bool { true }
    /// Exponential hazard rate given the states of `rate_inputs()` in order.
    fn hazard_rate(&self, inputs: &[PlaceState]) -> f64;
    /// Token deltas applied atomically when the event fires.
    fn fire(&self) -> Vec<StateChange>;
}
/// Discrete-event simulation driven by exponentially distributed firing
/// times (Gillespie-style next-reaction scheduling).
#[derive(Debug)]
pub struct Simulation {
    // Current marking: token count per place.
    pub state: State,
    // Simulation clock; advances to each fired event's time.
    current_time: Time,
    // All events, indexed by position.
    events: Vec<Box<dyn Event>>,
    // Per-event validity counter; a scheduled firing is stale unless its
    // counter matches this value (lazy invalidation on reschedule).
    valid_firing: Vec<usize>,
    // place -> events that must be rescheduled when that place changes.
    dependencies: HashMap<Place, HashSet<usize>>,
    // Pending firings, ordered soonest-first (see the Ord impl).
    upcoming_firings: BinaryHeap<ScheduledFiring>,
}
/// A tentative firing: (scheduled time, event index, validity counter at
/// scheduling time — stale if it no longer matches `valid_firing`).
#[derive(Debug, Copy, Clone)]
struct ScheduledFiring(Time, usize, usize);
impl Ord for ScheduledFiring {
    /// Order by scheduled time only, reversed so the max-heap
    /// `upcoming_firings` pops the earliest firing first (min-heap behavior).
    fn cmp(&self, other: &Self) -> Ordering {
        OrderedFloat(self.0).cmp(&OrderedFloat(other.0)).reverse()
    }
}
impl PartialOrd for ScheduledFiring {
    /// Defers to `Ord::cmp` so the partial and total orders agree.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl PartialEq for ScheduledFiring {
    /// Field-wise equality. The cheap integer fields (event index and
    /// validity counter) are compared before the float time.
    ///
    /// This is only valid when comparing objects from the same simulation,
    /// but that's probably fine...
    fn eq(&self, other: &Self) -> bool {
        self.1 == other.1 && self.2 == other.2 && self.0 == other.0
    }
}
impl Eq for ScheduledFiring {}
impl Simulation {
    //TODO: This is conspicuously missing a good way to set initial state
    /// Builds a simulation from its events: collects every referenced place
    /// (initialized to 0 tokens) and records which events depend on which
    /// places for rescheduling.
    pub fn from_events(events: Vec<Box<dyn Event>>) -> Self {
        //TODO: I know you can do this by chaining iterators but I can't make it work
        let mut places = Vec::new();
        let mut dependencies = HashMap::new();
        for (event_idx, event) in events.iter().enumerate() {
            let mut this_event_places = HashSet::new();
            this_event_places.extend(event.enablement_inputs());
            this_event_places.extend(event.rate_inputs());
            // Only input places create a rescheduling dependency; outputs
            // are added afterwards so they exist in `state` but do not
            // trigger rescheduling of this event.
            for place in &this_event_places {
                dependencies.entry(*place).or_insert(HashSet::new()).insert(event_idx);
            }
            this_event_places.extend(event.outputs());
            places.extend(this_event_places);
        }
        let state = places.iter().map(|p| (*p, PlaceState { tokens: 0 })).collect();
        Self {
            state,
            current_time: 0.0,
            valid_firing: events.iter().map(|_| 0).collect(),
            events: events,
            dependencies,
            upcoming_firings: BinaryHeap::new(),
        }
    }
    /// (Re)schedules one event: bumps its validity counter (invalidating any
    /// previously queued firing) and, if enabled, samples an exponential
    /// firing time from its hazard rate and queues it.
    ///
    /// NOTE(review): `Exp::new` panics for a non-positive rate — assumes
    /// `hazard_rate` returns > 0 whenever `enabled` is true; confirm with
    /// implementors.
    pub fn schedule_event(&mut self, event_idx: usize) {
        let event = &self.events[event_idx];
        let enablement_state: Vec<_> = event.enablement_inputs().iter().map(|p| self.state[p]).collect();
        self.valid_firing[event_idx] += 1;
        if event.enabled(&enablement_state) {
            let rate_state: Vec<_> = event.rate_inputs().iter().map(|p| self.state[p]).collect();
            let rate = event.hazard_rate(&rate_state);
            let firing_time = Exp::new(rate).sample(&mut rand::thread_rng()) + self.current_time;
            self.upcoming_firings.push(ScheduledFiring(firing_time, event_idx, self.valid_firing[event_idx]));
        }
    }
    /// Queues an initial firing for every event; call once before `run_until`.
    pub fn setup_initial_firings(&mut self) {
        for event_idx in 0..self.events.len() {
            self.schedule_event(event_idx);
        }
    }
    /// Pops the next *valid* firing, discarding stale entries whose validity
    /// counter no longer matches (lazy deletion).
    fn pop_event(&mut self) -> Option<ScheduledFiring> {
        let mut potential_next = self.upcoming_firings.pop();
        while let Some(next) = &potential_next {
            if self.valid_firing[next.1] == next.2 {
                break
            }
            potential_next = self.upcoming_firings.pop();
        }
        potential_next
    }
    /// Advances the simulation, firing events in time order until the next
    /// firing would exceed `time` (that firing is pushed back unconsumed).
    pub fn run_until(&mut self, time: Time) {
        let mut potential_next = self.pop_event();
        while let Some(next) = potential_next {
            if next.0 > time {
                self.upcoming_firings.push(next);
                break
            }
            self.current_time = next.0;
            let event = &self.events[next.1];
            // The fired event is always rescheduled, plus every event that
            // depends on a place this firing touched.
            let mut events_to_reschedule = HashSet::new();
            events_to_reschedule.insert(next.1);
            for state_change in event.fire() {
                if let Some(events) = self.dependencies.get(&state_change.place) {
                    events_to_reschedule.extend(events);
                }
                self.state.get_mut(&state_change.place).unwrap().tokens += state_change.value;
            }
            for event_idx in events_to_reschedule {
                self.schedule_event(event_idx);
            }
            potential_next = self.pop_event();
        }
    }
}
|
// Copyright (c) 2021 Quark Container Authors / 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use core::ptr;
use alloc::sync::Arc;
use alloc::boxed::Box;
use alloc::string::ToString;
use super::super::*;
use super::super::kernel::ipc_namespace::*;
use super::super::threadmgr::task_start::*;
use super::super::threadmgr::thread::*;
use super::super::SignalDef::*;
use super::super::qlib::common::*;
use super::super::qlib::linux_def::*;
use super::super::qlib::task_mgr::*;
use super::super::syscalls::sys_tls::*;
use super::super::task::*;
use super::task_block::*;
use super::task_stop::*;
use super::super::perflog::*;
const DEFAULT_STACK_SIZE: usize = MemoryDef::DEFAULT_STACK_SIZE as usize;
const DEFAULT_STACK_PAGES: u64 = DEFAULT_STACK_SIZE as u64 / (4 * 1024);
const DEFAULT_STACK_MAST: u64 = !(DEFAULT_STACK_SIZE as u64 - 1);
/// Resources a cloned/unshared task keeps independent of (rather than
/// shared with) its parent. Mirrors gVisor's SharingOptions.
#[derive(Debug, Copy, Clone, Default)]
pub struct SharingOptions {
    // If NewAddressSpace is true, the task should have an independent virtual
    // address space.
    pub NewAddressSpace: bool,
    // If NewSignalHandlers is true, the task should use an independent set of
    // signal handlers.
    pub NewSignalHandlers: bool,
    // If NewThreadGroup is true, the task should be the leader of its own
    // thread group. TerminationSignal is the signal that the thread group
    // will send to its parent when it exits. If NewThreadGroup is false,
    // TerminationSignal is ignored.
    pub NewThreadGroup: bool,
    pub TerminationSignal: Signal,
    // If NewPIDNamespace is true:
    //
    // - In the context of Task.Clone, the new task should be the init task
    // (TID 1) in a new PID namespace.
    //
    // - In the context of Task.Unshare, the task should create a new PID
    // namespace, and all subsequent clones of the task should be members of
    // the new PID namespace.
    pub NewPIDNamespace: bool,
    // If NewUserNamespace is true, the task should have an independent user
    // namespace.
    pub NewUserNamespace: bool,
    // If NewNetworkNamespace is true, the task should have an independent
    // network namespace. (Note that network namespaces are not really
    // implemented; see comment on Task.netns for details.)
    pub NewNetworkNamespace: bool,
    // If NewFiles is true, the task should use an independent file descriptor
    // table.
    pub NewFiles: bool,
    // If NewFSContext is true, the task should have an independent FSContext.
    pub NewFSContext: bool,
    // If NewUTSNamespace is true, the task should have an independent UTS
    // namespace.
    pub NewUTSNamespace: bool,
    // If NewIPCNamespace is true, the task should have an independent IPC
    // namespace.
    pub NewIPCNamespace: bool,
}
/// Full set of clone(2) options decoded from the raw flags word; built via
/// `CloneOptions::New`. Mirrors gVisor's CloneOptions.
#[derive(Debug, Copy, Clone, Default)]
pub struct CloneOptions {
    // SharingOptions defines the set of resources that the new task will share
    // with its parent.
    pub sharingOption: SharingOptions,
    // Stack is the initial stack pointer of the new task. If Stack is 0, the
    // new task will start with the same stack pointer as its parent.
    pub Stack: u64,
    // If SetTLS is true, set the new task's TLS (thread-local storage)
    // descriptor to TLS. If SetTLS is false, TLS is ignored.
    pub SetTLS: bool,
    pub TLS: u64,
    // If ChildClearTID is true, when the child exits, 0 is written to the
    // address ChildTID in the child's memory, and if the write is successful a
    // futex wake on the same address is performed.
    //
    // If ChildSetTID is true, the child's thread ID (in the child's PID
    // namespace) is written to address ChildTID in the child's memory. (As in
    // Linux, failed writes are silently ignored.)
    pub ChildClearTID: bool,
    pub ChildSetTID: bool,
    pub ChildTID: u64,
    // If ParentSetTID is true, the child's thread ID (in the parent's PID
    // namespace) is written to address ParentTID in the parent's memory. (As
    // in Linux, failed writes are silently ignored.)
    //
    // Older versions of the clone(2) man page state that CLONE_PARENT_SETTID
    // causes the child's thread ID to be written to ptid in both the parent
    // and child's memory, but this is a documentation error fixed by
    // 87ab04792ced ("clone.2: Fix description of CLONE_PARENT_SETTID").
    pub ParentSetTID: bool,
    pub ParentTID: u64,
    // If Vfork is true, place the parent in vforkStop until the cloned task
    // releases its TaskContext.
    pub Vfork: bool,
    // If Untraced is true, do not report PTRACE_EVENT_CLONE/FORK/VFORK for
    // this clone(), and do not ptrace-attach the caller's tracer to the new
    // task. (PTRACE_EVENT_VFORK_DONE will still be reported if appropriate).
    pub Untraced: bool,
    // If InheritTracer is true, ptrace-attach the caller's tracer to the new
    // task, even if no PTRACE_EVENT_CLONE/FORK/VFORK event would be reported
    // for it. If both Untraced and InheritTracer are true, no event will be
    // reported, but tracer inheritance will still occur.
    pub InheritTracer: bool,
}
impl CloneOptions {
    // Low byte of the flags word carries the child's exit signal.
    const EXIT_SIGNAL_MASK: i32 = 0xff;

    /// Decode a raw clone(2) flags word and arguments into validated
    /// `CloneOptions`, enforcing the same flag-combination rules as Linux
    /// (and rejecting unsupported combinations with EINVAL).
    ///
    /// # Errors
    /// - EINVAL for contradictory flag combinations (see inline checks).
    /// - EINVAL for CLONE_NEWUSER, which is not supported yet.
    pub fn New(flags: u64, cStack: u64, pTid: u64, cTid: u64, tls: u64, hasChildPIdNamespace: bool) -> Result<Self> {
        let flags = flags as i32;
        let opts = CloneOptions {
            sharingOption: SharingOptions {
                // CLONE_VM/SIGHAND/THREAD/FILES/FS *shared* when set, so the
                // "New…" independence flags are the negations (== 0); the
                // namespace flags request new namespaces when set (!= 0).
                NewAddressSpace: flags & CloneOp::CLONE_VM == 0,
                NewSignalHandlers: flags & CloneOp::CLONE_SIGHAND == 0,
                NewThreadGroup: flags & CloneOp::CLONE_THREAD == 0,
                // `flags` is already i32 here, so no cast is needed.
                TerminationSignal: Signal(flags & Self::EXIT_SIGNAL_MASK),
                NewPIDNamespace: flags & CloneOp::CLONE_NEWPID != 0,
                NewUserNamespace: flags & CloneOp::CLONE_NEWUSER != 0,
                NewNetworkNamespace: flags & CloneOp::CLONE_NEWNET != 0,
                NewFiles: flags & CloneOp::CLONE_FILES == 0,
                NewFSContext: flags & CloneOp::CLONE_FS == 0,
                NewUTSNamespace: flags & CloneOp::CLONE_NEWUTS != 0,
                NewIPCNamespace: flags & CloneOp::CLONE_NEWIPC != 0,
            },
            Stack: cStack,
            SetTLS: flags & CloneOp::CLONE_SETTLS != 0,
            TLS: tls,
            ChildClearTID: flags & CloneOp::CLONE_CHILD_CLEARTID != 0,
            ChildSetTID: flags & CloneOp::CLONE_CHILD_SETTID != 0,
            ChildTID: cTid,
            ParentSetTID: flags & CloneOp::CLONE_PARENT_SETTID != 0,
            ParentTID: pTid,
            Vfork: flags & CloneOp::CLONE_VFORK != 0,
            Untraced: flags & CloneOp::CLONE_UNTRACED != 0,
            InheritTracer: flags & CloneOp::CLONE_PTRACE != 0,
        };
        if opts.sharingOption.NewUserNamespace {
            // todo: handle NewUserNamespace
            //panic!("doesn't support new usernamespace ...");
            error!("doesn't support new usernamespace ...");
            return Err(Error::SysError(SysErr::EINVAL));
        }
        // Since signal actions may refer to application signal handlers by virtual
        // address, any set of signal handlers must refer to the same address
        // space.
        if !opts.sharingOption.NewSignalHandlers && opts.sharingOption.NewAddressSpace {
            return Err(Error::SysError(SysErr::EINVAL));
        }
        // In order for the behavior of thread-group-directed signals to be sane,
        // all tasks in a thread group must share signal handlers.
        if !opts.sharingOption.NewThreadGroup && opts.sharingOption.NewSignalHandlers {
            return Err(Error::SysError(SysErr::EINVAL));
        }
        // All tasks in a thread group must be in the same PID namespace.
        if !opts.sharingOption.NewThreadGroup && (opts.sharingOption.NewPIDNamespace || hasChildPIdNamespace) {
            return Err(Error::SysError(SysErr::EINVAL));
        }
        // The two different ways of specifying a new PID namespace are
        // incompatible.
        if opts.sharingOption.NewPIDNamespace && hasChildPIdNamespace {
            return Err(Error::SysError(SysErr::EINVAL));
        }
        // CLONE_NEWUSER is incompatible with CLONE_THREAD and CLONE_FS.
        if opts.sharingOption.NewUserNamespace && (!opts.sharingOption.NewThreadGroup || !opts.sharingOption.NewFSContext) {
            return Err(Error::SysError(SysErr::EINVAL));
        }
        Ok(opts)
    }
}
impl Thread {
    /// Create a new thread per `opts`, forking or sharing each resource
    /// (user/UTS/IPC namespaces, address space, FS context, FD table,
    /// thread group) as requested. `stackAddr` becomes the new task's id.
    ///
    /// # Errors
    /// - EPERM when a new user namespace is requested from a chrooted task,
    ///   or when a new PID/network/UTS namespace is requested without
    ///   CAP_SYS_ADMIN in the relevant user namespace.
    pub fn Clone(&self, opts: &CloneOptions, stackAddr: u64) -> Result<Self> {
        let pidns = self.PIDNamespace();
        let ts = pidns.Owner();
        let _wl = ts.WriteLock();
        let t = self.lock();
        let creds = t.creds.clone();
        let mut userns = creds.lock().UserNamespace.clone();
        if opts.sharingOption.NewUserNamespace {
            if t.IsChrooted() {
                return Err(Error::SysError(SysErr::EPERM))
            }
            userns = creds.NewChildUserNamespace()?;
        }
        // BUGFIX: the capability check must apply to *any* of the three
        // namespace flags. Without the parentheses, `&&` bound tighter than
        // `||`, so CLONE_NEWPID / CLONE_NEWNET returned EPERM even when the
        // caller held CAP_SYS_ADMIN.
        if (opts.sharingOption.NewPIDNamespace
            || opts.sharingOption.NewNetworkNamespace
            || opts.sharingOption.NewUTSNamespace)
            && !creds.HasCapabilityIn(Capability::CAP_SYS_ADMIN, &userns) {
            return Err(Error::SysError(SysErr::EPERM))
        }
        let mut utsns = t.utsns.clone();
        if opts.sharingOption.NewUTSNamespace {
            let tmp = utsns.Fork(&userns);
            utsns = tmp;
        }
        let mut ipcns = t.ipcns.clone();
        if opts.sharingOption.NewIPCNamespace {
            ipcns = IPCNamespace::New(&userns);
        }
        let mut memoryMgr = t.memoryMgr.clone();
        if opts.sharingOption.NewAddressSpace {
            let newMM = memoryMgr.Fork()?;
            memoryMgr = newMM;
        }
        let vforkParent = if opts.Vfork {
            Some(self.clone())
        } else {
            None
        };
        let mut fsc = t.fsc.clone();
        if opts.sharingOption.NewFSContext {
            let temp = fsc.Fork();
            fsc = temp;
        }
        let mut fdTbl = t.fdTbl.clone();
        if opts.sharingOption.NewFiles {
            let newFDTbl = fdTbl.Fork();
            fdTbl = newFDTbl;
        }
        let pidns = t.tg.PIDNamespace();
        if t.childPIDNamespace.is_some() {
            panic!("doesn't support childPIDNamespace********************");
            //pidns = t.childPIDNamespace.clone().unwrap();
        } else if opts.sharingOption.NewPIDNamespace {
            panic!("doesn't support NewPIDNamespace********************");
            //pidns = pidns.NewChild(&userns);
        }
        let mut tg = t.tg.clone();
        if opts.sharingOption.NewThreadGroup {
            let mut sh = tg.lock().signalHandlers.clone();
            if opts.sharingOption.NewSignalHandlers {
                sh = sh.Fork();
            }
            let kernel = t.k.clone();
            let limit = tg.lock().limits.clone();
            tg = kernel.newThreadGroup(&pidns,
                                       &sh,
                                       opts.sharingOption.TerminationSignal.clone(),
                                       &limit.GetCopy());
        }
        let mut cfg = TaskConfig {
            TaskId: stackAddr,
            Kernel: t.k.clone(),
            Parent: None,
            InheritParent: None,
            ThreadGroup: tg.clone(),
            SignalMask: t.signalMask.clone(),
            MemoryMgr: memoryMgr,
            FSContext: fsc,
            Fdtbl: fdTbl,
            Credentials: creds.clone(),
            Niceness: t.niceness,
            NetworkNamespaced: false,
            AllowedCPUMask: t.allowedCPUMask.Copy(),
            UTSNamespace: utsns,
            IPCNamespace: ipcns,
            Blocker: Blocker::New(stackAddr),
            ContainerID: t.containerID.to_string(),
        };
        // A new thread group gets a real parent; otherwise the new thread
        // inherits this thread's parent.
        if opts.sharingOption.NewThreadGroup {
            cfg.Parent = Some(self.clone());
        } else {
            cfg.InheritParent = Some(self.clone())
        }
        if opts.sharingOption.NewNetworkNamespace {
            cfg.NetworkNamespaced = true;
        }
        let pidns = tg.PIDNamespace();
        let ts = pidns.lock().owner.clone();
        let name = t.name.to_string();
        // Release this thread's lock before creating the new task, which
        // takes other locks.
        core::mem::drop(t);
        let kernel = self.lock().k.clone();
        let nt = ts.NewTask(&cfg, false, &kernel)?;
        nt.lock().name = name;
        if userns != creds.lock().UserNamespace.clone() {
            nt.SetUserNamespace(&userns).expect("Task.Clone: SetUserNamespace failed: ")
        }
        if opts.Vfork {
            nt.lock().vforkParent = vforkParent;
            self.MaybeBeginVforkStop(&nt);
        }
        return Ok(nt)
    }

    /// If this (parent) thread vfork'ed `child` and has not been killed,
    /// place it in a `VforkStop` until the child releases its task context.
    pub fn MaybeBeginVforkStop(&self, child: &Thread) {
        let tg = self.ThreadGroup();
        let _owner = tg.PIDNamespace().Owner();
        //let _r = owner.ReadLock();
        let lock = tg.lock().signalLock.clone();
        let _s = lock.lock();
        {
            let mut threadLocked = self.lock();
            if threadLocked.killedLocked() {
                // A killed parent must not be parked in a vfork stop.
                threadLocked.vforkParent = None;
                return;
            }
        }
        let vforkParent = child.lock().vforkParent.clone();
        if vforkParent == Some(self.clone()) {
            self.lock().beginInternalStopLocked(&Arc::new(VforkStop {}))
        }
    }

    /// Called by a vfork child when it releases its task context: wakes the
    /// parent out of its `VforkStop`, if it is still in one.
    pub fn UnstopVforkParent(&self) {
        let tg = self.ThreadGroup();
        let owner = tg.PIDNamespace().Owner();
        let _r = owner.ReadLock();
        let p = self.lock().vforkParent.take();
        match p {
            None => (),
            Some(p) => {
                let ptg = p.ThreadGroup();
                let lock = ptg.lock().signalLock.clone();
                let _s = lock.lock();
                let stop = p.lock().stop.clone();
                if stop.is_some() && stop.unwrap().Type() == TaskStopType::VFORKSTOP {
                    p.lock().endInternalStopLocked();
                }
            }
        }
    }
}
impl Task {
    /// Record the address to be zeroed + futex-woken when this task exits
    /// (CLONE_CHILD_CLEARTID / set_tid_address semantics).
    pub fn SetClearTID(&mut self, addr: u64) {
        self.tidInfo.clear_child_tid = Some(addr);
    }

    /// clone(2) entry point: validates options, clones the VM/task, performs
    /// the parent/child TID writes, sets TLS, and enqueues the new task.
    /// Returns the child's PID in this task's PID namespace.
    pub fn Clone(&self, flags: u64, cStack: u64, pTid: u64, cTid: u64, tls: u64) -> Result<i32> {
        let opts = CloneOptions::New(flags, cStack, pTid, cTid, tls, false)?;
        if opts.SetTLS && !IsValidSegmentBase(opts.TLS) {
            return Err(Error::SysError(SysErr::EPERM));
        }
        // fork (new address space) or no stack given: child starts at the
        // parent's current user stack pointer.
        let mut userSp = cStack;
        if opts.sharingOption.NewAddressSpace || cStack == 0 {
            userSp = Self::Current().GetPtRegs().rsp;
        }
        info!("Clone opts is {:x?}", &opts);
        let (pid, childTask) = self.CloneVM(&opts, userSp)?; //, cStack as * const u8);
        if opts.ParentSetTID {
            self.CopyOutObj(&pid, pTid)?;
        }
        let cTask = unsafe {
            &mut (*childTask)
        };
        if opts.ChildClearTID == true {
            cTask.SetClearTID(cTid);
        }
        if opts.ChildSetTID == true {
            // can't use the GetTypeMut as it is used with current pagetable.
            //*Task::GetTask(cTask.taskId).GetTypeMut(cTid)? = pid;
            cTask.CopyOutObj(&pid, cTid)?;
        }
        if opts.SetTLS {
            cTask.context.fs = tls;
        }
        // Make the child schedulable.
        taskMgr::NewTask(TaskId::New(cTask.taskId));
        return Ok(pid);
    }

    /// Allocates a kernel stack for the child, clones the kernel `Thread`,
    /// then constructs the child `Task` in place at the base of that stack
    /// via `ptr::write`. Returns (child pid, raw pointer to the child task).
    pub fn CloneVM(&self, opts: &CloneOptions, userSp: u64) -> Result<(i32, *mut Self)> {
        //let pid = self.GetProcessId();
        let cPid;
        // The kernel stack doubles as the Task's storage and its id.
        let s_ptr = KERNEL_STACK_ALLOCATOR.Allocate().unwrap() as *mut u8;
        let taskPtr = s_ptr as *mut Self;
        let task = Task::Current();
        let thread = task.Thread();
        let nt = thread.Clone(&opts, s_ptr as u64)?;
        unsafe {
            // Snapshot all shared/forked state off the new thread before
            // writing the Task struct.
            let mm = nt.lock().memoryMgr.clone();
            let creds = nt.lock().creds.clone();
            let utsns = nt.lock().utsns.clone();
            let ipcns = nt.lock().ipcns.clone();
            let fsContext = nt.lock().fsc.clone();
            let fdTbl = nt.lock().fdTbl.clone();
            let blocker = nt.lock().blocker.clone();
            let sched = nt.lock().sched.clone();
            let tg = nt.lock().tg.clone();
            tg.lock().liveThreads.Add(1);
            let pidns = tg.PIDNamespace();
            let ntid = pidns.IDOfTask(&nt);
            // A new address space needs its own futex manager; a shared one
            // keeps the parent's.
            let futexMgr = if opts.sharingOption.NewAddressSpace {
                task.futexMgr.Fork()
            } else {
                task.futexMgr.clone()
            };
            cPid = ntid;
            let signalStack = if opts.sharingOption.NewAddressSpace || opts.Vfork {
                self.CloneSignalStack()
            } else {
                SignalStack::default()
            };
            let ioUsage = nt.lock().ioUsage.clone();
            // SAFETY-relevant: in-place construction on the freshly
            // allocated kernel stack; `taskPtr` is valid and uninitialized.
            ptr::write(taskPtr, Self {
                context: Default::default(),
                queueId: 0,
                taskId: s_ptr as u64,
                mm: mm,
                tidInfo: Default::default(),
                isWaitThread: false,
                signalStack: signalStack,
                mountNS: task.mountNS.clone(),
                // Arc::new(Mutex::new(Default::default())),
                creds: creds,
                utsns: utsns,
                ipcns: ipcns,
                fsContext: fsContext,
                fdTbl: fdTbl,
                blocker: blocker,
                //Blocker::New(s_ptr as u64),
                thread: Some(nt.clone()),
                haveSyscallReturn: false,
                syscallRestartBlock: None,
                futexMgr: futexMgr,
                ioUsage: ioUsage,
                sched: sched,
                iovs: Vec::with_capacity(4),
                perfcounters: Some(THREAD_COUNTS.lock().NewCounters()),
                guard: Guard::default(),
            });
        }
        let curr = Self::Current();
        let new = unsafe { &mut *taskPtr };
        new.PerfGoto(PerfType::Blocked);
        new.PerfGoto(PerfType::User);
        // Copy the parent's kernel stack/registers so the child resumes as
        // if returning from the same syscall.
        CreateCloneTask(curr, new, userSp);
        return Ok((cPid, taskPtr));
    }

    /// unshare(2): move this task out of the selected shared resources.
    pub fn Unshare(&mut self, opts: &SharingOptions) -> Result<()> {
        // In Linux unshare(2), NewThreadGroup implies NewSignalHandlers and
        // NewSignalHandlers implies NewAddressSpace. All three flags are no-ops if
        // t is the only task using its MM, which due to clone(2)'s rules imply
        // that it is also the only task using its signal handlers / in its thread
        // group, and cause EINVAL to be returned otherwise.
        //
        // Since we don't count the number of tasks using each address space or set
        // of signal handlers, we reject NewSignalHandlers and NewAddressSpace
        // altogether, and interpret NewThreadGroup as requiring that t be the only
        // member of its thread group. This seems to be logically coherent, in the
        // sense that clone(2) allows a task to share signal handlers and address
        // spaces with tasks in other thread groups.
        if opts.NewAddressSpace || opts.NewSignalHandlers {
            return Err(Error::SysError(SysErr::EINVAL));
        }
        let t = self.Thread();
        let tg = t.lock().tg.clone();
        let signallock = tg.lock().signalLock.clone();
        if opts.NewThreadGroup {
            let _s = signallock.lock();
            if tg.lock().tasksCount != 1 {
                return Err(Error::SysError(SysErr::EINVAL));
                // This isn't racy because we're the only living task, and therefore
                // the only task capable of creating new ones, in our thread group.
            }
        }
        if opts.NewUserNamespace {
            if self.IsChrooted() {
                return Err(Error::SysError(SysErr::EPERM));
            }
            let creds = t.Credentials();
            let newUserNs = creds.NewChildUserNamespace()?;
            t.SetUserNamespace(&newUserNs)?;
            self.creds = creds;
        }
        let creds = self.creds.clone();
        // Namespace unshares below all require CAP_SYS_ADMIN.
        let haveCapSysAdmin = t.HasCapability(Capability::CAP_SYS_ADMIN);
        if opts.NewPIDNamespace {
            if !haveCapSysAdmin {
                return Err(Error::SysError(SysErr::EPERM));
            }
            let userns = creds.lock().UserNamespace.clone();
            let pidns = tg.PIDNamespace();
            // Future children (not this task) join the new PID namespace.
            t.lock().childPIDNamespace = Some(pidns.NewChild(&userns));
        }
        let mut tlock = t.lock();
        if opts.NewNetworkNamespace {
            if !haveCapSysAdmin {
                return Err(Error::SysError(SysErr::EPERM))
            }
            tlock.netns = true;
        }
        if opts.NewUTSNamespace {
            if !haveCapSysAdmin {
                return Err(Error::SysError(SysErr::EPERM))
            }
            let userns = creds.lock().UserNamespace.clone();
            let utsns = self.utsns.clone();
            self.utsns = utsns.Fork(&userns);
            tlock.utsns = self.utsns.clone();
        }
        if opts.NewIPCNamespace {
            if !haveCapSysAdmin {
                return Err(Error::SysError(SysErr::EPERM))
            }
            let userns = creds.lock().UserNamespace.clone();
            self.ipcns = IPCNamespace::New(&userns);
            tlock.ipcns = self.ipcns.clone();
        }
        if opts.NewFiles {
            let fdtbl = self.fdTbl.clone();
            self.fdTbl = fdtbl.Fork();
            tlock.fdTbl = self.fdTbl.clone();
        }
        if opts.NewFSContext {
            let fsc = self.fsContext.clone();
            self.fsContext = fsc.Fork();
            tlock.fsc = self.fsContext.clone();
        }
        return Ok(())
    }
}
/// Prepare `toTask` so it resumes execution as a clone of `fromTask`:
/// copies the parent's kernel stack (from the stack top down to its saved
/// pt_regs), duplicates the FPU/segment state, and arranges for the child to
/// enter `child_clone` with `userSp` in rdi and rax == 0 (clone's child
/// return value).
pub fn CreateCloneTask(fromTask: &Task, toTask: &mut Task, userSp: u64) {
    let mut from = fromTask.GetKernelSp();
    let fromSp = fromTask.GetPtRegs() as *const _ as u64;
    let mut to = toTask.GetKernelSp();
    let toPtRegs = toTask.GetPtRegs();
    unsafe {
        // Copy the kernel stack 8 bytes at a time, top-down, through the
        // saved pt_regs (inclusive).
        while from >= fromSp {
            *(to as *mut u64) = *(from as *const u64);
            from -= 8;
            to -= 8;
        }
        toTask.context.ready = 1;
        toTask.context.fs = fromTask.context.fs;
        toTask.context.gs = fromTask.context.gs;
        // Child's kernel rsp: one slot below its pt_regs, holding the entry
        // address popped on first switch-in.
        toTask.context.rsp = toTask.GetPtRegs() as *const _ as u64 - 8;
        toTask.context.rdi = userSp;
        toTask.context.X86fpstate = Box::new(fromTask.context.X86fpstate.Fork());
        // clone() returns 0 in the child.
        toPtRegs.rax = 0;
        *(toTask.context.rsp as *mut u64) = child_clone as u64;
        // put the floattpointer state address in the stack, and the "child_clone" call can restore that
        *((toTask.context.rsp - 8) as *mut u64) = toTask.context.X86fpstate.FloatingPointData();
    }
}
/// Task-stop marker placed on a vfork parent until the child releases its
/// task context; killable so a fatal signal can break the wait.
pub struct VforkStop {}

impl TaskStop for VforkStop {
    fn Type(&self) -> TaskStopType {
        TaskStopType::VFORKSTOP
    }

    fn Killable(&self) -> bool {
        true
    }
}
|
use super::{run_data_test, InfluxRpcTest};
use async_trait::async_trait;
use futures::{prelude::*, FutureExt};
use influxdb_storage_client::tag_key_bytes_to_strings;
use std::sync::Arc;
use test_helpers_end_to_end::{
maybe_skip_integration, DataGenerator, GrpcRequestBuilder, MiniCluster, StepTestState,
};
/// End-to-end: generated data, then a `tag_values("host")` storage-gRPC
/// request restricted by time range and a `host=server01` predicate should
/// return exactly that one value.
#[tokio::test]
async fn tag_values() {
    let generator = Arc::new(DataGenerator::new());
    run_data_test(
        Arc::clone(&generator),
        Box::new(move |state: &mut StepTestState| {
            let generator = Arc::clone(&generator);
            async move {
                let mut storage_client = state.cluster().querier_storage_client();
                let tag_values_request = GrpcRequestBuilder::new()
                    .source(state.cluster())
                    .timestamp_range(generator.min_time(), generator.max_time())
                    .tag_predicate("host", "server01")
                    .build_tag_values("host");
                let tag_values_response =
                    storage_client.tag_values(tag_values_request).await.unwrap();
                let responses: Vec<_> = tag_values_response
                    .into_inner()
                    .try_collect()
                    .await
                    .unwrap();
                // Tag values arrive as raw bytes; decode before comparing.
                let values = &responses[0].values;
                let values: Vec<_> = values
                    .iter()
                    .map(|v| tag_key_bytes_to_strings(v.clone()))
                    .collect();
                assert_eq!(values, vec!["server01"]);
            }
            .boxed()
        }),
    )
    .await
}
/// A tag key absent from the data yields an empty (non-error) response.
/// Uses the `TagValuesTest` harness defined later in this file.
#[tokio::test]
async fn data_without_tags_no_predicate() {
    Arc::new(TagValuesTest {
        setup_name: "TwoMeasurementsManyNulls",
        tag_key: "tag_not_in_chunks",
        request: GrpcRequestBuilder::new(),
        // If the tag is not present, expect no values back (not error)
        expected_values: vec![],
    })
    .run()
    .await;
}
#[tokio::test]
async fn data_with_tags_no_predicate() {
    // No predicate: all distinct `state` values come back.
    let case = TagValuesTest {
        setup_name: "TwoMeasurementsManyNulls",
        tag_key: "state",
        request: GrpcRequestBuilder::new(),
        expected_values: vec!["CA", "MA", "NY"],
    };
    Arc::new(case).run().await;
}
#[tokio::test]
async fn timestamp_range_predicate() {
    // Time range [50, 201) excludes the rows carrying the NY value.
    let case = TagValuesTest {
        setup_name: "TwoMeasurementsManyNulls",
        tag_key: "state",
        request: GrpcRequestBuilder::new().timestamp_range(50, 201),
        expected_values: vec!["CA", "MA"],
    };
    Arc::new(case).run().await;
}
#[tokio::test]
async fn tag_predicates() {
    // Only cities in rows where state=MA are returned.
    let case = TagValuesTest {
        setup_name: "TwoMeasurementsManyNulls",
        tag_key: "city",
        request: GrpcRequestBuilder::new().tag_predicate("state", "MA"),
        expected_values: vec!["Boston"],
    };
    Arc::new(case).run().await;
}
#[tokio::test]
async fn measurement_predicates() {
    // Restricting to the h2o measurement drops the o2-only state values.
    let case = TagValuesTest {
        setup_name: "TwoMeasurementsManyNulls",
        tag_key: "state",
        request: GrpcRequestBuilder::new().measurement_predicate("h2o"),
        expected_values: vec!["CA", "MA"],
    };
    Arc::new(case).run().await;
}
#[tokio::test]
async fn not_measurement_predicates() {
    let case = TagValuesTest {
        setup_name: "TwoMeasurementsManyNulls",
        tag_key: "state",
        request: GrpcRequestBuilder::new()
            .not_measurement_predicate("o2")
            // the time range filters out the NY row
            .timestamp_range(1, 600),
        expected_values: vec!["CA", "MA"],
    };
    Arc::new(case).run().await;
}
#[tokio::test]
async fn timestamp_range_and_measurement_predicates() {
    // Combined time-range + measurement restriction narrows to a single value.
    let case = TagValuesTest {
        setup_name: "TwoMeasurementsManyNulls",
        tag_key: "state",
        request: GrpcRequestBuilder::new()
            .timestamp_range(50, 201)
            .measurement_predicate("o2"),
        expected_values: vec!["MA"],
    };
    Arc::new(case).run().await;
}
#[tokio::test]
async fn timestamp_range_and_tag_predicates() {
    // Combined time-range + tag restriction on a sortable-tags fixture.
    let case = TagValuesTest {
        setup_name: "MeasurementsSortableTags",
        tag_key: "zz_tag",
        request: GrpcRequestBuilder::new()
            .timestamp_range(700, 900)
            .tag_predicate("state", "MA"),
        expected_values: vec!["A"],
    };
    Arc::new(case).run().await;
}
#[tokio::test]
async fn measurement_and_tag_predicate() {
    // h2o rows in MA contribute two cities.
    let case = TagValuesTest {
        setup_name: "TwoMeasurementsMultiTagValue",
        tag_key: "city",
        request: GrpcRequestBuilder::new()
            .measurement_predicate("h2o")
            .tag_predicate("state", "MA"),
        expected_values: vec!["Boston", "Lowell"],
    };
    Arc::new(case).run().await;
}
#[tokio::test]
async fn timestamp_range_measurement_and_tag_predicate() {
    // All three predicate kinds combined; the NY row survives this range.
    let case = TagValuesTest {
        setup_name: "TwoMeasurementsManyNulls",
        tag_key: "state",
        request: GrpcRequestBuilder::new()
            .measurement_predicate("o2")
            .tag_predicate("state", "NY")
            .timestamp_range(1, 550),
        expected_values: vec!["NY"],
    };
    Arc::new(case).run().await;
}
#[tokio::test]
async fn predicate_no_results() {
    let case = TagValuesTest {
        setup_name: "TwoMeasurementsManyNulls",
        tag_key: "state",
        request: GrpcRequestBuilder::new()
            .measurement_predicate("o2")
            .tag_predicate("state", "NY")
            // the narrower time range filters out the NY row,
            // so nothing matches
            .timestamp_range(1, 300),
        expected_values: vec![],
    };
    Arc::new(case).run().await;
}
#[tokio::test]
async fn or_predicate() {
    let case = TagValuesTest {
        setup_name: "TwoMeasurementsManyNulls",
        tag_key: "city",
        request: GrpcRequestBuilder::new()
            .measurement_predicate("o2")
            // the 'OR' in this predicate means the query cannot be
            // answered from metadata alone
            .or_field_value_predicates([70.0].into_iter())
            // the time range filters out the Brooklyn row
            .timestamp_range(1, 600),
        expected_values: vec!["Boston", "LA", "NYC"],
    };
    Arc::new(case).run().await;
}
/// Asking for tag values of a *field* column must be rejected by the gRPC
/// planner with an error naming the column.
#[tokio::test]
#[should_panic(
    expected = "gRPC planner error: column \'temp\' is not a tag, it is Some(Field(Float))"
)]
async fn tag_values_on_field_is_invalid() {
    // Tell the test to panic with the expected message if `TEST_INTEGRATION` isn't set so that
    // this still passes
    maybe_skip_integration!(
        "gRPC planner error: column \'temp\' is not a tag, it is Some(Field(Float))"
    );

    Arc::new(TagValuesTest {
        setup_name: "TwoMeasurementsManyNulls",
        // temp is a field, not a tag, so the request should fail
        tag_key: "temp",
        request: GrpcRequestBuilder::new(),
        // never reached: the call panics before asserting these values
        expected_values: vec!["Boston", "LA", "NYC"],
    })
    .run()
    .await;
}
#[tokio::test]
async fn nonexistent_field_predicates() {
    let case = TagValuesTest {
        setup_name: "TwoMeasurementsManyNulls",
        tag_key: "state",
        request: GrpcRequestBuilder::new()
            // the field doesn't exist, so this predicate matches no rows
            .field_predicate("not_a_column"),
        expected_values: vec![],
    };
    Arc::new(case).run().await;
}
#[tokio::test]
async fn field_predicates() {
    let case = TagValuesTest {
        setup_name: "TwoMeasurementsManyFields",
        tag_key: "state",
        request: GrpcRequestBuilder::new()
            // this field exists, but only on the MA rows (not CA)
            .field_predicate("moisture"),
        expected_values: vec!["MA"],
    };
    Arc::new(case).run().await;
}
#[tokio::test]
async fn periods() {
    // Tag keys containing '.' must round-trip through the request/response.
    let case = TagValuesTest {
        setup_name: "PeriodsInNames",
        tag_key: "tag.one",
        request: GrpcRequestBuilder::new().timestamp_range(0, 1_700_000_001_000_000_000),
        expected_values: vec!["value", "value2"],
    };
    Arc::new(case).run().await;
}
/// A single parameterized `tag_values` test case, executed via
/// [`InfluxRpcTest::run`].
#[derive(Debug)]
struct TagValuesTest {
    // Name of the canned data setup to load into the cluster.
    setup_name: &'static str,
    // Tag key whose values are requested.
    tag_key: &'static str,
    // Request template (predicates, time range); `source` is filled in later.
    request: GrpcRequestBuilder,
    // Decoded tag values the response must equal exactly (order included).
    expected_values: Vec<&'static str>,
}
#[async_trait]
impl InfluxRpcTest for TagValuesTest {
    fn setup_name(&self) -> &'static str {
        self.setup_name
    }

    /// Issues this case's `tag_values` request against the cluster and
    /// asserts the decoded values match `expected_values`.
    async fn request_and_assert(&self, cluster: &MiniCluster) {
        let mut storage_client = cluster.querier_storage_client();

        // Finish the request template with the cluster source and tag key.
        let request = self
            .request
            .clone()
            .source(cluster)
            .build_tag_values(self.tag_key);

        let response = storage_client.tag_values(request).await.unwrap();
        let responses: Vec<_> = response.into_inner().try_collect().await.unwrap();

        // Decode the raw byte values of the first response frame.
        let decoded: Vec<_> = responses[0]
            .values
            .iter()
            .map(|raw| tag_key_bytes_to_strings(raw.to_vec()))
            .collect();

        assert_eq!(decoded, self.expected_values);
    }
}
|
use std::collections::BTreeSet;
/// States of the hard-coded six-state machine driven by `tasks`.
enum States {
    A,
    B,
    C,
    D,
    E,
    F,
}

/// Runs the hard-coded 6-state, 2-symbol Turing machine for `n` steps on an
/// initially blank tape and returns how many cells hold a 1 afterwards.
fn tasks(n: i32) -> i32 {
    use States::*;

    // Set of tape positions currently holding a 1; absence means 0.
    let mut marked: BTreeSet<i32> = BTreeSet::new();
    let mut cursor = 0i32;
    let mut current = A;

    for _ in 0..n {
        let bit = marked.contains(&cursor);
        // Transition table: (value to write, head movement, next state).
        let (write, step, next) = match current {
            A => if bit { (false, -1, F) } else { (true, 1, B) },
            B => if bit { (false, 1, D) } else { (false, 1, C) },
            C => if bit { (true, 1, E) } else { (true, -1, D) },
            D => if bit { (false, -1, D) } else { (false, -1, E) },
            E => if bit { (true, 1, C) } else { (false, 1, A) },
            F => if bit { (true, 1, A) } else { (true, -1, A) },
        };
        if write {
            marked.insert(cursor);
        } else {
            marked.remove(&cursor);
        }
        current = next;
        cursor += step;
    }

    marked.len() as i32
}
fn main() {
    // Run the machine for the fixed step count and print the tally of 1s.
    let ones = tasks(12794428);
    println!("{:?}", ones);
}
|
//! Delayed entity removal, e.g. despawning short-lived sword-attack entities
//! after a timeout.
use amethyst::{
core::timing::Time,
ecs::{Component, DenseVecStorage, Entities, Join, System, WriteStorage, Read},
};
/// Component that schedules its entity for deletion once `current`
/// accumulates past `end` (both in seconds).
pub struct DelayedRemove {
    // Elapsed time so far; advanced each frame by the system below.
    pub current: f32,
    // Lifetime threshold after which the entity is deleted.
    pub end: f32,
}
// Standard ECS component registration; dense storage since many short-lived
// entities may carry this component at once.
impl Component for DelayedRemove {
    type Storage = DenseVecStorage<Self>;
}
impl DelayedRemove {
pub fn new(end: f32) -> Self {
DelayedRemove { current: 0.0, end }
}
}
/// System that ages `DelayedRemove` timers and deletes expired entities.
pub struct DelayedRemoveSystem;
impl<'s> System<'s> for DelayedRemoveSystem {
    type SystemData = (
        Read<'s, Time>,
        Entities<'s>,
        WriteStorage<'s, DelayedRemove>,
    );

    // Advances every timer by the frame delta and requests deletion of any
    // entity whose timer has passed its threshold.
    fn run(&mut self, (time, entities, mut delayed_removes): Self::SystemData) {
        for (delayed_remove, entity) in (&mut delayed_removes, &entities).join() {
            delayed_remove.current += time.delta_seconds();
            if delayed_remove.current > delayed_remove.end {
                // NOTE(review): `info!`/`warn!` are not imported in this file;
                // presumably the crate root does `#[macro_use] extern crate log`
                // — confirm, otherwise this does not compile.
                info!("Delayed remove of {}", entity.id());
                if let Err(error) = entities.delete(entity) {
                    warn!("Delayed remove of {} failed: {}", entity.id(), error);
                }
            }
        }
    }
}
|
use anyhow::Result;
use rocket::http::{Cookies, Status};
use rocket::response::{status::Custom, Response};
use rocket::Rocket;
use rocket_contrib::json::Json;
use serde_json::Value;
use crate::core::users::{entity::User, repository};
use crate::api::catchers::*;
use crate::utils::{db::DbConn, get_session_id};
/// POST / — creates (or re-activates) a user.
///
/// Requires `username` and `password` in the payload; looks up or creates the
/// stored record by the payload's id, marks it activated, and persists it.
/// Returns the updated user as JSON, or 422/500 on validation/storage errors.
#[post("/", data = "<user>")]
fn create(user: Json<User>, conn: DbConn) -> Custom<Json<Value>> {
    if user.username.is_none() || user.password.is_none() {
        return unprocessable_entity();
    }
    let user_id = user.id.clone();
    let mut found_user = match repository::find_or_create(user_id, &conn) {
        Ok(user) => user,
        Err(_) => {
            return internal_error();
        }
    };
    found_user.activated = Some(true);
    // BUG FIX: persist the record we just activated. The original code passed
    // the incoming payload (`user`) to `update_user`, silently discarding the
    // `activated = Some(true)` write on `found_user`.
    let updated = repository::update_user(&mut found_user, &conn);
    match updated {
        Ok(u) => Custom(Status::Ok, Json(json!(u))),
        Err(_) => internal_error(),
    }
}
/// GET /<id> — fetches a single user by id.
#[get("/<id>")]
fn get(id: String, conn: DbConn) -> Result<Json<User>> {
    let user = repository::fetch_user(id, &conn)?;
    Ok(Json(user))
}
/// GET /get_me — fetches the user identified by the session cookie.
#[get("/get_me")]
fn get_me(conn: DbConn, mut cookies: Cookies) -> Result<Json<User>> {
    let session_id = get_session_id(&mut cookies);
    let user = repository::fetch_user(session_id, &conn)?;
    Ok(Json(user))
}
/// Mounts the user API routes under `/api/user` on the given Rocket instance.
pub fn fuel(rocket: Rocket) -> Rocket {
    let user_routes = routes![get, create, get_me];
    rocket.mount("/api/user", user_routes)
}
|
//! This module contains implementations for the storage gRPC service
//! implemented in terms of the [`QueryNamespace`](iox_query::QueryNamespace).
use super::{TAG_KEY_FIELD, TAG_KEY_MEASUREMENT};
use crate::{
data::{
fieldlist_to_measurement_fields_response, series_or_groups_to_frames, tag_keys_to_byte_vecs,
},
expr::{self, DecodedTagKey, GroupByAndAggregate, InfluxRpcPredicateBuilder, Loggable},
input::GrpcInputs,
permit::StreamWithPermit,
query_completed_token::QueryCompletedTokenStream,
response_chunking::ChunkReadResponses,
StorageService,
};
use data_types::NamespaceName;
use datafusion::error::DataFusionError;
use futures::{stream::BoxStream, Stream, StreamExt, TryStreamExt};
use generated_types::{
google::protobuf::{Any as ProtoAny, Empty},
influxdata::platform::errors::InfluxDbError,
literal_or_regex::Value as RegexOrLiteralValue,
offsets_response::PartitionOffsetResponse,
read_response::Frame,
storage_server::Storage,
tag_key_predicate, CapabilitiesResponse, Capability, Int64ValuesResponse, LiteralOrRegex,
MeasurementFieldsRequest, MeasurementFieldsResponse, MeasurementNamesRequest,
MeasurementTagKeysRequest, MeasurementTagValuesRequest, OffsetsResponse, Predicate,
ReadFilterRequest, ReadGroupRequest, ReadResponse, ReadSeriesCardinalityRequest,
ReadWindowAggregateRequest, StringValuesResponse, TagKeyMetaNames, TagKeysRequest,
TagValuesGroupedByMeasurementAndTagKeyRequest, TagValuesRequest, TagValuesResponse,
TimestampRange,
};
use iox_query::{
exec::{
fieldlist::FieldList, seriesset::converter::Error as SeriesSetError, IOxSessionContext,
},
QueryCompletedToken, QueryNamespace, QueryText,
};
use observability_deps::tracing::{error, info, trace};
use prost::{bytes::BytesMut, Message};
use service_common::{datafusion_error_to_tonic_code, planner::Planner, QueryNamespaceProvider};
use snafu::{OptionExt, ResultExt, Snafu};
use std::{
collections::{BTreeSet, HashMap},
fmt::{Display, Formatter, Result as FmtResult},
sync::Arc,
};
use tokio_stream::wrappers::ReceiverStream;
use tonic::{metadata::MetadataMap, Response, Status};
use trace::{ctx::SpanContext, span::SpanExt};
use trace_http::ctx::{RequestLogContext, RequestLogContextExt};
use tracker::InstrumentedAsyncOwnedSemaphorePermit;
/// The size to which we limit our [`ReadResponse`] payloads.
///
/// We will regroup the returned frames (preserving order) to only produce [`ReadResponse`] objects of approximately
/// this size (there's a bit of additional encoding overhead on top of that, but that should be OK).
const MAX_READ_RESPONSE_SIZE: usize = 4194304 - 100_000; // 4MB - <wiggle room>

/// The max number of points allowed in each output data frame. This is the same value TSM uses,
/// and is used to avoid overlarge individual gRPC messages.
const MAX_POINTS_PER_FRAME: usize = 1000;
/// Errors raised while serving Storage gRPC requests; each variant's snafu
/// `display` attribute is the user-visible message, and [`Error::into_status`]
/// maps variants onto tonic status codes.
#[derive(Debug, Snafu)]
pub enum Error {
    #[snafu(display("Namespace not found: {}", db_name))]
    NamespaceNotFound { db_name: String },

    // --- DataFusion-sourced failures while listing metadata or running plans ---
    #[snafu(display("Error listing tables in namespace '{}': {}", db_name, source))]
    ListingTables {
        db_name: String,
        source: DataFusionError,
    },

    #[snafu(display("Error listing columns in namespace '{}': {}", db_name, source))]
    ListingColumns {
        db_name: String,
        source: DataFusionError,
    },

    #[snafu(display("Error listing fields in namespace '{}': {}", db_name, source))]
    ListingFields {
        db_name: String,
        source: DataFusionError,
    },

    #[snafu(display("Error creating series plans for namespace '{}': {}", db_name, source))]
    PlanningFilteringSeries {
        db_name: String,
        source: DataFusionError,
    },

    #[snafu(display("Error creating group plans for namespace '{}': {}", db_name, source))]
    PlanningGroupSeries {
        db_name: String,
        source: DataFusionError,
    },

    #[snafu(display("Error running series plans for namespace '{}': {}", db_name, source))]
    FilteringSeries {
        db_name: String,
        source: DataFusionError,
    },

    #[snafu(display("Error running grouping plans for namespace '{}': {}", db_name, source))]
    GroupingSeries {
        db_name: String,
        source: DataFusionError,
    },

    #[snafu(display(
        "Can not retrieve tag values for '{}' in namespace '{}': {}",
        tag_name,
        db_name,
        source
    ))]
    ListingTagValues {
        db_name: String,
        tag_name: String,
        source: DataFusionError,
    },

    // --- Failures converting the RPC request into internal predicate/aggregate forms ---
    #[snafu(display("Error setting predicate table '{:?}': {}", table, source))]
    SettingPredicateTable {
        table: Option<String>,
        source: super::expr::Error,
    },

    #[snafu(display("Error converting Predicate '{}: {}", rpc_predicate_string, source))]
    ConvertingPredicate {
        rpc_predicate_string: String,
        source: super::expr::Error,
    },

    #[snafu(display("Error converting group type '{}': {}", aggregate_string, source))]
    ConvertingReadGroupType {
        aggregate_string: String,
        source: super::expr::Error,
    },

    #[snafu(display(
        "Error converting read_group aggregate '{}': {}",
        aggregate_string,
        source
    ))]
    ConvertingReadGroupAggregate {
        aggregate_string: String,
        source: super::expr::Error,
    },

    #[snafu(display(
        "Error converting read_aggregate_window aggregate definition '{}': {}",
        aggregate_string,
        source
    ))]
    ConvertingWindowAggregate {
        aggregate_string: String,
        source: super::expr::Error,
    },

    #[snafu(display("Error converting tag_key to UTF-8 in tag_values request, tag_key value '{}': {}", String::from_utf8_lossy(source.as_bytes()), source))]
    ConvertingTagKeyInTagValues { source: std::string::FromUtf8Error },

    #[snafu(display("Error computing groups series: {}", source))]
    ComputingGroupedSeriesSet { source: SeriesSetError },

    #[snafu(display("Converting field information series into gRPC response: {}", source))]
    ConvertingFieldList { source: super::data::Error },

    #[snafu(display("Error processing measurement constraint {:?}", pred))]
    MeasurementLiteralOrRegex { pred: LiteralOrRegex },

    #[snafu(display("Missing tag key predicate"))]
    MissingTagKeyPredicate {},

    #[snafu(display("Tag Key regex error: {}", source))]
    InvalidTagKeyRegex { source: regex::Error },

    #[snafu(display("Error sending results via channel: {}", source))]
    SendingResults {
        source: Box<dyn std::error::Error + Send + Sync>,
    },

    #[snafu(display(
        "Unexpected hint value on read_group request. Expected 0, got {}",
        hints
    ))]
    InternalHintsFieldNotSupported { hints: u32 },

    #[snafu(display("Operation not yet implemented: {}", operation))]
    NotYetImplemented { operation: String },
}
/// Convenience alias defaulting the error type to this module's [`Error`].
pub type Result<T, E = Error> = std::result::Result<T, E>;
impl From<Error> for Status {
    /// Converts an error from the business logic into the appropriate tonic
    /// status, logging it on the way out.
    fn from(err: Error) -> Self {
        error!(e=%err, "Error handling gRPC request");
        err.into_status()
    }
}
impl Error {
/// Converts a result from the business logic into the appropriate tonic
/// status
fn into_status(self) -> Status {
let msg = self.to_string();
let code = match self {
Self::NamespaceNotFound { .. } => tonic::Code::NotFound,
Self::ListingTables { source, .. }
| Self::ListingColumns { source, .. }
| Self::ListingFields { source, .. }
| Self::PlanningFilteringSeries { source, .. }
| Self::PlanningGroupSeries { source, .. }
| Self::FilteringSeries { source, .. }
| Self::GroupingSeries { source, .. }
| Self::ListingTagValues { source, .. } => datafusion_error_to_tonic_code(&source),
Self::ConvertingPredicate { source, .. }
| Self::ConvertingReadGroupType { source, .. }
| Self::ConvertingReadGroupAggregate { source, .. }
| Self::ConvertingWindowAggregate { source, .. }
| Self::SettingPredicateTable { source, .. }
if matches!(
source,
super::expr::Error::FieldColumnsNotSupported { .. }
| super::expr::Error::MultipleTablePredicateNotSupported { .. }
) =>
{
tonic::Code::Unimplemented
}
Self::ConvertingPredicate { .. }
| Self::ConvertingReadGroupAggregate { .. }
| Self::ConvertingReadGroupType { .. }
| Self::ConvertingWindowAggregate { .. }
| Self::ConvertingTagKeyInTagValues { .. }
| Self::ComputingGroupedSeriesSet { .. }
| Self::ConvertingFieldList { .. }
| Self::SettingPredicateTable { .. }
| Self::MeasurementLiteralOrRegex { .. }
| Self::MissingTagKeyPredicate {}
| Self::InvalidTagKeyRegex { .. } => tonic::Code::InvalidArgument,
Self::SendingResults { .. } | Self::InternalHintsFieldNotSupported { .. } => {
tonic::Code::Internal
}
Self::NotYetImplemented { .. } => tonic::Code::Unimplemented,
};
// InfluxRPC clients expect an instance of InfluxDbError
// (or another error type from platform.influxdata.errors)
// to appear in the `details` field of the gRPC status, which
// helps the client determine if the error should be
// displayed to users, is retryable, etc.
let influxdb_error = InfluxDbError {
code: InfluxCode::from(code).to_string(),
message: msg.clone(),
op: "iox/influxrpc".to_string(),
error: None,
};
let mut err_bytes = BytesMut::new();
match influxdb_error.encode(&mut err_bytes) {
Ok(()) => (),
Err(e) => {
error!(e=%e, "failed to serialized InfluxDBError");
return Status::unknown(format!("failed to serialize InfluxDB error: {e}"));
}
}
let any_err = ProtoAny {
type_url: generated_types::protobuf_type_url(
"influxdata.platform.errors.InfluxDBError",
),
value: err_bytes.freeze(),
};
let mut tonic_status = generated_types::google::encode_status(code, msg, any_err);
add_headers(tonic_status.metadata_mut());
tonic_status
}
}
/// These are the set of error codes that can appear in an InfluxDBError.
/// Taken from here:
/// <https://github.com/influxdata/idpe/blob/master/pkg/influxerror/errors.go>
/// Disabling Clippy warning about variant names so that they can match what
/// is in idpe.
///
/// Commented-out variants exist upstream but have no tonic mapping here yet.
#[allow(clippy::enum_variant_names)]
enum InfluxCode {
    EInternal,
    ENotFound,
    EConflict,
    EInvalid,
    // EUnprocessableEntity,
    // EEmptyValue,
    EUnavailable,
    // EForbidden,
    // ETooManyRequests,
    EUnauthorized,
    // EMethodNotAllowed,
    ETooLarge,
    ENotImplemented,
    // EUpstreamServer,
    ERequestCanceled,
}
impl Display for InfluxCode {
    /// Renders the wire-format string for each code (must match idpe).
    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
        f.write_str(match self {
            InfluxCode::EInternal => "internal error",
            InfluxCode::ENotFound => "not found",
            InfluxCode::EConflict => "conflict",
            InfluxCode::EInvalid => "invalid",
            // InfluxCode::EUnprocessableEntity => "unprocessable entity",
            // InfluxCode::EEmptyValue => "empty value",
            InfluxCode::EUnavailable => "unavailable",
            // InfluxCode::EForbidden => "forbidden",
            // InfluxCode::ETooManyRequests => "too many requests",
            InfluxCode::EUnauthorized => "unauthorized",
            // InfluxCode::EMethodNotAllowed => "method not allowed",
            InfluxCode::ETooLarge => "request too large",
            InfluxCode::ENotImplemented => "not implemented",
            // InfluxCode::EUpstreamServer => "upstream server",
            InfluxCode::ERequestCanceled => "request canceled",
        })
    }
}
impl From<tonic::Code> for InfluxCode {
fn from(tonic_code: tonic::Code) -> InfluxCode {
match tonic_code {
tonic::Code::Cancelled => InfluxCode::ERequestCanceled,
tonic::Code::InvalidArgument => InfluxCode::EInvalid,
tonic::Code::NotFound => InfluxCode::ENotFound,
tonic::Code::AlreadyExists => InfluxCode::EConflict,
tonic::Code::PermissionDenied => InfluxCode::EUnauthorized,
tonic::Code::ResourceExhausted => InfluxCode::ETooLarge,
tonic::Code::FailedPrecondition => InfluxCode::EInvalid,
tonic::Code::OutOfRange => InfluxCode::EInvalid,
tonic::Code::Unimplemented => InfluxCode::ENotImplemented,
tonic::Code::Unavailable => InfluxCode::EUnavailable,
_ => InfluxCode::EInternal,
}
}
}
/// Add IOx specific headers to the response
///
/// storage-type: iox (needed so IDPE can show the errors to users)
/// see <https://github.com/influxdata/conductor/issues/1208>
fn add_headers(metadata: &mut MetadataMap) {
    // Note we can't use capital letters otherwise the http header
    // library asserts, so return lowercase storage-type
    metadata.insert("storage-type", "iox".parse().unwrap());
}
/// Implements the protobuf defined Storage service for a [`QueryNamespaceProvider`]
#[tonic::async_trait]
impl<T> Storage for StorageService<T>
where
T: QueryNamespaceProvider + 'static,
{
    // Chunked read responses, gated by the query-rate-limit semaphore permit.
    type ReadFilterStream =
        StreamWithPermit<QueryCompletedTokenStream<ChunkReadResponses, ReadResponse, Status>>;

    /// Handles the `ReadFilter` RPC: plans raw series for the namespace and
    /// streams them back, regrouped into `MAX_READ_RESPONSE_SIZE` responses.
    async fn read_filter(
        &self,
        req: tonic::Request<ReadFilterRequest>,
    ) -> Result<Response<Self::ReadFilterStream>, Status> {
        let external_span_ctx: Option<RequestLogContext> = req.extensions().get().cloned();
        let span_ctx: Option<SpanContext> = req.extensions().get().cloned();
        let req = req.into_inner();

        // Acquire the rate-limit semaphore before doing any planning work.
        let permit = self
            .db_store
            .acquire_semaphore(span_ctx.child_span("query rate limit semaphore"))
            .await;

        let db_name = get_namespace_name(&req)?;
        info!(
            %db_name,
            ?req.range,
            predicate=%req.predicate.loggable(),
            trace=%external_span_ctx.format_jaeger(),
            "read filter",
        );

        let db = self
            .db_store
            .db(&db_name, span_ctx.child_span("get namespace"), false)
            .await
            .context(NamespaceNotFoundSnafu { db_name: &db_name })?;
        let ctx = db.new_query_context(span_ctx);
        // Token completes (for query logging) when the stream is fully consumed.
        let query_completed_token = db.record_query(&ctx, "read_filter", defer_json(&req));

        let frames = read_filter_impl(Arc::clone(&db), db_name, req, &ctx)
            .await?
            .map_err(|e| e.into_status());

        make_response(
            ChunkReadResponses::new(frames, MAX_READ_RESPONSE_SIZE),
            query_completed_token,
            permit,
        )
    }
    // Chunked read responses, gated by the query-rate-limit semaphore permit.
    type ReadGroupStream =
        StreamWithPermit<QueryCompletedTokenStream<ChunkReadResponses, ReadResponse, Status>>;

    /// Handles the `ReadGroup` RPC: plans grouped/aggregated series and
    /// streams them back as chunked [`ReadResponse`]s.
    async fn read_group(
        &self,
        req: tonic::Request<ReadGroupRequest>,
    ) -> Result<Response<Self::ReadGroupStream>, Status> {
        let external_span_ctx: Option<RequestLogContext> = req.extensions().get().cloned();
        let span_ctx: Option<SpanContext> = req.extensions().get().cloned();
        let req = req.into_inner();

        // Acquire the rate-limit semaphore before doing any planning work.
        let permit = self
            .db_store
            .acquire_semaphore(span_ctx.child_span("query rate limit semaphore"))
            .await;

        let db_name = get_namespace_name(&req)?;
        info!(
            %db_name,
            ?req.range,
            ?req.group_keys,
            ?req.group,
            ?req.aggregate,
            predicate=%req.predicate.loggable(),
            trace=%external_span_ctx.format_jaeger(),
            "read_group",
        );

        let db = self
            .db_store
            .db(&db_name, span_ctx.child_span("get namespace"), false)
            .await
            .context(NamespaceNotFoundSnafu { db_name: &db_name })?;
        let ctx = db.new_query_context(span_ctx);
        let query_completed_token = db.record_query(&ctx, "read_group", defer_json(&req));

        let ReadGroupRequest {
            read_source: _read_source,
            range,
            predicate,
            group_keys,
            group,
            aggregate,
        } = req;

        // Captured up front so conversion errors can describe the request.
        let aggregate_string =
            format!("aggregate: {aggregate:?}, group: {group:?}, group_keys: {group_keys:?}");

        let group = expr::convert_group_type(group).context(ConvertingReadGroupTypeSnafu {
            aggregate_string: &aggregate_string,
        })?;

        let gby_agg = expr::make_read_group_aggregate(aggregate, group, group_keys)
            .context(ConvertingReadGroupAggregateSnafu { aggregate_string })?;

        let frames = query_group_impl(
            Arc::clone(&db),
            db_name,
            range,
            predicate,
            gby_agg,
            TagKeyMetaNames::Text,
            &ctx,
        )
        .await
        .map_err(|e| e.into_status())?
        .map_err(|e| e.into_status());

        make_response(
            ChunkReadResponses::new(frames, MAX_READ_RESPONSE_SIZE),
            query_completed_token,
            permit,
        )
    }
type ReadWindowAggregateStream =
StreamWithPermit<QueryCompletedTokenStream<ChunkReadResponses, ReadResponse, Status>>;
async fn read_window_aggregate(
&self,
req: tonic::Request<ReadWindowAggregateRequest>,
) -> Result<Response<Self::ReadGroupStream>, Status> {
let external_span_ctx: Option<RequestLogContext> = req.extensions().get().cloned();
let span_ctx: Option<SpanContext> = req.extensions().get().cloned();
let req = req.into_inner();
let permit = self
.db_store
.acquire_semaphore(span_ctx.child_span("query rate limit semaphore"))
.await;
let db_name = get_namespace_name(&req)?;
info!(
%db_name,
?req.range,
?req.window_every,
?req.offset,
?req.aggregate,
?req.window,
predicate=%req.predicate.loggable(),
trace=%external_span_ctx.format_jaeger(),
"read_window_aggregate",
);
let db = self
.db_store
.db(&db_name, span_ctx.child_span("get namespace"), false)
.await
.context(NamespaceNotFoundSnafu { db_name: &db_name })?;
let ctx = db.new_query_context(span_ctx);
let query_completed_token =
db.record_query(&ctx, "read_window_aggregate", defer_json(&req));
let ReadWindowAggregateRequest {
read_source: _read_source,
range,
predicate,
window_every,
offset,
aggregate,
window,
tag_key_meta_names,
} = req;
let aggregate_string = format!(
"aggregate: {aggregate:?}, window_every: {window_every:?}, offset: {offset:?}, window: {window:?}"
);
let gby_agg = expr::make_read_window_aggregate(aggregate, window_every, offset, window)
.context(ConvertingWindowAggregateSnafu { aggregate_string })?;
let frames = query_group_impl(
Arc::clone(&db),
db_name,
range,
predicate,
gby_agg,
TagKeyMetaNames::from_i32(tag_key_meta_names).unwrap_or_default(),
&ctx,
)
.await
.map_err(|e| e.into_status())?
.map_err(|e| e.into_status());
make_response(
ChunkReadResponses::new(frames, MAX_READ_RESPONSE_SIZE),
query_completed_token,
permit,
)
}
    // Single-shot string-values stream, gated by the semaphore permit.
    type TagKeysStream = StreamWithPermit<
        QueryCompletedTokenStream<
            BoxStream<'static, Result<StringValuesResponse, Status>>,
            StringValuesResponse,
            Status,
        >,
    >;

    /// Handles the `TagKeys` RPC: lists tag keys in the namespace matching
    /// the optional time range and predicate.
    async fn tag_keys(
        &self,
        req: tonic::Request<TagKeysRequest>,
    ) -> Result<Response<Self::TagKeysStream>, Status> {
        let external_span_ctx: Option<RequestLogContext> = req.extensions().get().cloned();
        let span_ctx: Option<SpanContext> = req.extensions().get().cloned();
        let req = req.into_inner();

        // Acquire the rate-limit semaphore before doing any planning work.
        let permit = self
            .db_store
            .acquire_semaphore(span_ctx.child_span("query rate limit semaphore"))
            .await;

        let db_name = get_namespace_name(&req)?;
        info!(
            %db_name,
            ?req.range,
            predicate=%req.predicate.loggable(),
            trace=%external_span_ctx.format_jaeger(),
            "tag_keys",
        );

        let db = self
            .db_store
            .db(&db_name, span_ctx.child_span("get namespace"), false)
            .await
            .context(NamespaceNotFoundSnafu { db_name: &db_name })?;
        let ctx = db.new_query_context(span_ctx);
        let query_completed_token = db.record_query(&ctx, "tag_keys", defer_json(&req));

        let TagKeysRequest {
            tags_source: _tag_source,
            range,
            predicate,
        } = req;

        // No measurement restriction for the plain tag_keys RPC.
        let measurement = None;

        let response = tag_keys_impl(
            Arc::clone(&db),
            db_name,
            measurement,
            range,
            predicate,
            &ctx,
        )
        .await
        .map_err(|e| e.into_status());

        // The whole result is produced eagerly and sent as a one-item stream.
        make_response(
            futures::stream::once(async move { response }).boxed(),
            query_completed_token,
            permit,
        )
    }
    // Single-shot string-values stream, gated by the semaphore permit.
    type TagValuesStream = StreamWithPermit<
        QueryCompletedTokenStream<
            BoxStream<'static, Result<StringValuesResponse, Status>>,
            StringValuesResponse,
            Status,
        >,
    >;

    /// Handles the `TagValues` RPC: lists the values of a tag key, with
    /// special handling for the `_measurement` and `_field` pseudo-keys.
    async fn tag_values(
        &self,
        req: tonic::Request<TagValuesRequest>,
    ) -> Result<Response<Self::TagValuesStream>, Status> {
        let external_span_ctx: Option<RequestLogContext> = req.extensions().get().cloned();
        let span_ctx: Option<SpanContext> = req.extensions().get().cloned();
        let req = req.into_inner();

        // Acquire the rate-limit semaphore before doing any planning work.
        let permit = self
            .db_store
            .acquire_semaphore(span_ctx.child_span("query rate limit semaphore"))
            .await;

        let db_name = get_namespace_name(&req)?;
        // Decode the raw tag-key bytes (may be a pseudo-key marker).
        let tag_key = DecodedTagKey::try_from(req.tag_key.clone())
            .context(ConvertingTagKeyInTagValuesSnafu)?;
        info!(
            %db_name,
            ?req.range,
            %tag_key,
            predicate=%req.predicate.loggable(),
            trace=%external_span_ctx.format_jaeger(),
            "tag_values",
        );

        let db = self
            .db_store
            .db(&db_name, span_ctx.child_span("get namespace"), false)
            .await
            .context(NamespaceNotFoundSnafu { db_name: &db_name })?;
        let ctx = db.new_query_context(span_ctx);
        let query_completed_token = db.record_query(&ctx, "tag_values", defer_json(&req));

        let TagValuesRequest {
            tags_source: _tag_source,
            range,
            predicate,
            ..
        } = req;

        let measurement = None;

        // Special case: a request for 'tag_key=_measurement' means to list all
        // measurements
        let response = match tag_key {
            DecodedTagKey::Measurement => {
                if predicate.is_some() {
                    return Err(Error::NotYetImplemented {
                        operation: "tag_value for a measurement, with general predicate"
                            .to_string(),
                    }
                    .into_status());
                }

                measurement_name_impl(Arc::clone(&db), db_name, range, predicate, &ctx).await
            }
            // Special case: 'tag_key=_field' lists field names instead.
            DecodedTagKey::Field => {
                let fieldlist =
                    field_names_impl(Arc::clone(&db), db_name, None, range, predicate, &ctx)
                        .await?;

                // Pick out the field names into a Vec<Vec<u8>> for return
                let values = fieldlist
                    .fields
                    .into_iter()
                    .map(|f| f.name.bytes().collect())
                    .collect::<Vec<_>>();

                Ok(StringValuesResponse { values })
            }
            DecodedTagKey::Normal(tag_key) => {
                tag_values_impl(
                    Arc::clone(&db),
                    db_name,
                    tag_key,
                    measurement,
                    range,
                    predicate,
                    &ctx,
                )
                .await
            }
        };

        let response = response.map_err(|e| e.into_status());

        // The whole result is produced eagerly and sent as a one-item stream.
        make_response(
            futures::stream::once(async move { response }).boxed(),
            query_completed_token,
            permit,
        )
    }
    // All responses are materialized up front, then streamed from a Vec.
    type TagValuesGroupedByMeasurementAndTagKeyStream = StreamWithPermit<
        QueryCompletedTokenStream<
            futures::stream::Iter<std::vec::IntoIter<Result<TagValuesResponse, Status>>>,
            TagValuesResponse,
            Status,
        >,
    >;

    /// Handles the `TagValuesGroupedByMeasurementAndTagKey` RPC.
    async fn tag_values_grouped_by_measurement_and_tag_key(
        &self,
        req: tonic::Request<TagValuesGroupedByMeasurementAndTagKeyRequest>,
    ) -> Result<Response<Self::TagValuesGroupedByMeasurementAndTagKeyStream>, Status> {
        let external_span_ctx: Option<RequestLogContext> = req.extensions().get().cloned();
        let span_ctx: Option<SpanContext> = req.extensions().get().cloned();
        let req = req.into_inner();

        // Acquire the rate-limit semaphore before doing any planning work.
        let permit = self
            .db_store
            .acquire_semaphore(span_ctx.child_span("query rate limit semaphore"))
            .await;

        let db_name = get_namespace_name(&req)?;
        info!(
            %db_name,
            ?req.measurement_patterns,
            ?req.tag_key_predicate,
            predicate=%req.condition.loggable(),
            trace=%external_span_ctx.format_jaeger(),
            "tag_values_grouped_by_measurement_and_tag_key",
        );

        let db = self
            .db_store
            .db(&db_name, span_ctx.child_span("get namespace"), false)
            .await
            .context(NamespaceNotFoundSnafu { db_name: &db_name })?;
        let ctx = db.new_query_context(span_ctx);
        let query_completed_token = db.record_query(
            &ctx,
            "tag_values_grouped_by_measurement_and_tag_key",
            defer_json(&req),
        );

        // Compute all responses eagerly, then stream them as Ok items.
        let results =
            tag_values_grouped_by_measurement_and_tag_key_impl(Arc::clone(&db), db_name, req, &ctx)
                .await
                .map_err(|e| e.into_status())?
                .into_iter()
                .map(Ok)
                .collect::<Vec<_>>();

        make_response(
            futures::stream::iter(results),
            query_completed_token,
            permit,
        )
    }
    type ReadSeriesCardinalityStream = ReceiverStream<Result<Int64ValuesResponse, Status>>;

    /// Not implemented; panics if called. See the linked issue for status.
    async fn read_series_cardinality(
        &self,
        _req: tonic::Request<ReadSeriesCardinalityRequest>,
    ) -> Result<Response<Self::ReadSeriesCardinalityStream>, Status> {
        unimplemented!("read_series_cardinality not yet implemented. https://github.com/influxdata/influxdb_iox/issues/447");
    }
    /// Handles the `Capabilities` RPC: returns the hard-coded set of storage
    /// capabilities this server supports.
    async fn capabilities(
        &self,
        _req: tonic::Request<Empty>,
    ) -> Result<Response<CapabilitiesResponse>, Status> {
        // Full list of go capabilities in
        // idpe/storage/read/capabilities.go (aka window aggregate /
        // pushdown)
        //
        info!("capabilities");

        // For now, hard code our list of supported capabilities
        let caps = [
            ("KeySortCapability", vec!["ReadFilter"]),
            ("Group", vec!["First", "Last", "Min", "Max"]),
            (
                "TagKeyMetaNamesCapability",
                vec!["TagKeyMetaNamesWindowAggregate"],
            ),
            (
                "WindowAggregate",
                vec![
                    "Count", "Sum", // "First"
                    // "Last",
                    "Min", "Max", "Mean",
                    // "Offset"
                ],
            ),
        ];

        // Turn it into the HashMap -> Capability
        let caps = caps
            .iter()
            .map(|(cap_name, features)| {
                let features = features.iter().map(|f| f.to_string()).collect::<Vec<_>>();
                (cap_name.to_string(), Capability { features })
            })
            .collect::<HashMap<String, Capability>>();

        let caps = CapabilitiesResponse { caps };
        Ok(Response::new(caps))
    }
type MeasurementNamesStream = StreamWithPermit<
    QueryCompletedTokenStream<
        BoxStream<'static, Result<StringValuesResponse, Status>>,
        StringValuesResponse,
        Status,
    >,
>;

/// Handler for the gRPC `MeasurementNames` call: lists measurements (tables)
/// in the requested namespace, optionally restricted by time range and
/// predicate.
async fn measurement_names(
    &self,
    req: tonic::Request<MeasurementNamesRequest>,
) -> Result<Response<Self::MeasurementNamesStream>, Status> {
    // Span/trace contexts travel as request extensions.
    let external_span_ctx: Option<RequestLogContext> = req.extensions().get().cloned();
    let span_ctx: Option<SpanContext> = req.extensions().get().cloned();
    let req = req.into_inner();
    // Apply the query rate limit before doing any work; the permit is held
    // for the lifetime of the response stream.
    let permit = self
        .db_store
        .acquire_semaphore(span_ctx.child_span("query rate limit semaphore"))
        .await;
    let db_name = get_namespace_name(&req)?;
    info!(
        %db_name,
        ?req.range,
        predicate=%req.predicate.loggable(),
        trace=%external_span_ctx.format_jaeger(),
        "measurement_names",
    );
    let db = self
        .db_store
        .db(&db_name, span_ctx.child_span("get namespace"), false)
        .await
        .context(NamespaceNotFoundSnafu { db_name: &db_name })?;
    let ctx = db.new_query_context(span_ctx);
    // Token records query completion for observability/accounting.
    let query_completed_token = db.record_query(&ctx, "measurement_names", defer_json(&req));
    let MeasurementNamesRequest {
        source: _source,
        range,
        predicate,
    } = req;
    let response = measurement_name_impl(Arc::clone(&db), db_name, range, predicate, &ctx)
        .await
        .map_err(|e| e.into_status());
    // Single-element stream: the whole result is computed before returning.
    make_response(
        futures::stream::once(async move { response }).boxed(),
        query_completed_token,
        permit,
    )
}
type MeasurementTagKeysStream = StreamWithPermit<
    QueryCompletedTokenStream<
        BoxStream<'static, Result<StringValuesResponse, Status>>,
        StringValuesResponse,
        Status,
    >,
>;

/// Handler for the gRPC `MeasurementTagKeys` call: lists tag keys for a
/// single measurement, optionally restricted by time range and predicate.
async fn measurement_tag_keys(
    &self,
    req: tonic::Request<MeasurementTagKeysRequest>,
) -> Result<Response<Self::MeasurementTagKeysStream>, Status> {
    // Span/trace contexts travel as request extensions.
    let external_span_ctx: Option<RequestLogContext> = req.extensions().get().cloned();
    let span_ctx: Option<SpanContext> = req.extensions().get().cloned();
    let req = req.into_inner();
    // Rate-limit the query; the permit lives as long as the response stream.
    let permit = self
        .db_store
        .acquire_semaphore(span_ctx.child_span("query rate limit semaphore"))
        .await;
    let db_name = get_namespace_name(&req)?;
    info!(
        %db_name,
        ?req.range,
        %req.measurement,
        predicate=%req.predicate.loggable(),
        trace=%external_span_ctx.format_jaeger(),
        "measurement_tag_keys",
    );
    let db = self
        .db_store
        .db(&db_name, span_ctx.child_span("get namespace"), false)
        .await
        .context(NamespaceNotFoundSnafu { db_name: &db_name })?;
    let ctx = db.new_query_context(span_ctx);
    let query_completed_token = db.record_query(&ctx, "measurement_tag_keys", defer_json(&req));
    let MeasurementTagKeysRequest {
        source: _source,
        measurement,
        range,
        predicate,
    } = req;
    // Scope the shared tag_keys_impl to this one measurement.
    let measurement = Some(measurement);
    let response = tag_keys_impl(
        Arc::clone(&db),
        db_name,
        measurement,
        range,
        predicate,
        &ctx,
    )
    .await
    .map_err(|e| e.into_status());
    make_response(
        futures::stream::once(async move { response }).boxed(),
        query_completed_token,
        permit,
    )
}
type MeasurementTagValuesStream = StreamWithPermit<
    QueryCompletedTokenStream<
        BoxStream<'static, Result<StringValuesResponse, Status>>,
        StringValuesResponse,
        Status,
    >,
>;

/// Handler for the gRPC `MeasurementTagValues` call: lists values of one tag
/// key within a single measurement, optionally restricted by time range and
/// predicate.
async fn measurement_tag_values(
    &self,
    req: tonic::Request<MeasurementTagValuesRequest>,
) -> Result<Response<Self::MeasurementTagValuesStream>, Status> {
    // Span/trace contexts travel as request extensions.
    let external_span_ctx: Option<RequestLogContext> = req.extensions().get().cloned();
    let span_ctx: Option<SpanContext> = req.extensions().get().cloned();
    let req = req.into_inner();
    // Rate-limit the query; the permit lives as long as the response stream.
    let permit = self
        .db_store
        .acquire_semaphore(span_ctx.child_span("query rate limit semaphore"))
        .await;
    let db_name = get_namespace_name(&req)?;
    info!(
        %db_name,
        ?req.range,
        %req.measurement,
        %req.tag_key,
        predicate=%req.predicate.loggable(),
        trace=%external_span_ctx.format_jaeger(),
        "measurement_tag_values",
    );
    let db = self
        .db_store
        .db(&db_name, span_ctx.child_span("get namespace"), false)
        .await
        .context(NamespaceNotFoundSnafu { db_name: &db_name })?;
    let ctx = db.new_query_context(span_ctx);
    let query_completed_token =
        db.record_query(&ctx, "measurement_tag_values", defer_json(&req));
    let MeasurementTagValuesRequest {
        source: _source,
        measurement,
        range,
        predicate,
        tag_key,
    } = req;
    // Scope the shared tag_values_impl to this one measurement.
    let measurement = Some(measurement);
    let response = tag_values_impl(
        Arc::clone(&db),
        db_name,
        tag_key,
        measurement,
        range,
        predicate,
        &ctx,
    )
    .await
    .map_err(|e| e.into_status());
    make_response(
        futures::stream::once(async move { response }).boxed(),
        query_completed_token,
        permit,
    )
}
type MeasurementFieldsStream = StreamWithPermit<
    QueryCompletedTokenStream<
        BoxStream<'static, Result<MeasurementFieldsResponse, Status>>,
        MeasurementFieldsResponse,
        Status,
    >,
>;

/// Handler for the gRPC `MeasurementFields` call: lists field columns for a
/// single measurement, optionally restricted by time range and predicate.
async fn measurement_fields(
    &self,
    req: tonic::Request<MeasurementFieldsRequest>,
) -> Result<Response<Self::MeasurementFieldsStream>, Status> {
    // Span/trace contexts travel as request extensions.
    let external_span_ctx: Option<RequestLogContext> = req.extensions().get().cloned();
    let span_ctx: Option<SpanContext> = req.extensions().get().cloned();
    let req = req.into_inner();
    // Rate-limit the query; the permit lives as long as the response stream.
    let permit = self
        .db_store
        .acquire_semaphore(span_ctx.child_span("query rate limit semaphore"))
        .await;
    let db_name = get_namespace_name(&req)?;
    info!(
        %db_name,
        ?req.range,
        %req.measurement,
        predicate=%req.predicate.loggable(),
        trace=%external_span_ctx.format_jaeger(),
        "measurement_fields",
    );
    let db = self
        .db_store
        .db(&db_name, span_ctx.child_span("get namespace"), false)
        .await
        .context(NamespaceNotFoundSnafu { db_name: &db_name })?;
    let ctx = db.new_query_context(span_ctx);
    let query_completed_token = db.record_query(&ctx, "measurement_fields", defer_json(&req));
    let MeasurementFieldsRequest {
        source: _source,
        measurement,
        range,
        predicate,
    } = req;
    // Scope the shared field_names_impl to this one measurement.
    let measurement = Some(measurement);
    // Outer `?` unwraps query errors; the inner Result (conversion to the
    // wire response) is what flows through the response stream.
    let response = field_names_impl(
        Arc::clone(&db),
        db_name,
        measurement,
        range,
        predicate,
        &ctx,
    )
    .await
    .map(|fieldlist| {
        fieldlist_to_measurement_fields_response(fieldlist)
            .context(ConvertingFieldListSnafu)
            .map_err(|e| e.into_status())
    })
    .map_err(|e| e.into_status())?;
    make_response(
        futures::stream::once(async move { response }).boxed(),
        query_completed_token,
        permit,
    )
}
/// Handler for the gRPC `Offsets` call.
///
/// Reports a single partition with a constant offset.
async fn offsets(
    &self,
    _req: tonic::Request<Empty>,
) -> Result<Response<OffsetsResponse>, Status> {
    // We present ourselves to the rest of IDPE as a single storage node with 1 partition.
    // (Returning offset 1 just in case offset 0 is interpreted by query nodes as being special)
    Ok(Response::new(OffsetsResponse {
        partitions: vec![PartitionOffsetResponse { id: 0, offset: 1 }],
    }))
}
}
/// Derive the namespace name from the org id and bucket name carried by
/// `input`, mapping any conversion failure to an internal gRPC status.
fn get_namespace_name(input: &impl GrpcInputs) -> Result<NamespaceName<'static>, Status> {
    let org = input.org_id()?.to_string();
    let bucket = input.bucket_name()?;
    NamespaceName::from_org_and_bucket(org, bucket).map_err(|e| Status::internal(e.to_string()))
}
// The following code implements the business logic of the requests as
// methods that return Results with module specific Errors (and thus
// can use ?, etc). The trait implementations then handle mapping
// to the appropriate tonic Status
/// Gathers all measurement names that have data in the specified
/// (optional) range.
async fn measurement_name_impl<N>(
    db: Arc<N>,
    db_name: NamespaceName<'static>,
    range: Option<TimestampRange>,
    rpc_predicate: Option<Predicate>,
    ctx: &IOxSessionContext,
) -> Result<StringValuesResponse>
where
    N: QueryNamespace + 'static,
{
    // Keep a debug rendering of the raw predicate for error context.
    let rpc_predicate_string = format!("{rpc_predicate:?}");
    let db_name = db_name.as_str();

    // Convert the gRPC predicate + range into an InfluxRPC predicate.
    let predicate = InfluxRpcPredicateBuilder::default()
        .set_range(range)
        .rpc_predicate(rpc_predicate)
        .context(ConvertingPredicateSnafu {
            rpc_predicate_string,
        })?
        .build();

    // Plan, then execute the table-name query.
    let planner = Planner::new(ctx);
    let plan = planner
        .table_names(db, predicate)
        .await
        .context(ListingTablesSnafu { db_name })?;
    let table_names = ctx
        .to_string_set(plan)
        .await
        .context(ListingTablesSnafu { db_name })?;

    // Map the resulting collection of Strings into a Vec<Vec<u8>> for return.
    let mut values: Vec<Vec<u8>> = Vec::with_capacity(table_names.len());
    for name in table_names.iter() {
        values.push(name.bytes().collect());
    }

    trace!(measurement_names=?values.iter().map(|k| String::from_utf8_lossy(k)).collect::<Vec<_>>(), "Measurement names response");
    Ok(StringValuesResponse { values })
}
/// Return tag keys with optional measurement, timestamp and arbitrary
/// predicates
async fn tag_keys_impl<N>(
    db: Arc<N>,
    db_name: NamespaceName<'static>,
    measurement: Option<String>,
    range: Option<TimestampRange>,
    rpc_predicate: Option<Predicate>,
    ctx: &IOxSessionContext,
) -> Result<StringValuesResponse>
where
    N: QueryNamespace + 'static,
{
    // Debug rendering of the raw predicate, used only in error context.
    let rpc_predicate_string = format!("{rpc_predicate:?}");
    let db_name = db_name.as_str();
    // Convert the gRPC predicate (plus optional measurement scope and time
    // range) into an InfluxRPC predicate.
    let predicate = InfluxRpcPredicateBuilder::default()
        .set_range(range)
        .table_option(measurement.clone())
        .context(SettingPredicateTableSnafu { table: measurement })?
        .rpc_predicate(rpc_predicate)
        .context(ConvertingPredicateSnafu {
            rpc_predicate_string,
        })?
        .build();
    // Plan, then execute the tag-key query.
    let tag_key_plan = Planner::new(ctx)
        .tag_keys(db, predicate)
        .await
        .context(ListingColumnsSnafu { db_name })?;
    let tag_keys = ctx
        .to_string_set(tag_key_plan)
        .await
        .context(ListingColumnsSnafu { db_name })?;
    // Map the resulting collection of Strings into a Vec<Vec<u8>>for return
    let values = tag_keys_to_byte_vecs(tag_keys);
    trace!(tag_keys=?values.iter().map(|k| String::from_utf8_lossy(k)).collect::<Vec<_>>(), "Tag keys response");
    Ok(StringValuesResponse { values })
}
/// Return tag values for tag_name, with optional measurement, timestamp and
/// arbitrary predicates
async fn tag_values_impl<N>(
    db: Arc<N>,
    db_name: NamespaceName<'static>,
    tag_name: String,
    measurement: Option<String>,
    range: Option<TimestampRange>,
    rpc_predicate: Option<Predicate>,
    ctx: &IOxSessionContext,
) -> Result<StringValuesResponse>
where
    N: QueryNamespace + 'static,
{
    // Debug rendering of the raw predicate, used only in error context.
    let rpc_predicate_string = format!("{rpc_predicate:?}");
    // Convert the gRPC predicate (plus optional measurement scope and time
    // range) into an InfluxRPC predicate.
    let predicate = InfluxRpcPredicateBuilder::default()
        .set_range(range)
        .table_option(measurement.clone())
        .context(SettingPredicateTableSnafu { table: measurement })?
        .rpc_predicate(rpc_predicate)
        .context(ConvertingPredicateSnafu {
            rpc_predicate_string,
        })?
        .build();
    let db_name = db_name.as_str();
    let tag_name = &tag_name;
    // Plan, then execute the tag-value query.
    let tag_value_plan = Planner::new(ctx)
        .tag_values(db, tag_name, predicate)
        .await
        .context(ListingTagValuesSnafu { db_name, tag_name })?;
    let tag_values = ctx
        .to_string_set(tag_value_plan)
        .await
        .context(ListingTagValuesSnafu { db_name, tag_name })?;
    // Map the resulting collection of Strings into a Vec<Vec<u8>>for return
    let values: Vec<Vec<u8>> = tag_values
        .iter()
        .map(|name| name.bytes().collect())
        .collect();
    trace!(tag_values=?values.iter().map(|k| String::from_utf8_lossy(k)).collect::<Vec<_>>(), "Tag values response");
    Ok(StringValuesResponse { values })
}
/// Return tag values grouped by one or more measurements with optional
/// filtering predicate and optionally scoped to one or more tag keys.
async fn tag_values_grouped_by_measurement_and_tag_key_impl<N>(
    db: Arc<N>,
    db_name: NamespaceName<'static>,
    req: TagValuesGroupedByMeasurementAndTagKeyRequest,
    ctx: &IOxSessionContext,
) -> Result<Vec<TagValuesResponse>, Error>
where
    N: QueryNamespace + 'static,
{
    // Extract the tag key predicate.
    // See https://docs.influxdata.com/influxdb/v1.8/query_language/explore-schema/#show-tag-values
    // for more details.
    let tag_key_pred = req
        .tag_key_predicate
        .context(MissingTagKeyPredicateSnafu {})?
        .value
        .context(MissingTagKeyPredicateSnafu {})?;
    // Because we need to return tag values grouped by measurements and tag
    // keys we will materialise the measurements up front, so we can build up
    // groups of tag values grouped by a measurement and tag key.
    let measurements = materialise_measurement_names(
        Arc::clone(&db),
        db_name.clone(),
        req.measurement_patterns,
        ctx,
    )
    .await?;
    let mut responses = vec![];
    // NOTE: one sequential query per (measurement, tag key) pair.
    for name in measurements.into_iter() {
        let tag_keys = materialise_tag_keys(
            Arc::clone(&db),
            db_name.clone(),
            name.clone(),
            tag_key_pred.clone(),
            ctx,
        )
        .await?;
        for key in tag_keys {
            // get all the tag values associated with this measurement name and tag key
            let values = tag_values_impl(
                Arc::clone(&db),
                db_name.clone(),
                key.clone(),
                Some(name.clone()),
                None,
                req.condition.clone(),
                ctx,
            )
            .await?
            .values
            .into_iter()
            .map(|v| String::from_utf8(v).expect("tag values should be UTF-8 valid"))
            .collect::<Vec<_>>();
            // Don't emit a response if there are no matching tag values.
            if values.is_empty() {
                continue;
            }
            responses.push(TagValuesResponse {
                measurement: name.clone(),
                key: key.clone(),
                values,
            });
        }
    }
    Ok(responses)
}
/// Launch async tasks that materialises the result of executing read_filter.
async fn read_filter_impl<N>(
    db: Arc<N>,
    db_name: NamespaceName<'static>,
    req: ReadFilterRequest,
    ctx: &IOxSessionContext,
) -> Result<impl Stream<Item = Result<Frame, Error>>, Error>
where
    N: QueryNamespace + 'static,
{
    let db_name = db_name.as_str();
    // Debug rendering of the raw predicate, used only in error context.
    let rpc_predicate_string = format!("{:?}", req.predicate);
    // Convert the gRPC predicate + range into an InfluxRPC predicate.
    let predicate = InfluxRpcPredicateBuilder::default()
        .set_range(req.range)
        .rpc_predicate(req.predicate)
        .context(ConvertingPredicateSnafu {
            rpc_predicate_string,
        })?
        .build();
    // PERF - This used to send responses to the client before execution had
    // completed, but now it doesn't. We may need to revisit this in the future
    // if big queries are causing a significant latency in TTFB.
    // Build the plans
    let series_plan = Planner::new(ctx)
        .read_filter(db, predicate)
        .await
        .context(PlanningFilteringSeriesSnafu { db_name })?;
    // Execute the plans.
    let db_name = db_name.to_owned();
    let series_or_groups = ctx
        .to_series_and_groups(
            series_plan,
            Arc::clone(&ctx.inner().runtime_env().memory_pool),
            MAX_POINTS_PER_FRAME,
        )
        .await
        .context(FilteringSeriesSnafu {
            db_name: db_name.clone(),
        })
        .log_if_error("Running series set plan")?
        // Wrap errors produced while streaming individual series items.
        .map_err(move |e| Error::FilteringSeries {
            db_name: db_name.clone(),
            source: e,
        });
    // Tag keys can be emitted in binary or text form, per the request.
    let emit_tag_keys_binary_format = req.tag_key_meta_names == TagKeyMetaNames::Binary as i32;
    Ok(series_or_groups_to_frames(
        series_or_groups,
        emit_tag_keys_binary_format,
    ))
}
/// Launch async tasks that send the result of executing read_group to `tx`
async fn query_group_impl<N>(
db: Arc<N>,
db_name: NamespaceName<'static>,
range: Option<TimestampRange>,
rpc_predicate: Option<Predicate>,
gby_agg: GroupByAndAggregate,
tag_key_meta_names: TagKeyMetaNames,
ctx: &IOxSessionContext,
) -> Result<impl Stream<Item = Result<Frame, Error>>>
where
N: QueryNamespace + 'static,
{
let db_name = db_name.as_str();
let rpc_predicate_string = format!("{rpc_predicate:?}");
let predicate = InfluxRpcPredicateBuilder::default()
.set_range(range)
.rpc_predicate(rpc_predicate)
.context(ConvertingPredicateSnafu {
rpc_predicate_string,
})?
.build();
let planner = Planner::new(ctx);
let grouped_series_set_plan = match gby_agg {
GroupByAndAggregate::Columns { agg, group_columns } => {
planner.read_group(db, predicate, agg, group_columns).await
}
GroupByAndAggregate::Window { agg, every, offset } => {
planner
.read_window_aggregate(db, predicate, agg, every, offset)
.await
}
};
let grouped_series_set_plan =
grouped_series_set_plan.context(PlanningGroupSeriesSnafu { db_name })?;
// PERF - This used to send responses to the client before execution had
// completed, but now it doesn't. We may need to revisit this in the future
// if big queries are causing a significant latency in TTFB.
// Execute the plans
let db_name = db_name.to_owned();
let series_or_groups = ctx
.to_series_and_groups(
grouped_series_set_plan,
Arc::clone(&ctx.inner().runtime_env().memory_pool),
MAX_POINTS_PER_FRAME,
)
.await
.context(GroupingSeriesSnafu {
db_name: db_name.clone(),
})
.log_if_error("Running Grouped SeriesSet Plan")?
.map_err(move |e| Error::FilteringSeries {
db_name: db_name.clone(),
source: e,
});
let tag_key_binary_format = tag_key_meta_names == TagKeyMetaNames::Binary;
Ok(series_or_groups_to_frames(
series_or_groups,
tag_key_binary_format,
))
}
/// Return field names, restricted via optional measurement, timestamp and
/// predicate.
async fn field_names_impl<N>(
    db: Arc<N>,
    db_name: NamespaceName<'static>,
    measurement: Option<String>,
    range: Option<TimestampRange>,
    rpc_predicate: Option<Predicate>,
    ctx: &IOxSessionContext,
) -> Result<FieldList>
where
    N: QueryNamespace + 'static,
{
    // Retain a debug rendering of the raw predicate for error reporting.
    let rpc_predicate_string = format!("{rpc_predicate:?}");

    // Convert the gRPC predicate (plus optional measurement scope and time
    // range) into an InfluxRPC predicate.
    let predicate = InfluxRpcPredicateBuilder::default()
        .set_range(range)
        .table_option(measurement.clone())
        .context(SettingPredicateTableSnafu { table: measurement })?
        .rpc_predicate(rpc_predicate)
        .context(ConvertingPredicateSnafu {
            rpc_predicate_string,
        })?
        .build();

    let db_name = db_name.as_str();

    // Plan, then execute the field-column query.
    let planner = Planner::new(ctx);
    let field_list_plan = planner
        .field_columns(db, predicate)
        .await
        .context(ListingFieldsSnafu { db_name })?;
    let field_list = ctx
        .to_field_list(field_list_plan)
        .await
        .context(ListingFieldsSnafu { db_name })?;

    trace!(field_names=?field_list, "Field names response");
    Ok(field_list)
}
/// Materialises a collection of measurement names. Typically used as part of
/// a plan to scope and group multiple plans by measurement name.
async fn materialise_measurement_names<N>(
    db: Arc<N>,
    db_name: NamespaceName<'static>,
    measurement_exprs: Vec<LiteralOrRegex>,
    ctx: &IOxSessionContext,
) -> Result<BTreeSet<String>, Error>
where
    N: QueryNamespace + 'static,
{
    use generated_types::{
        node::{Comparison, Type, Value},
        Node,
    };
    let mut names = BTreeSet::new();
    // Materialise all measurements
    if measurement_exprs.is_empty() {
        let resp = measurement_name_impl(Arc::clone(&db), db_name.clone(), None, None, ctx).await?;
        for name in resp.values {
            names
                .insert(String::from_utf8(name).expect("table/measurement name to be valid UTF-8"));
        }
        return Ok(names);
    }
    // Materialise measurements that satisfy the provided predicates.
    for expr in measurement_exprs {
        match expr.value {
            Some(expr) => match expr {
                // Literal names are taken as-is, without checking they exist.
                RegexOrLiteralValue::LiteralValue(lit) => {
                    names.insert(lit);
                }
                // Regex patterns are turned into a `_measurement =~ /pattern/`
                // comparison node and evaluated against the namespace.
                RegexOrLiteralValue::RegexValue(pattern) => {
                    let regex_node = Node {
                        node_type: Type::ComparisonExpression as i32,
                        children: vec![
                            Node {
                                node_type: Type::TagRef as i32,
                                children: vec![],
                                value: Some(Value::TagRefValue(TAG_KEY_MEASUREMENT.to_vec())),
                            },
                            Node {
                                node_type: Type::Literal as i32,
                                children: vec![],
                                value: Some(Value::RegexValue(pattern)),
                            },
                        ],
                        value: Some(Value::Comparison(Comparison::Regex as i32)),
                    };
                    let resp = measurement_name_impl(
                        Arc::clone(&db),
                        db_name.clone(),
                        None,
                        Some(Predicate {
                            root: Some(regex_node),
                        }),
                        ctx,
                    )
                    .await?;
                    for name in resp.values {
                        names.insert(
                            String::from_utf8(name)
                                .expect("table/measurement name to be valid UTF-8"),
                        );
                    }
                }
            },
            // An expression without a value is a malformed request.
            None => return MeasurementLiteralOrRegexSnafu { pred: expr }.fail(),
        }
    }
    Ok(names)
}
/// Materialises a collection of tag keys for a given measurement.
///
/// TODO(edd): this might be better represented as a plan against the `columns`
/// system table.
async fn materialise_tag_keys<N>(
    db: Arc<N>,
    db_name: NamespaceName<'static>,
    measurement_name: String,
    tag_key_predicate: tag_key_predicate::Value,
    ctx: &IOxSessionContext,
) -> Result<BTreeSet<String>, Error>
where
    N: QueryNamespace + 'static,
{
    use generated_types::tag_key_predicate::Value;
    if let Value::Eq(elem) = tag_key_predicate {
        // If the predicate is a simple literal match then return that value
        // regardless of whether or not the tag key exists.
        return Ok(vec![elem].into_iter().collect::<BTreeSet<_>>());
    } else if let Value::In(elem) = tag_key_predicate {
        // If the predicate is a list of literal matches then return those
        // regardless of whether or not those tag keys exist.
        return Ok(elem.vals.into_iter().collect::<BTreeSet<_>>());
    }
    // Otherwise materialise the tag keys for this measurement and filter out
    // any that don't pass the provided tag key predicate.
    let mut tag_keys = tag_keys_impl(
        Arc::clone(&db),
        db_name.clone(),
        Some(measurement_name),
        None,
        None,
        ctx,
    )
    .await?
    .values
    .into_iter()
    .filter_map(|v| match v.as_slice() {
        // The tag_keys plan will yield the special measurement and field tag keys
        // which are not real tag keys. Filter them out.
        TAG_KEY_MEASUREMENT | TAG_KEY_FIELD => None,
        _ => Some(String::from_utf8(v).expect("tag keys should be UTF-8 valid")),
    })
    .collect::<BTreeSet<_>>();
    // Filter out tag keys according to the type of expression provided.
    match tag_key_predicate {
        Value::Neq(value) => tag_keys.retain(|elem| elem != &value),
        Value::EqRegex(pattern) => {
            let re = regex::Regex::new(&pattern).context(InvalidTagKeyRegexSnafu)?;
            tag_keys.retain(|elem| re.is_match(elem));
        }
        Value::NeqRegex(pattern) => {
            let re = regex::Regex::new(&pattern).context(InvalidTagKeyRegexSnafu)?;
            tag_keys.retain(|elem| !re.is_match(elem));
        }
        // Eq / In were consumed by the early returns above.
        x => unreachable!("predicate should have been handled already {:?}", x),
    }
    Ok(tag_keys)
}
/// Return something which can be formatted as json ("pbjson"
/// specifically)
fn defer_json<S>(s: &S) -> QueryText
where
    S: serde::Serialize + Send + Sync + Clone + 'static,
{
    /// Wrapper that delays JSON serialisation until the value is displayed.
    struct LazyJson<S>
    where
        S: serde::Serialize,
    {
        value: S,
    }

    impl<S: serde::Serialize> std::fmt::Display for LazyJson<S> {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            // `Formatter` doesn't implement `std::io::Write`, so serialise to
            // an intermediate `String` first.
            let rendered = serde_json::to_string_pretty(&self.value);
            match rendered {
                Ok(json) => f.write_str(&json),
                Err(e) => write!(f, "error formatting: {e}"),
            }
        }
    }

    Box::new(LazyJson { value: s.clone() })
}
/// Add ability for Results to log error messages via `error!` logs.
/// This is useful when using async tasks that may not have any code
/// checking their return values.
pub trait ErrorLogger {
    /// Log the contents of self with a string of context. The context
    /// should appear in a message such as
    ///
    /// "Error `<context>`: `<formatted error message>`
    fn log_if_error(self, context: &str) -> Self;

    /// Provided method to log an error via the `error!` macro
    fn log_error<E: std::fmt::Debug>(context: &str, e: E) {
        error!("Error {}: {:?}", context, e);
    }
}

/// Implement logging for all results
impl<T, E: std::fmt::Debug> ErrorLogger for Result<T, E> {
    // Logs the error (by reference) and returns `self` unchanged, so it can
    // be chained inline with `?`.
    fn log_if_error(self, context: &str) -> Self {
        if let Err(e) = &self {
            Self::log_error(context, e);
        }
        self
    }
}
/// Return the stream of results as a gRPC (tonic) response.
///
/// Ties the query-completion token and the concurrency permit to the
/// lifetime of the response stream, and attaches the standard headers.
#[allow(clippy::type_complexity)]
pub fn make_response<S, T, E>(
    stream: S,
    token: QueryCompletedToken,
    permit: InstrumentedAsyncOwnedSemaphorePermit,
) -> Result<Response<StreamWithPermit<QueryCompletedTokenStream<S, T, E>>>, Status>
where
    S: Stream<Item = Result<T, E>> + Unpin + Send,
{
    let wrapped = QueryCompletedTokenStream::new(stream, token);
    let mut resp = Response::new(StreamWithPermit::new(wrapped, permit));
    add_headers(resp.metadata_mut());
    Ok(resp)
}
#[cfg(test)]
mod tests {
use crate::test_util::Fixture;
use super::*;
use futures::Future;
use generated_types::{google::rpc::Status as GrpcStatus, tag_key_predicate::Value};
use influxdb_storage_client::{generated_types::*, Client as StorageClient, OrgAndBucket};
use iox_query::test::TestChunk;
use metric::{Attributes, Metric, U64Counter, U64Gauge};
use service_common::test_util::TestDatabaseStore;
use std::{any::Any, num::NonZeroU64, sync::Arc};
use test_helpers::{assert_contains, maybe_start_logging};
use tokio::pin;
/// Convert a slice of string slices into a vector of owned `String`s.
fn to_str_vec(s: &[&str]) -> Vec<String> {
    s.iter().map(|item| String::from(*item)).collect()
}
// Helper function to assert that metric tracking all gRPC requests has
// correctly updated.
fn grpc_request_metric_has_count(
    fixture: &Fixture,
    path: &'static str,
    status: &'static str,
    expected: u64,
) {
    // Look up the counter registered under "grpc_requests" ...
    let metrics = fixture
        .test_storage
        .metric_registry
        .get_instrument::<Metric<U64Counter>>("grpc_requests")
        .unwrap();
    // ... then fetch the observation for this (path, status) attribute pair.
    let observation = metrics
        .get_observer(&Attributes::from([
            (
                "path",
                format!("/influxdata.platform.storage.Storage/{path}").into(),
            ),
            ("status", status.into()),
        ]))
        .unwrap()
        .fetch();
    // Dump all metrics on failure to ease debugging.
    assert_eq!(
        observation, expected,
        "\n\npath: {path}\nstatus:{status}\nobservation:{observation}\nexpected:{expected}\n\nAll metrics:\n\n{metrics:#?}"
    );
}
/// Verifies that the `capabilities` handler reports exactly the hard-coded
/// capability set.
#[tokio::test]
async fn test_storage_rpc_capabilities() {
    test_helpers::maybe_start_logging();
    // Start a test gRPC server on a randomally allocated port
    let mut fixture = Fixture::new().await.expect("Connecting to test server");
    // Test response from storage server
    let mut expected_capabilities: HashMap<String, Vec<String>> = HashMap::new();
    expected_capabilities.insert("KeySortCapability".into(), to_str_vec(&["ReadFilter"]));
    expected_capabilities.insert(
        "TagKeyMetaNamesCapability".into(),
        to_str_vec(&["TagKeyMetaNamesWindowAggregate"]),
    );
    expected_capabilities.insert("Group".into(), to_str_vec(&["First", "Last", "Min", "Max"]));
    expected_capabilities.insert(
        "WindowAggregate".into(),
        to_str_vec(&["Count", "Sum", "Min", "Max", "Mean"]),
    );
    assert_eq!(
        expected_capabilities,
        fixture.storage_client.capabilities().await.unwrap()
    );
}
/// Fixed org/bucket pair (org=123, bucket=456) shared by the tests below.
fn org_and_bucket() -> OrgAndBucket {
    OrgAndBucket::new(NonZeroU64::new(123).unwrap(), NonZeroU64::new(456).unwrap())
}
/// Exercises `measurement_names` with no range, a time range, and a general
/// predicate; all three should return both measurements.
#[tokio::test]
async fn test_storage_rpc_measurement_names() {
    test_helpers::maybe_start_logging();
    // Start a test gRPC server on a randomally allocated port
    let mut fixture = Fixture::new().await.expect("Connecting to test server");
    let db_info = org_and_bucket();
    // Two measurements, each with one row at time 1000.
    let chunk0 = TestChunk::new("h2o")
        .with_id(0)
        .with_tag_column("state")
        .with_time_column_with_stats(Some(1000), Some(1000))
        .with_one_row_of_data();
    let chunk1 = TestChunk::new("o2")
        .with_id(1)
        .with_tag_column("state")
        .with_time_column_with_stats(Some(1000), Some(1000))
        .with_one_row_of_data();
    fixture
        .test_storage
        .db_or_create(db_info.db_name())
        .await
        .add_chunk("my_partition_key", Arc::new(chunk0))
        .add_chunk("my_partition_key", Arc::new(chunk1));
    let source = Some(StorageClient::read_source(&db_info, 1));
    // --- No timestamps
    let request = MeasurementNamesRequest {
        source: source.clone(),
        range: None,
        predicate: None,
    };
    let actual_measurements = fixture
        .storage_client
        .measurement_names(request)
        .await
        .unwrap();
    let expected_measurements = to_string_vec(&["h2o", "o2"]);
    assert_eq!(actual_measurements, expected_measurements);
    // --- Timestamp range
    let range = TimestampRange {
        start: 900,
        end: 1100,
    };
    let request = MeasurementNamesRequest {
        source,
        range: Some(range),
        predicate: None,
    };
    let actual_measurements = fixture
        .storage_client
        .measurement_names(request)
        .await
        .unwrap();
    let expected_measurements = to_string_vec(&["h2o", "o2"]);
    assert_eq!(actual_measurements, expected_measurements);
    // --- general predicate
    let request = MeasurementNamesRequest {
        source: Some(StorageClient::read_source(&db_info, 1)),
        range: Some(TimestampRange {
            start: 900,
            end: 1100,
        }),
        predicate: Some(make_state_eq_ma_predicate()),
    };
    let actual_measurements = fixture
        .storage_client
        .measurement_names(request)
        .await
        .unwrap();
    let expected_measurements = to_string_vec(&["h2o", "o2"]);
    assert_eq!(actual_measurements, expected_measurements);
    // Three successful calls above should be reflected in the metrics.
    grpc_request_metric_has_count(&fixture, "MeasurementNames", "ok", 3);
}
/// test the plumbing of the RPC layer for tag_keys -- specifically that
/// the right parameters are passed into the Namespace interface
/// and that the returned values are sent back via gRPC.
#[tokio::test]
async fn test_storage_rpc_tag_keys() {
    test_helpers::maybe_start_logging();
    // Start a test gRPC server on a randomally allocated port
    let mut fixture = Fixture::new().await.expect("Connecting to test server");
    let db_info = org_and_bucket();
    // Note multiple tables / measureemnts:
    let chunk0 = TestChunk::new("m1")
        .with_id(0)
        .with_tag_column("state")
        .with_tag_column("k1")
        .with_tag_column("k2")
        .with_time_column()
        .with_one_row_of_data();
    let chunk1 = TestChunk::new("m2")
        .with_id(1)
        .with_tag_column("state")
        .with_tag_column("k3")
        .with_tag_column("k4")
        .with_time_column()
        .with_one_row_of_data();
    fixture
        .test_storage
        .db_or_create(db_info.db_name())
        .await
        .add_chunk("my_partition_key", Arc::new(chunk0))
        .add_chunk("my_partition_key", Arc::new(chunk1));
    let source = Some(StorageClient::read_source(&db_info, 1));
    let request = TagKeysRequest {
        tags_source: source.clone(),
        range: Some(make_timestamp_range(950, 1050)),
        predicate: Some(make_state_eq_ma_predicate()),
    };
    let actual_tag_keys = fixture.storage_client.tag_keys(request).await.unwrap();
    // "_f(0xff)" and "_m(0x00)" are the special field/measurement tag keys.
    let expected_tag_keys = vec!["_f(0xff)", "_m(0x00)", "k1", "k2", "k3", "k4", "state"];
    assert_eq!(actual_tag_keys, expected_tag_keys,);
    grpc_request_metric_has_count(&fixture, "TagKeys", "ok", 1);
}
/// Verifies that an error raised inside the namespace surfaces through the
/// `tag_keys` RPC and is counted as a server error.
#[tokio::test]
async fn test_storage_rpc_tag_keys_error() {
    test_helpers::maybe_start_logging();
    // Start a test gRPC server on a randomally allocated port
    let mut fixture = Fixture::new().await.expect("Connecting to test server");
    let db_info = org_and_bucket();
    let chunk = TestChunk::new("my_table").with_error("Sugar we are going down");
    fixture
        .test_storage
        .db_or_create(db_info.db_name())
        .await
        .add_chunk("my_partition_key", Arc::new(chunk));
    let source = Some(StorageClient::read_source(&db_info, 1));
    // ---
    // test error
    // ---
    let request = TagKeysRequest {
        tags_source: source.clone(),
        // add some time range or predicate, otherwise we do a pure metadata lookup
        range: Some(make_timestamp_range(150, 200)),
        predicate: None,
    };
    let response = fixture.storage_client.tag_keys(request).await;
    assert_contains!(response.unwrap_err().to_string(), "Sugar we are going down");
    grpc_request_metric_has_count(&fixture, "TagKeys", "server_error", 1);
}
/// test the plumbing of the RPC layer for measurement_tag_keys--
/// specifically that the right parameters are passed into the Namespace
/// interface and that the returned values are sent back via gRPC.
#[tokio::test]
async fn test_storage_rpc_measurement_tag_keys() {
    test_helpers::maybe_start_logging();
    // Start a test gRPC server on a randomally allocated port
    let mut fixture = Fixture::new().await.expect("Connecting to test server");
    let db_info = org_and_bucket();
    let chunk0 = TestChunk::new("m1")
        // predicate specifies m4, so this is filtered out
        .with_tag_column("k0")
        .with_time_column()
        .with_one_row_of_data();
    let chunk1 = TestChunk::new("m4")
        .with_tag_column("state")
        .with_tag_column("k1")
        .with_tag_column("k2")
        .with_tag_column("k3")
        .with_tag_column("k4")
        .with_time_column()
        .with_one_row_of_data();
    fixture
        .test_storage
        .db_or_create(db_info.db_name())
        .await
        .add_chunk("my_partition_key", Arc::new(chunk0))
        .add_chunk("my_partition_key", Arc::new(chunk1));
    let source = Some(StorageClient::read_source(&db_info, 1));
    // ---
    // Timestamp + Predicate
    // ---
    let request = MeasurementTagKeysRequest {
        measurement: "m4".into(),
        source: source.clone(),
        range: Some(make_timestamp_range(950, 1050)),
        predicate: Some(make_state_eq_ma_predicate()),
    };
    let actual_tag_keys = fixture
        .storage_client
        .measurement_tag_keys(request)
        .await
        .unwrap();
    // "_f(0xff)" and "_m(0x00)" are the special field/measurement tag keys.
    let expected_tag_keys = vec!["_f(0xff)", "_m(0x00)", "k1", "k2", "k3", "k4", "state"];
    assert_eq!(
        actual_tag_keys, expected_tag_keys,
        "unexpected tag keys while getting column names"
    );
    grpc_request_metric_has_count(&fixture, "MeasurementTagKeys", "ok", 1);
}
/// Verifies that an error raised inside the namespace surfaces through the
/// `measurement_tag_keys` RPC and is counted as a server error.
#[tokio::test]
async fn test_storage_rpc_measurement_tag_keys_error() {
    test_helpers::maybe_start_logging();
    // Start a test gRPC server on a randomally allocated port
    let mut fixture = Fixture::new().await.expect("Connecting to test server");
    let db_info = org_and_bucket();
    // predicate specifies m5
    let chunk = TestChunk::new("m5").with_error("This is an error");
    fixture
        .test_storage
        .db_or_create(db_info.db_name())
        .await
        .add_chunk("my_partition_key", Arc::new(chunk));
    let source = Some(StorageClient::read_source(&db_info, 1));
    // ---
    // test error
    // ---
    let request = MeasurementTagKeysRequest {
        measurement: "m5".into(),
        source: source.clone(),
        // add some time range or predicate, otherwise we do a pure metadata lookup
        range: Some(make_timestamp_range(150, 200)),
        predicate: None,
    };
    let response = fixture.storage_client.measurement_tag_keys(request).await;
    assert_contains!(response.unwrap_err().to_string(), "This is an error");
    grpc_request_metric_has_count(&fixture, "MeasurementTagKeys", "server_error", 1);
}
/// test the plumbing of the RPC layer for tag_values -- specifically that
/// the right parameters are passed into the Namespace interface
/// and that the returned values are sent back via gRPC.
#[tokio::test]
async fn test_storage_rpc_tag_values() {
    test_helpers::maybe_start_logging();
    // Start a test gRPC server on a randomally allocated port
    let mut fixture = Fixture::new().await.expect("Connecting to test server");
    let db_info = org_and_bucket();
    // Add a chunk with a field
    let chunk = TestChunk::new("TheMeasurement")
        .with_time_column()
        .with_tag_column("state")
        .with_one_row_of_data();
    fixture
        .test_storage
        .db_or_create(db_info.db_name())
        .await
        .add_chunk("my_partition_key", Arc::new(chunk));
    let source = Some(StorageClient::read_source(&db_info, 1));
    let request = TagValuesRequest {
        tags_source: source.clone(),
        range: Some(make_timestamp_range(150, 2000)),
        predicate: Some(make_state_eq_ma_predicate()),
        tag_key: "state".into(),
    };
    let actual_tag_values = fixture.storage_client.tag_values(request).await.unwrap();
    assert_eq!(actual_tag_values, vec!["MA"]);
    grpc_request_metric_has_count(&fixture, "TagValues", "ok", 1);
}
/// test the plumbing of the RPC layer for tag_values
///
/// For the special case of
///
/// tag_key = _measurement means listing all measurement names
#[tokio::test]
async fn test_storage_rpc_tag_values_with_measurement() {
    test_helpers::maybe_start_logging();

    // Start a test gRPC server on a randomly allocated port
    let mut fixture = Fixture::new().await.expect("Connecting to test server");

    let db_info = org_and_bucket();

    let source = Some(StorageClient::read_source(&db_info, 1));

    // ---
    // test tag_key = _measurement means listing all measurement names
    // ---
    let request = TagValuesRequest {
        tags_source: source.clone(),
        range: Some(make_timestamp_range(1000, 1500)),
        predicate: None,
        // byte 0x00 is the storage protocol's encoding of the special
        // `_measurement` tag key (cf. the "_m(0x00)" expected value in the
        // tag-keys tests above)
        tag_key: [0].into(),
    };

    // chunk time stats (1100..=1200) sit inside the requested range, so the
    // measurement name should be reported
    let chunk = TestChunk::new("h2o")
        .with_tag_column("tag")
        .with_time_column_with_stats(Some(1100), Some(1200))
        .with_one_row_of_data();

    fixture
        .test_storage
        .db_or_create(db_info.db_name())
        .await
        .add_chunk("my_partition_key", Arc::new(chunk));

    let tag_values = vec!["h2o"];
    let actual_tag_values = fixture.storage_client.tag_values(request).await.unwrap();
    assert_eq!(
        actual_tag_values, tag_values,
        "unexpected tag values while getting tag values for measurement names"
    );

    grpc_request_metric_has_count(&fixture, "TagValues", "ok", 1);
}
/// tag_values with the special `_field` tag key (0xff) must list the field
/// (not tag) column names of the measurement.
#[tokio::test]
async fn test_storage_rpc_tag_values_field() {
    test_helpers::maybe_start_logging();

    // Start a test gRPC server on a randomly allocated port
    let mut fixture = Fixture::new().await.expect("Connecting to test server");

    let db_info = org_and_bucket();

    // Add a chunk with a field
    let chunk = TestChunk::new("TheMeasurement")
        .with_i64_field_column("Field1")
        .with_time_column()
        .with_tag_column("state")
        .with_one_row_of_data();

    fixture
        .test_storage
        .db_or_create(db_info.db_name())
        .await
        .add_chunk("my_partition_key", Arc::new(chunk));

    let source = Some(StorageClient::read_source(&db_info, 1));

    // ---
    // test tag_key = _field means listing all field names
    // ---
    let request = TagValuesRequest {
        tags_source: source.clone(),
        range: Some(make_timestamp_range(0, 2000)),
        predicate: Some(make_state_eq_ma_predicate()),
        // byte 0xff is the storage protocol's encoding of the special
        // `_field` tag key (cf. "_f(0xff)" in the tag-keys tests above)
        tag_key: [255].into(),
    };

    // only the field column is expected -- not the tag or time columns
    let expected_tag_values = vec!["Field1"];
    let actual_tag_values = fixture.storage_client.tag_values(request).await.unwrap();
    assert_eq!(
        actual_tag_values, expected_tag_values,
        "unexpected tag values while getting tag values for field names"
    );

    grpc_request_metric_has_count(&fixture, "TagValues", "ok", 1);
}
/// tag_values error paths: a backend chunk error must surface as a
/// `server_error`, and a non-UTF-8 tag key must be rejected as a
/// `client_error`; both outcomes are counted in the request metrics.
#[tokio::test]
async fn test_storage_rpc_tag_values_error() {
    test_helpers::maybe_start_logging();

    // Start a test gRPC server on a randomly allocated port
    let mut fixture = Fixture::new().await.expect("Connecting to test server");

    let db_info = org_and_bucket();

    // chunk rigged to fail when scanned
    let chunk = TestChunk::new("my_table")
        .with_tag_column("the_tag_key")
        .with_error("Sugar we are going down");

    fixture
        .test_storage
        .db_or_create(db_info.db_name())
        .await
        .add_chunk("my_partition_key", Arc::new(chunk));

    let source = Some(StorageClient::read_source(&db_info, 1));

    // ---
    // test error
    // ---
    let request = TagValuesRequest {
        tags_source: source.clone(),
        range: None,
        predicate: None,
        tag_key: "the_tag_key".into(),
    };

    let response_string = fixture
        .storage_client
        .tag_values(request)
        .await
        .unwrap_err()
        .to_string();
    assert_contains!(response_string, "Sugar we are going down");

    // ---
    // test error with non utf8 value
    // ---
    let request = TagValuesRequest {
        tags_source: source.clone(),
        range: None,
        predicate: None,
        tag_key: [0, 255].into(), // this is not a valid UTF-8 string
    };

    let response_string = fixture
        .storage_client
        .tag_values(request)
        .await
        .unwrap_err()
        .to_string();
    assert_contains!(
        response_string,
        "Error converting tag_key to UTF-8 in tag_values request"
    );

    // error from backend error
    grpc_request_metric_has_count(&fixture, "TagValues", "server_error", 1);
    // error from bad utf8
    grpc_request_metric_has_count(&fixture, "TagValues", "client_error", 1);
}
/// Table-driven test of tag_values_grouped_by_measurement_and_tag_key:
/// each case pairs a SHOW TAG VALUES-style description with the
/// measurement patterns, tag-key predicate, and optional condition sent in
/// the request, plus the grouped responses expected back.
///
/// NOTE(fix): several descriptions previously contradicted the request they
/// labelled (wrong table name, FROM/WHERE clauses that were not in the
/// request). They are only used in assertion-failure messages; they have
/// been corrected to describe the request actually sent.
#[tokio::test]
async fn test_storage_rpc_tag_values_grouped_by_measurement_and_tag_key() {
    test_helpers::maybe_start_logging();

    // Start a test gRPC server on a randomly allocated port
    let mut fixture = Fixture::new().await.expect("Connecting to test server");

    let db_info = org_and_bucket();

    // Two measurements, each with a `state` tag, so the FROM-style
    // measurement patterns can select either or both tables.
    let chunk1 = TestChunk::new("table_a")
        .with_time_column()
        .with_id(0)
        .with_tag_column("state")
        .with_one_row_of_data();

    let chunk2 = TestChunk::new("table_b")
        .with_time_column()
        .with_id(1)
        .with_tag_column("state")
        .with_one_row_of_data();

    fixture
        .test_storage
        .db_or_create(db_info.db_name())
        .await
        .add_chunk("my_partition_key", Arc::new(chunk1))
        .add_chunk("my_partition_key", Arc::new(chunk2));

    let source = Some(StorageClient::read_source(&db_info, 1));

    // (description, measurement_patterns, tag_key_predicate, condition, expected)
    let cases = vec![
        // no measurement filter: both tables report their `state` values
        (
            "SHOW TAG VALUES WITH KEY = 'state'",
            vec![],
            Some(TagKeyPredicate {
                value: Some(Value::Eq("state".into())),
            }),
            None,
            vec![
                TagValuesResponse {
                    measurement: "table_a".into(),
                    key: "state".into(),
                    values: vec!["MA".into()],
                },
                TagValuesResponse {
                    measurement: "table_b".into(),
                    key: "state".into(),
                    values: vec!["MA".into()],
                },
            ],
        ),
        // literal measurement pattern selects exactly one table
        (
            "SHOW TAG VALUES FROM 'table_a' WITH KEY = 'state'",
            vec![LiteralOrRegex {
                value: Some(RegexOrLiteralValue::LiteralValue("table_a".into())),
            }],
            Some(TagKeyPredicate {
                value: Some(Value::Eq("state".into())),
            }),
            None,
            vec![TagValuesResponse {
                measurement: "table_a".into(),
                key: "state".into(),
                values: vec!["MA".into()],
            }],
        ),
        // regex measurement pattern matching both tables
        (
            "SHOW TAG VALUES FROM /table.*/ WITH KEY = 'state'",
            vec![LiteralOrRegex {
                value: Some(RegexOrLiteralValue::RegexValue("table.*".into())),
            }],
            Some(TagKeyPredicate {
                value: Some(Value::Eq("state".into())),
            }),
            None,
            vec![
                TagValuesResponse {
                    measurement: "table_a".into(),
                    key: "state".into(),
                    values: vec!["MA".into()],
                },
                TagValuesResponse {
                    measurement: "table_b".into(),
                    key: "state".into(),
                    values: vec!["MA".into()],
                },
            ],
        ),
        // regex measurement pattern matching one table
        (
            "SHOW TAG VALUES FROM /.*a$/ WITH KEY = 'state'",
            vec![LiteralOrRegex {
                value: Some(RegexOrLiteralValue::RegexValue(".*a$".into())),
            }],
            Some(TagKeyPredicate {
                value: Some(Value::Eq("state".into())),
            }),
            None,
            vec![TagValuesResponse {
                measurement: "table_a".into(),
                key: "state".into(),
                values: vec!["MA".into()],
            }],
        ),
        // a condition that excludes every row yields no responses
        (
            "SHOW TAG VALUES WITH KEY = 'state' WHERE state != 'MA'",
            vec![],
            Some(TagKeyPredicate {
                value: Some(Value::Eq("state".into())),
            }),
            Some(make_state_neq_ma_predicate()),
            vec![],
        ),
        // a condition that keeps every row yields both tables
        (
            "SHOW TAG VALUES WITH KEY = 'state' WHERE state >= 'MA'",
            vec![],
            Some(TagKeyPredicate {
                value: Some(Value::Eq("state".into())),
            }),
            Some(make_state_geq_ma_predicate()),
            vec![
                TagValuesResponse {
                    measurement: "table_a".into(),
                    key: "state".into(),
                    values: vec!["MA".into()],
                },
                TagValuesResponse {
                    measurement: "table_b".into(),
                    key: "state".into(),
                    values: vec!["MA".into()],
                },
            ],
        ),
        // negated tag-key predicate: `state` is the only key != 'foo'
        (
            "SHOW TAG VALUES FROM 'table_b' WITH KEY != 'foo'",
            vec![LiteralOrRegex {
                value: Some(RegexOrLiteralValue::LiteralValue("table_b".into())),
            }],
            Some(TagKeyPredicate {
                value: Some(Value::Neq("foo".into())),
            }),
            None,
            vec![TagValuesResponse {
                measurement: "table_b".into(),
                key: "state".into(),
                values: vec!["MA".into()],
            }],
        ),
        // regex tag-key predicate
        (
            "SHOW TAG VALUES WITH KEY =~ /sta.*/ WHERE state >= 'MA'",
            vec![],
            Some(TagKeyPredicate {
                value: Some(Value::EqRegex("sta.*".into())),
            }),
            Some(make_state_geq_ma_predicate()),
            vec![
                TagValuesResponse {
                    measurement: "table_a".into(),
                    key: "state".into(),
                    values: vec!["MA".into()],
                },
                TagValuesResponse {
                    measurement: "table_b".into(),
                    key: "state".into(),
                    values: vec!["MA".into()],
                },
            ],
        ),
        // negated regex tag-key predicate
        (
            "SHOW TAG VALUES FROM 'table_b' WITH KEY !~ /$ab/ WHERE state >= 'MA'",
            vec![LiteralOrRegex {
                value: Some(RegexOrLiteralValue::LiteralValue("table_b".into())),
            }],
            Some(TagKeyPredicate {
                value: Some(Value::NeqRegex("$ab".into())),
            }),
            Some(make_state_geq_ma_predicate()),
            vec![TagValuesResponse {
                measurement: "table_b".into(),
                key: "state".into(),
                values: vec!["MA".into()],
            }],
        ),
        // same negated regex predicate restricted to table_a
        (
            "SHOW TAG VALUES FROM 'table_a' WITH KEY !~ /$ab/ WHERE state >= 'MA'",
            vec![LiteralOrRegex {
                value: Some(RegexOrLiteralValue::LiteralValue("table_a".into())),
            }],
            Some(TagKeyPredicate {
                value: Some(Value::NeqRegex("$ab".into())),
            }),
            Some(make_state_geq_ma_predicate()),
            vec![TagValuesResponse {
                measurement: "table_a".into(),
                key: "state".into(),
                values: vec!["MA".into()],
            }],
        ),
    ];

    for (
        description,
        measurement_patterns,
        tag_key_predicate,
        condition,
        expected_tag_values,
    ) in cases
    {
        let request = TagValuesGroupedByMeasurementAndTagKeyRequest {
            source: source.clone(),
            measurement_patterns,
            tag_key_predicate,
            condition,
        };

        let actual_tag_values = fixture
            .storage_client
            .tag_values_grouped_by_measurement_and_tag_key(request)
            .await
            .unwrap();

        assert_eq!(
            actual_tag_values, expected_tag_values,
            "{description} failed"
        );
    }
}
/// A grouped tag_values request without a tag-key predicate must be
/// rejected with a "Missing tag key predicate" error.
#[tokio::test]
async fn test_storage_rpc_tag_values_grouped_by_measurement_and_tag_key_error() {
    test_helpers::maybe_start_logging();

    // Spin up a test gRPC server on a randomly allocated port.
    let mut fixture = Fixture::new().await.expect("Connecting to test server");
    let db_info = org_and_bucket();

    let empty_chunk = TestChunk::new("my_table");
    fixture
        .test_storage
        .db_or_create(db_info.db_name())
        .await
        .add_chunk("my_partition_key", Arc::new(empty_chunk));

    let read_source = Some(StorageClient::read_source(&db_info, 1));

    // ---
    // test error
    // ---
    let request = TagValuesGroupedByMeasurementAndTagKeyRequest {
        source: read_source,
        measurement_patterns: vec![],
        tag_key_predicate: None,
        condition: None,
    };

    let message = fixture
        .storage_client
        .tag_values_grouped_by_measurement_and_tag_key(request)
        .await
        .unwrap_err()
        .to_string();
    assert_contains!(message, "Missing tag key predicate");
}
/// test the plumbing of the RPC layer for measurement_tag_values
#[tokio::test]
async fn test_storage_rpc_measurement_tag_values() {
    test_helpers::maybe_start_logging();
    let mut fixture = Fixture::new().await.expect("Connecting to test server");

    let db_info = org_and_bucket();

    // Add a chunk with a field
    let chunk = TestChunk::new("TheMeasurement")
        .with_time_column()
        .with_tag_column("state")
        .with_one_row_of_data();

    fixture
        .test_storage
        .db_or_create(db_info.db_name())
        .await
        .add_chunk("my_partition_key", Arc::new(chunk));

    let source = Some(StorageClient::read_source(&db_info, 1));

    // request scoped to one measurement, one tag key, with both a time
    // range and a predicate that match the seeded row
    let request = MeasurementTagValuesRequest {
        measurement: "TheMeasurement".into(),
        source: source.clone(),
        range: Some(make_timestamp_range(150, 2000)),
        predicate: Some(make_state_eq_ma_predicate()),
        tag_key: "state".into(),
    };

    let actual_tag_values = fixture
        .storage_client
        .measurement_tag_values(request)
        .await
        .unwrap();

    assert_eq!(
        actual_tag_values,
        vec!["MA"],
        "unexpected tag values while getting tag values",
    );

    grpc_request_metric_has_count(&fixture, "MeasurementTagValues", "ok", 1);
}
/// measurement_tag_values must propagate chunk-level errors to the caller
/// and record a `server_error` request metric.
#[tokio::test]
async fn test_storage_rpc_measurement_tag_values_error() {
    test_helpers::maybe_start_logging();
    let mut fixture = Fixture::new().await.expect("Connecting to test server");
    let db_info = org_and_bucket();

    // Chunk rigged to fail when scanned.
    let erroring_chunk = TestChunk::new("m5")
        .with_tag_column("the_tag_key")
        .with_error("Sugar we are going down");
    fixture
        .test_storage
        .db_or_create(db_info.db_name())
        .await
        .add_chunk("my_partition_key", Arc::new(erroring_chunk));

    let read_source = Some(StorageClient::read_source(&db_info, 1));

    // ---
    // test error
    // ---
    let request = MeasurementTagValuesRequest {
        measurement: "m5".into(),
        source: read_source,
        range: None,
        predicate: None,
        tag_key: "the_tag_key".into(),
    };

    // Note we don't set the column_names on the test namespace, so we expect an
    // error
    let err_text = fixture
        .storage_client
        .measurement_tag_values(request)
        .await
        .unwrap_err()
        .to_string();
    assert_contains!(err_text, "Sugar we are going down");

    grpc_request_metric_has_count(&fixture, "MeasurementTagValues", "server_error", 1);
}
/// Happy-path read_filter plumbing: request is accepted and a frame stream
/// is returned; the `ok` request metric is incremented.
#[tokio::test]
async fn test_read_filter() {
    test_helpers::maybe_start_logging();

    // Start a test gRPC server on a randomly allocated port
    let mut fixture = Fixture::new().await.expect("Connecting to test server");

    let db_info = org_and_bucket();

    // Add a chunk with a field
    let chunk = TestChunk::new("TheMeasurement")
        .with_time_column()
        .with_tag_column("state")
        .with_one_row_of_data();

    fixture
        .test_storage
        .db_or_create(db_info.db_name())
        .await
        .add_chunk("my_partition_key", Arc::new(chunk));

    let source = Some(StorageClient::read_source(&db_info, 1));

    let request = ReadFilterRequest {
        read_source: source.clone(),
        range: Some(make_timestamp_range(0, 10000)),
        predicate: Some(make_state_eq_ma_predicate()),
        ..Default::default()
    };

    let frames = fixture
        .storage_client
        .read_filter(request.clone())
        .await
        .unwrap();

    // TODO: encode the actual output in the test case or something
    assert_eq!(
        frames.len(),
        0,
        "unexpected frames returned by query_series"
    );

    grpc_request_metric_has_count(&fixture, "ReadFilter", "ok", 1);
}
/// read_filter with a predicate comparing a tag against the empty string
/// must succeed rather than error.
#[tokio::test]
async fn test_read_filter_empty_string() {
    test_helpers::maybe_start_logging();

    // Spin up a test gRPC server on a randomly allocated port.
    let mut fixture = Fixture::new().await.expect("Connecting to test server");
    let db_info = org_and_bucket();

    // Seed one row carrying a `state` tag.
    let seeded_chunk = TestChunk::new("TheMeasurement")
        .with_time_column()
        .with_tag_column("state")
        .with_one_row_of_data();
    fixture
        .test_storage
        .db_or_create(db_info.db_name())
        .await
        .add_chunk("my_partition_key", Arc::new(seeded_chunk));

    let read_source = Some(StorageClient::read_source(&db_info, 1));
    let empty_value_predicate = make_tag_predicate("state", "", node::Comparison::Equal);

    let request = ReadFilterRequest {
        read_source,
        range: Some(make_timestamp_range(0, 10000)),
        predicate: Some(empty_value_predicate),
        ..Default::default()
    };

    fixture.storage_client.read_filter(request).await.unwrap();
}
/// A read_filter tag predicate whose key happens to name a *field* column
/// must match nothing (fields are not tags).
#[tokio::test]
async fn test_read_filter_field_as_tag() {
    test_helpers::maybe_start_logging();

    // Start a test gRPC server on a randomly allocated port
    let mut fixture = Fixture::new().await.expect("Connecting to test server");

    let db_info = org_and_bucket();

    // Add a chunk with a field
    let chunk = TestChunk::new("TheMeasurement")
        .with_time_column()
        .with_tag_column("state")
        .with_string_field_column_with_stats("fff", None, None)
        .with_one_row_of_data();

    fixture
        .test_storage
        .db_or_create(db_info.db_name())
        .await
        .add_chunk("my_partition_key", Arc::new(chunk));

    let source = Some(StorageClient::read_source(&db_info, 1));

    // Create a tag predicate that happens to match the name
    // of a field.
    let request = ReadFilterRequest {
        read_source: source.clone(),
        range: None,
        predicate: Some(make_tag_predicate("fff", "MA", node::Comparison::Equal)),
        ..Default::default()
    };

    let frames = fixture
        .storage_client
        .read_filter(request.clone())
        .await
        .unwrap();

    // should return no data because `fff` is not a tag, it is a field.
    assert_eq!(
        frames.len(),
        0,
        "unexpected frames returned by query_series"
    );

    grpc_request_metric_has_count(&fixture, "ReadFilter", "ok", 1);
}
/// read_filter must surface a chunk-level error and count it as a
/// `server_error` in the request metrics.
#[tokio::test]
async fn test_read_filter_error() {
    test_helpers::maybe_start_logging();

    // Spin up a test gRPC server on a randomly allocated port.
    let mut fixture = Fixture::new().await.expect("Connecting to test server");
    let db_info = org_and_bucket();

    let failing_chunk = TestChunk::new("my_table").with_error("Sugar we are going down");
    fixture
        .test_storage
        .db_or_create(db_info.db_name())
        .await
        .add_chunk("my_partition_key", Arc::new(failing_chunk));

    let read_source = Some(StorageClient::read_source(&db_info, 1));

    // ---
    // test error
    // ---
    let request = ReadFilterRequest {
        read_source,
        range: None,
        predicate: None,
        ..Default::default()
    };

    // Note we don't set the response on the test namespace, so we expect an error
    let response = fixture.storage_client.read_filter(request).await;
    assert_contains!(response.unwrap_err().to_string(), "Sugar we are going down");

    grpc_request_metric_has_count(&fixture, "ReadFilter", "server_error", 1);
}
/// Happy-path read_group: a Sum aggregate grouped by `state` over one row
/// yields the expected three-frame stream.
#[tokio::test]
async fn test_read_group() {
    test_helpers::maybe_start_logging();

    // Start a test gRPC server on a randomly allocated port
    let mut fixture = Fixture::new().await.expect("Connecting to test server");

    let db_info = org_and_bucket();

    let chunk = TestChunk::new("TheMeasurement")
        .with_time_column()
        .with_i64_field_column("my field")
        .with_tag_column("state")
        .with_one_row_of_data();

    fixture
        .test_storage
        .db_or_create(db_info.db_name())
        .await
        .add_chunk("my_partition_key", Arc::new(chunk));

    let source = Some(StorageClient::read_source(&db_info, 1));

    // Group::By groups output series by the listed group_keys
    let group = generated_types::read_group_request::Group::By as i32;

    let request = ReadGroupRequest {
        read_source: source.clone(),
        range: Some(make_timestamp_range(0, 2000)),
        predicate: Some(make_state_eq_ma_predicate()),
        group_keys: vec!["state".into()],
        group,
        aggregate: Some(Aggregate {
            r#type: aggregate::AggregateType::Sum as i32,
        }),
    };

    let frames = fixture.storage_client.read_group(request).await.unwrap();

    // three frames:
    // GroupFrame
    // SeriesFrame (tag=state, field=my field)
    // DataFrame
    assert_eq!(frames.len(), 3);

    grpc_request_metric_has_count(&fixture, "ReadGroup", "ok", 1);
}
/// read_group must surface a chunk-level error and count it as a
/// `server_error` in the request metrics.
#[tokio::test]
async fn test_read_group_error() {
    test_helpers::maybe_start_logging();

    // Start a test gRPC server on a randomly allocated port
    let mut fixture = Fixture::new().await.expect("Connecting to test server");

    let db_info = org_and_bucket();

    // chunk rigged to fail when scanned
    let chunk = TestChunk::new("my_table").with_error("Sugar we are going down");

    fixture
        .test_storage
        .db_or_create(db_info.db_name())
        .await
        .add_chunk("my_partition_key", Arc::new(chunk));

    let source = Some(StorageClient::read_source(&db_info, 1));

    let group = generated_types::read_group_request::Group::By as i32;

    // ---
    // test error returned in namespace processing
    // ---
    let request = ReadGroupRequest {
        read_source: source.clone(),
        range: None,
        predicate: None,
        group_keys: vec!["tag1".into()],
        group,
        aggregate: Some(Aggregate {
            r#type: aggregate::AggregateType::Sum as i32,
        }),
    };

    // Note we don't set the response on the test namespace, so we expect an error
    let response_string = fixture
        .storage_client
        .read_group(request)
        .await
        .unwrap_err()
        .to_string();
    assert_contains!(response_string, "Sugar we are going down");

    grpc_request_metric_has_count(&fixture, "ReadGroup", "server_error", 1);
}
/// read_window_aggregate using the flat `window_every`/`offset` fields
/// (as opposed to the structured `window` message) completes successfully.
#[tokio::test]
async fn test_read_window_aggregate_window_every() {
    test_helpers::maybe_start_logging();

    // Start a test gRPC server on a randomly allocated port
    let mut fixture = Fixture::new().await.expect("Connecting to test server");

    let db_info = org_and_bucket();

    // Add a chunk with a field
    let chunk = TestChunk::new("TheMeasurement")
        .with_time_column()
        .with_tag_column("state")
        .with_one_row_of_data();

    fixture
        .test_storage
        .db_or_create(db_info.db_name())
        .await
        .add_chunk("my_partition_key", Arc::new(chunk));

    let source = Some(StorageClient::read_source(&db_info, 1));

    // -----
    // Test with window_every/offset setup
    // -----

    let request_window_every = ReadWindowAggregateRequest {
        read_source: source.clone(),
        range: Some(make_timestamp_range(0, 2000)),
        predicate: Some(make_state_eq_ma_predicate()),
        window_every: 1122,
        offset: 15,
        aggregate: vec![Aggregate {
            r#type: aggregate::AggregateType::Sum as i32,
        }],
        // old skool window definition
        window: None,
        tag_key_meta_names: TagKeyMetaNames::Text as i32,
    };

    let frames = fixture
        .storage_client
        .read_window_aggregate(request_window_every)
        .await
        .unwrap();

    assert_eq!(
        frames.len(),
        0,
        "unexpected frames returned by query_groups"
    );

    grpc_request_metric_has_count(&fixture, "ReadWindowAggregate", "ok", 1);
}
#[tokio::test]
async fn test_read_window_aggregate_every_offset() {
test_helpers::maybe_start_logging();
// Start a test gRPC server on a randomally allocated port
let mut fixture = Fixture::new().await.expect("Connecting to test server");
let db_info = org_and_bucket();
// Add a chunk with a field
let chunk = TestChunk::new("TheMeasurement")
.with_time_column()
.with_tag_column("state")
.with_one_row_of_data();
fixture
.test_storage
.db_or_create(db_info.db_name())
.await
.add_chunk("my_partition_key", Arc::new(chunk));
let source = Some(StorageClient::read_source(&db_info, 1));
// -----
// Test with window.every and window.offset durations specified
// -----
let request_window = ReadWindowAggregateRequest {
read_source: source.clone(),
range: Some(make_timestamp_range(150, 200)),
predicate: Some(make_state_eq_ma_predicate()),
window_every: 0,
offset: 0,
aggregate: vec![Aggregate {
r#type: aggregate::AggregateType::Sum as i32,
}],
// old skool window definition
window: Some(Window {
every: Some(Duration {
nsecs: 1122,
months: 0,
negative: false,
}),
offset: Some(Duration {
nsecs: 0,
months: 4,
negative: false,
}),
}),
tag_key_meta_names: TagKeyMetaNames::Text as i32,
};
let frames = fixture
.storage_client
.read_window_aggregate(request_window)
.await
.unwrap();
assert_eq!(
frames.len(),
0,
"unexpected frames returned by query_groups"
)
}
/// read_window_aggregate must surface a chunk-level error and count it as
/// a `server_error` in the request metrics.
#[tokio::test]
async fn test_read_window_aggregate_error() {
    test_helpers::maybe_start_logging();

    // Start a test gRPC server on a randomly allocated port
    let mut fixture = Fixture::new().await.expect("Connecting to test server");

    let db_info = org_and_bucket();

    // chunk rigged to fail when scanned
    let chunk = TestChunk::new("my_table").with_error("Sugar we are going down");

    fixture
        .test_storage
        .db_or_create(db_info.db_name())
        .await
        .add_chunk("my_partition_key", Arc::new(chunk));

    let source = Some(StorageClient::read_source(&db_info, 1));

    // ---
    // test error
    // ---
    let request_window = ReadWindowAggregateRequest {
        read_source: source.clone(),
        range: Some(make_timestamp_range(0, 2000)),
        predicate: Some(make_state_eq_ma_predicate()),
        window_every: 1122,
        offset: 15,
        aggregate: vec![Aggregate {
            r#type: aggregate::AggregateType::Sum as i32,
        }],
        // old skool window definition
        window: None,
        tag_key_meta_names: TagKeyMetaNames::Text as i32,
    };

    let response_string = fixture
        .storage_client
        .read_window_aggregate(request_window)
        .await
        .unwrap_err()
        .to_string();
    assert_contains!(response_string, "Sugar we are going down");

    grpc_request_metric_has_count(&fixture, "ReadWindowAggregate", "server_error", 1);
}
/// Happy-path measurement_fields: the seeded i64 field is reported with its
/// type and last timestamp, and the `ok` metric is incremented.
#[tokio::test]
async fn test_measurement_fields() {
    test_helpers::maybe_start_logging();

    // Start a test gRPC server on a randomly allocated port
    let mut fixture = Fixture::new().await.expect("Connecting to test server");

    let db_info = org_and_bucket();

    // Add a chunk with a field
    let chunk = TestChunk::new("TheMeasurement")
        .with_i64_field_column("Field1")
        .with_time_column()
        .with_tag_column("state")
        .with_one_row_of_data();

    fixture
        .test_storage
        .db_or_create(db_info.db_name())
        .await
        .add_chunk("my_partition_key", Arc::new(chunk));

    let source = Some(StorageClient::read_source(&db_info, 1));

    let request = MeasurementFieldsRequest {
        source: source.clone(),
        measurement: "TheMeasurement".into(),
        range: Some(make_timestamp_range(0, 2000)),
        predicate: Some(make_state_eq_ma_predicate()),
    };

    let actual_fields = fixture
        .storage_client
        .measurement_fields(request)
        .await
        .unwrap();
    // stringified field descriptor: name, wire field type (1 = i64 here,
    // per the expected value), and the field's last timestamp
    let expected_fields: Vec<String> = vec!["key: Field1, type: 1, timestamp: 1000".into()];

    assert_eq!(
        actual_fields, expected_fields,
        "unexpected frames returned by measurement_fields"
    );

    grpc_request_metric_has_count(&fixture, "MeasurementFields", "ok", 1);
}
/// measurement_fields must propagate a chunk-level error to the caller.
// NOTE(review): unlike the sibling error tests, this one does not assert the
// "server_error" request metric -- possibly an oversight; confirm before adding.
#[tokio::test]
async fn test_measurement_fields_error() {
    test_helpers::maybe_start_logging();

    // Start a test gRPC server on a randomly allocated port
    let mut fixture = Fixture::new().await.expect("Connecting to test server");

    let db_info = org_and_bucket();

    // chunk rigged to fail when scanned
    let chunk = TestChunk::new("TheMeasurement").with_error("Sugar we are going down");

    fixture
        .test_storage
        .db_or_create(db_info.db_name())
        .await
        .add_chunk("my_partition_key", Arc::new(chunk));

    let source = Some(StorageClient::read_source(&db_info, 1));

    // ---
    // test error
    // ---
    // near-maximal range exercises timestamp-boundary handling as well
    let request = MeasurementFieldsRequest {
        source: source.clone(),
        measurement: "TheMeasurement".into(),
        range: Some(make_timestamp_range(i64::MIN, i64::MAX - 2)),
        predicate: None,
    };

    let response_string = fixture
        .storage_client
        .measurement_fields(request)
        .await
        .unwrap_err()
        .to_string();
    assert_contains!(response_string, "Sugar we are going down");
}
/// One variant per storage RPC that is gated by the query semaphore; used by
/// `test_query_semaphore` to exercise permit accounting uniformly across all
/// request types.
#[derive(Debug, Clone)]
enum SemaphoredRequest {
    MeasurementFields,
    MeasurementNames,
    MeasurementTagKeys,
    MeasurementTagValues,
    ReadFilter,
    ReadGroup,
    ReadWindowAggregate,
    TagKeys,
    TagValues,
    TagValuesGroupedByMeasurementAndTagKey,
}
impl SemaphoredRequest {
    /// Issues the RPC corresponding to `self` directly against `service`
    /// (bypassing the gRPC transport) and returns the streaming response
    /// boxed as `dyn Any`. The caller only needs to hold/drop the response
    /// to control semaphore permit lifetime; its concrete type is irrelevant.
    async fn request(&self, service: &StorageService<TestDatabaseStore>) -> Box<dyn Any> {
        let db_info = org_and_bucket();
        let source = Some(StorageClient::read_source(&db_info, 1));

        // Each arm builds a minimal valid request for its RPC and unwraps the
        // response -- these are happy-path calls; failures here are test bugs.
        match self {
            Self::MeasurementFields => {
                let request = MeasurementFieldsRequest {
                    source: source.clone(),
                    measurement: "TheMeasurement".into(),
                    range: Some(make_timestamp_range(0, 2000)),
                    predicate: Some(make_state_eq_ma_predicate()),
                };
                let streaming_resp = service
                    .measurement_fields(tonic::Request::new(request))
                    .await
                    .unwrap();
                Box::new(streaming_resp) as _
            }
            Self::MeasurementNames => {
                let request = MeasurementNamesRequest {
                    source: source.clone(),
                    range: None,
                    predicate: None,
                };
                let streaming_resp = service
                    .measurement_names(tonic::Request::new(request))
                    .await
                    .unwrap();
                Box::new(streaming_resp) as _
            }
            Self::MeasurementTagKeys => {
                let request = MeasurementTagKeysRequest {
                    measurement: "TheMeasurement".into(),
                    source: source.clone(),
                    range: Some(make_timestamp_range(0, 200)),
                    predicate: Some(make_state_eq_ma_predicate()),
                };
                let streaming_resp = service
                    .measurement_tag_keys(tonic::Request::new(request))
                    .await
                    .unwrap();
                Box::new(streaming_resp) as _
            }
            Self::MeasurementTagValues => {
                let request = MeasurementTagValuesRequest {
                    measurement: "TheMeasurement".into(),
                    source: source.clone(),
                    range: Some(make_timestamp_range(150, 2000)),
                    predicate: Some(make_state_eq_ma_predicate()),
                    tag_key: "state".into(),
                };
                let streaming_resp = service
                    .measurement_tag_values(tonic::Request::new(request))
                    .await
                    .unwrap();
                Box::new(streaming_resp) as _
            }
            Self::ReadFilter => {
                let request = ReadFilterRequest {
                    read_source: source.clone(),
                    range: Some(make_timestamp_range(0, 10000)),
                    ..Default::default()
                };
                let streaming_resp = service
                    .read_filter(tonic::Request::new(request))
                    .await
                    .unwrap();
                Box::new(streaming_resp) as _
            }
            Self::ReadGroup => {
                let group = generated_types::read_group_request::Group::By as i32;
                let request = ReadGroupRequest {
                    read_source: source.clone(),
                    range: Some(make_timestamp_range(0, 2000)),
                    predicate: Some(make_state_eq_ma_predicate()),
                    group_keys: vec!["state".into()],
                    group,
                    aggregate: Some(Aggregate {
                        r#type: aggregate::AggregateType::Sum as i32,
                    }),
                };
                let streaming_resp = service
                    .read_group(tonic::Request::new(request))
                    .await
                    .unwrap();
                Box::new(streaming_resp) as _
            }
            Self::ReadWindowAggregate => {
                let request = ReadWindowAggregateRequest {
                    read_source: source.clone(),
                    range: Some(make_timestamp_range(150, 200)),
                    predicate: Some(make_state_eq_ma_predicate()),
                    window_every: 0,
                    offset: 0,
                    aggregate: vec![Aggregate {
                        r#type: aggregate::AggregateType::Sum as i32,
                    }],
                    // old skool window definition
                    window: Some(Window {
                        every: Some(Duration {
                            nsecs: 1122,
                            months: 0,
                            negative: false,
                        }),
                        offset: Some(Duration {
                            nsecs: 0,
                            months: 4,
                            negative: false,
                        }),
                    }),
                    tag_key_meta_names: TagKeyMetaNames::Text as i32,
                };
                let streaming_resp = service
                    .read_window_aggregate(tonic::Request::new(request))
                    .await
                    .unwrap();
                Box::new(streaming_resp) as _
            }
            Self::TagKeys => {
                let request = TagKeysRequest {
                    tags_source: source.clone(),
                    range: Some(make_timestamp_range(0, 2000)),
                    predicate: Some(make_state_eq_ma_predicate()),
                };
                let streaming_resp = service
                    .tag_keys(tonic::Request::new(request))
                    .await
                    .unwrap();
                Box::new(streaming_resp) as _
            }
            Self::TagValues => {
                let request = TagValuesRequest {
                    tags_source: source.clone(),
                    range: Some(make_timestamp_range(0, 2000)),
                    predicate: Some(make_state_eq_ma_predicate()),
                    // 0xff = special `_field` tag key
                    tag_key: [255].into(),
                };
                let streaming_resp = service
                    .tag_values(tonic::Request::new(request))
                    .await
                    .unwrap();
                Box::new(streaming_resp) as _
            }
            Self::TagValuesGroupedByMeasurementAndTagKey => {
                let request = TagValuesGroupedByMeasurementAndTagKeyRequest {
                    source: source.clone(),
                    measurement_patterns: vec![],
                    tag_key_predicate: Some(TagKeyPredicate {
                        value: Some(Value::Eq("state".into())),
                    }),
                    condition: None,
                };
                let streaming_resp = service
                    .tag_values_grouped_by_measurement_and_tag_key(tonic::Request::new(request))
                    .await
                    .unwrap();
                Box::new(streaming_resp) as _
            }
        }
    }

    /// Every variant, so the semaphore test covers all gated RPCs.
    fn all() -> Vec<Self> {
        vec![
            Self::MeasurementFields,
            Self::MeasurementNames,
            Self::MeasurementTagKeys,
            Self::MeasurementTagValues,
            Self::ReadFilter,
            Self::ReadGroup,
            Self::ReadWindowAggregate,
            Self::TagKeys,
            Self::TagValues,
            Self::TagValuesGroupedByMeasurementAndTagKey,
        ]
    }
}
/// For every semaphored RPC: with a 2-permit semaphore, two in-flight
/// responses hold two permits, a third request parks as pending, and
/// dropping a response releases its permit -- all observed via the
/// semaphore metrics.
#[tokio::test]
async fn test_query_semaphore() {
    maybe_start_logging();
    let semaphore_size = 2;
    let test_storage = Arc::new(TestDatabaseStore::new_with_semaphore_size(semaphore_size));

    // add some data
    let db_info = org_and_bucket();
    let chunk = TestChunk::new("TheMeasurement")
        .with_time_column()
        .with_tag_column("state")
        .with_one_row_of_data();
    test_storage
        .db_or_create(db_info.db_name())
        .await
        .add_chunk("my_partition_key", Arc::new(chunk));

    // construct request
    for t in SemaphoredRequest::all() {
        println!("Testing with request: {t:?}");
        let service = StorageService {
            db_store: Arc::clone(&test_storage),
        };

        // baseline: 2 total, 0 pending, 0 acquired
        assert_semaphore_metric(
            &test_storage.metric_registry,
            "iox_async_semaphore_permits_total",
            2,
        );
        assert_semaphore_metric(
            &test_storage.metric_registry,
            "iox_async_semaphore_permits_pending",
            0,
        );
        assert_semaphore_metric(
            &test_storage.metric_registry,
            "iox_async_semaphore_permits_acquired",
            0,
        );

        // first response holds one permit while alive
        let streaming_resp1 = t.request(&service).await;

        assert_semaphore_metric(
            &test_storage.metric_registry,
            "iox_async_semaphore_permits_total",
            2,
        );
        assert_semaphore_metric(
            &test_storage.metric_registry,
            "iox_async_semaphore_permits_pending",
            0,
        );
        assert_semaphore_metric(
            &test_storage.metric_registry,
            "iox_async_semaphore_permits_acquired",
            1,
        );

        // second response exhausts the semaphore
        let streaming_resp2 = t.request(&service).await;

        assert_semaphore_metric(
            &test_storage.metric_registry,
            "iox_async_semaphore_permits_total",
            2,
        );
        assert_semaphore_metric(
            &test_storage.metric_registry,
            "iox_async_semaphore_permits_pending",
            0,
        );
        assert_semaphore_metric(
            &test_storage.metric_registry,
            "iox_async_semaphore_permits_acquired",
            2,
        );

        // 3rd request is pending
        let fut = t.request(&service);
        pin!(fut);
        assert_fut_pending(&mut fut).await;

        assert_semaphore_metric(
            &test_storage.metric_registry,
            "iox_async_semaphore_permits_total",
            2,
        );
        assert_semaphore_metric(
            &test_storage.metric_registry,
            "iox_async_semaphore_permits_pending",
            1,
        );
        assert_semaphore_metric(
            &test_storage.metric_registry,
            "iox_async_semaphore_permits_acquired",
            2,
        );

        // free permit
        drop(streaming_resp1);
        // the parked request can now complete
        let streaming_resp3 = fut.await;

        assert_semaphore_metric(
            &test_storage.metric_registry,
            "iox_async_semaphore_permits_total",
            2,
        );
        assert_semaphore_metric(
            &test_storage.metric_registry,
            "iox_async_semaphore_permits_pending",
            0,
        );
        assert_semaphore_metric(
            &test_storage.metric_registry,
            "iox_async_semaphore_permits_acquired",
            2,
        );

        // dropping the remaining responses returns all permits
        drop(streaming_resp2);
        drop(streaming_resp3);

        assert_semaphore_metric(
            &test_storage.metric_registry,
            "iox_async_semaphore_permits_total",
            2,
        );
        assert_semaphore_metric(
            &test_storage.metric_registry,
            "iox_async_semaphore_permits_pending",
            0,
        );
        assert_semaphore_metric(
            &test_storage.metric_registry,
            "iox_async_semaphore_permits_acquired",
            0,
        );
    }
}
#[tokio::test]
// ensure that the expected IOx header is included with successes
async fn test_headers() {
    test_helpers::maybe_start_logging();

    // Start a test gRPC server on a randomly allocated port
    let fixture = Fixture::new().await.expect("Connecting to test server");

    let db_info = org_and_bucket();

    // Add a chunk with a field
    let chunk = TestChunk::new("TheMeasurement")
        .with_time_column()
        .with_tag_column("state")
        .with_one_row_of_data();

    fixture
        .test_storage
        .db_or_create(db_info.db_name())
        .await
        .add_chunk("my_partition_key", Arc::new(chunk));

    // use the raw gRPC client so the response metadata (headers) is visible
    let mut storage_client = storage_client::StorageClient::new(
        fixture.client_connection.clone().into_grpc_connection(),
    );

    let source = Some(StorageClient::read_source(&db_info, 1));

    let request = ReadFilterRequest {
        read_source: source.clone(),
        range: None,
        predicate: None,
        ..Default::default()
    };

    let response = storage_client.read_filter(request).await.unwrap();
    println!("Result is {response:?}");
    // successful responses must carry the `storage-type: iox` header
    assert_eq!(response.metadata().get("storage-type").unwrap(), "iox");
}
#[tokio::test]
// ensure that the expected IOx header is included with errors
async fn test_headers_error() {
    test_helpers::maybe_start_logging();
    // Start a test gRPC server on a randomly allocated port
    let fixture = Fixture::new().await.expect("Connecting to test server");
    let db_info = org_and_bucket();
    // A chunk wired to fail on scan, so the RPC is guaranteed to error.
    let chunk = TestChunk::new("my_table").with_error("Sugar we are going down");
    fixture
        .test_storage
        .db_or_create(db_info.db_name())
        .await
        .add_chunk("my_partition_key", Arc::new(chunk));
    // use the raw gRPC client to examine the headers
    let mut storage_client = storage_client::StorageClient::new(
        fixture.client_connection.clone().into_grpc_connection(),
    );
    let source = Some(StorageClient::read_source(&db_info, 1));
    let request = ReadFilterRequest {
        read_source: source.clone(),
        range: None,
        predicate: None,
        ..Default::default()
    };
    // Even the error response must carry the `storage-type: iox` header.
    let response = storage_client.read_filter(request).await.unwrap_err();
    println!("Result is {response:?}");
    assert_eq!(response.metadata().get("storage-type").unwrap(), "iox");
}
#[tokio::test]
async fn test_marshal_errors() {
    test_helpers::maybe_start_logging();
    // Start a test gRPC server on a randomly allocated port
    let fixture = Fixture::new().await.expect("Connecting to test server");
    let db_info = org_and_bucket();
    // Add a chunk with a string field; averaging a Utf8 column is invalid and
    // will force a planning error that we can inspect below.
    let chunk = TestChunk::new("TheMeasurement")
        .with_time_column()
        .with_string_field_column_with_stats("str", None, None)
        .with_tag_column("state")
        .with_one_row_of_data();
    fixture
        .test_storage
        .db_or_create(db_info.db_name())
        .await
        .add_chunk("my_partition_key", Arc::new(chunk));
    let mut storage_client = storage_client::StorageClient::new(
        fixture.client_connection.clone().into_grpc_connection(),
    );
    let source = Some(StorageClient::read_source(&db_info, 1));
    let request = ReadWindowAggregateRequest {
        read_source: source.clone(),
        range: Some(make_timestamp_range(1000, 2000)),
        predicate: None,
        window_every: 0,
        offset: 0,
        aggregate: vec![Aggregate {
            r#type: aggregate::AggregateType::Mean as i32,
        }],
        window: Some(Window {
            every: Some(Duration {
                nsecs: 1122,
                months: 0,
                negative: false,
            }),
            offset: Some(Duration {
                nsecs: 0,
                months: 4,
                negative: false,
            }),
        }),
        tag_key_meta_names: TagKeyMetaNames::Text as i32,
    };
    let tonic_status = storage_client
        .read_window_aggregate(request)
        .await
        .unwrap_err();
    // Top-level tonic status carries the message and the InvalidArgument code.
    assert!(tonic_status
        .message()
        .contains("Avg does not support inputs of type Utf8"));
    assert_eq!(tonic::Code::InvalidArgument, tonic_status.code());
    // The details bytes should decode to a googleapis `Status` mirroring the
    // same message/code...
    let mut rpc_status = GrpcStatus::decode(tonic_status.details()).unwrap();
    assert!(rpc_status
        .message
        .contains("Avg does not support inputs of type Utf8"));
    assert_eq!(tonic::Code::InvalidArgument as i32, rpc_status.code);
    assert_eq!(1, rpc_status.details.len());
    // ...and its single detail should decode to an InfluxDB-style error.
    let detail = rpc_status.details.pop().unwrap();
    let influx_err = InfluxDbError::decode(detail.value).unwrap();
    assert_eq!("invalid", influx_err.code);
    assert!(influx_err
        .message
        .contains("Avg does not support inputs of type Utf8"));
    assert_eq!("iox/influxrpc", influx_err.op);
    assert_eq!(None, influx_err.error);
}
/// Build a gRPC `TimestampRange` covering `[start, end)` nanoseconds.
fn make_timestamp_range(start: i64, end: i64) -> TimestampRange {
    TimestampRange { start, end }
}
/// return a gRPC predicate like
///
/// state="MA"
fn make_state_eq_ma_predicate() -> generated_types::Predicate {
    make_state_predicate(node::Comparison::Equal)
}
/// return a gRPC predicate like
///
/// state != "MA"
fn make_state_neq_ma_predicate() -> generated_types::Predicate {
    make_state_predicate(node::Comparison::NotEqual)
}
/// return a gRPC predicate like
///
/// state >= "MA"
fn make_state_geq_ma_predicate() -> generated_types::Predicate {
    make_state_predicate(node::Comparison::Gte)
}
/// Build a `state <op> "MA"` predicate for the given comparison operator.
fn make_state_predicate(op: node::Comparison) -> generated_types::Predicate {
    make_tag_predicate("state", "MA", op)
}
/// TagRef(tag_name) op tag_value
fn make_tag_predicate(
    tag_name: impl Into<String>,
    tag_value: impl Into<String>,
    op: node::Comparison,
) -> generated_types::Predicate {
    use node::{Type, Value};
    // Left operand: a reference to the tag column.
    let tag_ref = Node {
        node_type: Type::TagRef as i32,
        value: Some(Value::TagRefValue(tag_name.into().into_bytes())),
        children: vec![],
    };
    // Right operand: the literal string value to compare against.
    let literal = Node {
        node_type: Type::Literal as i32,
        value: Some(Value::StringValue(tag_value.into())),
        children: vec![],
    };
    // Root comparison node combining both operands.
    let root = Node {
        node_type: Type::ComparisonExpression as i32,
        value: Some(Value::Comparison(op as i32)),
        children: vec![tag_ref, literal],
    };
    generated_types::Predicate { root: Some(root) }
}
/// Convert to a Vec<String> to facilitate comparison with results of client
fn to_string_vec(v: &[&str]) -> Vec<String> {
    v.iter().map(ToString::to_string).collect()
}
/// Assert that given future is pending.
///
/// This will try to poll the future a bit to ensure that it is not stuck in tokios task preemption.
async fn assert_fut_pending<F>(fut: &mut F)
where
    F: Future + Send + Unpin,
{
    // Race the future against a short sleep: if the future completes before
    // the sleep, it was not pending. NOTE(review): `tokio::select!` polls its
    // branches in random order, but either order gives `fut` at least one poll.
    tokio::select! {
        _ = fut => panic!("future is not pending, yielded"),
        _ = tokio::time::sleep(std::time::Duration::from_millis(10)) => {},
    };
}
/// Assert that the named U64 gauge for the `query_execution` semaphore
/// currently reports `expected`.
fn assert_semaphore_metric(registry: &metric::Registry, name: &'static str, expected: u64) {
    let instrument = registry
        .get_instrument::<Metric<U64Gauge>>(name)
        .expect("failed to read metric");
    let attributes = Attributes::from(&[("semaphore", "query_execution")]);
    let observer = instrument
        .get_observer(&attributes)
        .expect("failed to get observer");
    assert_eq!(observer.fetch(), expected);
}
}
|
use proptest::prop_assert_eq;
use proptest::strategy::{Just, Strategy};
use liblumen_alloc::erts::term::prelude::*;
use crate::erlang::size_1::result;
use crate::test::strategy;
#[test]
// Any term that is neither a tuple nor a bitstring must make `size/1` fail
// with a badarg error carrying a descriptive message.
fn without_tuple_or_bitstring_errors_badarg() {
    run!(
        |arc_process| {
            (
                Just(arc_process.clone()),
                strategy::term(arc_process.clone())
                    .prop_filter("Term must not be a tuple or bitstring", |term| {
                        !(term.is_boxed_tuple() || term.is_bitstring())
                    }),
            )
        },
        |(arc_process, binary_or_tuple)| {
            prop_assert_badarg!(
                result(&arc_process, binary_or_tuple),
                format!(
                    "binary_or_tuple ({}) is neither a binary nor a tuple",
                    binary_or_tuple
                )
            );
            Ok(())
        },
    );
}
#[test]
// For tuples, `size/1` returns the arity (number of elements), checked here
// for arities 0 through 3.
fn with_tuple_returns_arity() {
    run!(
        |arc_process| {
            (Just(arc_process.clone()), 0_usize..=3_usize).prop_flat_map(|(arc_process, size)| {
                (
                    Just(arc_process.clone()),
                    Just(size),
                    // Generate a tuple with exactly `size` arbitrary elements.
                    strategy::term::tuple::intermediate(
                        strategy::term(arc_process.clone()),
                        (size..=size).into(),
                        arc_process.clone(),
                    ),
                )
            })
        },
        |(arc_process, size, term)| {
            prop_assert_eq!(result(&arc_process, term), Ok(arc_process.integer(size)));
            Ok(())
        },
    );
}
#[test]
// For bitstrings, `size/1` returns the number of whole bytes (trailing
// partial bytes are not counted).
fn with_bitstring_is_byte_len() {
    run!(
        |arc_process| {
            (
                Just(arc_process.clone()),
                strategy::term::is_bitstring(arc_process.clone()),
            )
        },
        |(arc_process, term)| {
            // Compute the expected full-byte length from the concrete
            // representation (heap binary or sub-binary).
            let full_byte_len = match term.decode().unwrap() {
                TypedTerm::HeapBinary(heap_binary) => heap_binary.full_byte_len(),
                TypedTerm::SubBinary(subbinary) => subbinary.full_byte_len(),
                _ => unreachable!(),
            };
            prop_assert_eq!(
                result(&arc_process, term),
                Ok(arc_process.integer(full_byte_len))
            );
            Ok(())
        },
    );
}
|
// Shim module: the actual contents are generated by the build script into
// $OUT_DIR/glue.rs and textually included here.
#![allow(unused)]
include!(concat!(env!("OUT_DIR"), "/glue.rs"));
|
use std::{path::PathBuf, str::FromStr};
use structopt::StructOpt;
/// Maps a type to the file-name extension conventionally used for it.
pub trait TypeExtension {
    /// The extension including the leading dot (empty string if none).
    fn extension(&self) -> &'static str;
}
/// Compression scheme applied to input or output files.
#[derive(Debug, Copy, Clone)]
pub enum CompressionType {
    Gzip,
    Bzip,
    /// Auto-detect the scheme (input only; has no file extension).
    Detect,
    None,
}
impl TypeExtension for CompressionType {
fn extension(&self) -> &'static str {
match self {
Self::None => "",
Self::Bzip => ".bz",
Self::Gzip => ".gz",
Self::Detect => {
panic!("No reasonable extension for CompressionType::Detect");
}
}
}
}
// Command-line configuration for csv-split, parsed via structopt.
// NOTE: plain `//` comments are used (not `///`) so structopt's generated
// help text stays exactly as given by the `help = ...` attributes.
#[derive(StructOpt, Debug)]
pub struct Config {
    // Column whose equal consecutive values must land in the same output file.
    #[structopt(
        short = "g",
        long = "group-col",
        help = "The zero based column with values that must remain together. \
        To work properly the file must already be sorted by this column"
    )]
    pub group_column: Option<usize>,
    // Soft cap on rows per output file (grouping may exceed it).
    #[structopt(
        short = "n",
        long = "num-rows",
        help = "The maximum number of rows to put in each file. If we're \
        grouping rows it can be more than this."
    )]
    pub max_rows: usize,
    #[structopt(long = "stdin", help = "Directs csv-split to read from standard input")]
    pub stdin: bool,
    // Empty default maps to CompressionType::None via its FromStr impl.
    #[structopt(
        short = "i",
        long = "input-compression",
        help = "Treat input data as 'g'zipped, 'b'zipped, or 'd'etect",
        default_value = ""
    )]
    pub input_compression: CompressionType,
    #[structopt(
        short = "z",
        long = "output-compression",
        help = "If present, each file will be compressed when written",
        default_value = ""
    )]
    pub output_compression: CompressionType,
    // Shell command run after each output file is finished.
    #[structopt(
        short = "t",
        long = "trigger",
        help = "A command to execute each time a csv file is written."
    )]
    pub trigger: Option<String>,
    #[structopt(
        short = "d",
        long = "header",
        help = "Treat the first row as a header which will be injected into
each split file."
    )]
    pub headers: bool,
    #[structopt(
        short = "q",
        long = "queue-depth",
        help = "io_uring queue depth / buffer count",
        default_value = "32"
    )]
    pub queue_depth: usize,
    #[structopt(
        short = "b",
        long = "buffer-size",
        help = "Buffer size",
        default_value = "32768"
    )]
    pub buffer_size: usize,
    // Number of characters appended to output file names (like `split -a`).
    #[structopt(
        short = "a",
        long = "suffix-length",
        help = "Generate suffixes of length N",
        default_value = "2"
    )]
    pub suffix_len: usize,
    #[structopt(help = "Our input filename to process")]
    pub file: PathBuf,
    #[structopt(
        help = "Output directory (defaults to current directory)",
        default_value = "."
    )]
    pub out_path: PathBuf,
}
impl FromStr for CompressionType {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match &*s.to_lowercase() {
"" => Ok(Self::None),
"g" | "gzip" => Ok(Self::Gzip),
"b" | "bzip" => Ok(Self::Bzip),
"d" | "detect" => Ok(Self::Detect),
_ => Err(format!(
"Unknown compression format '{}': 'gzip', 'bzip', 'detect', are supported",
s
)),
}
}
}
|
use crate::interface::{
RedeemStakeBatch, StakeBatch, TimestampedNearBalance, TimestampedStakeBalance, YoctoNear,
};
use near_sdk::serde::{Deserialize, Serialize};
/// View model for a registered account with the contract
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(crate = "near_sdk::serde")]
pub struct StakeAccount {
    /// account storage usage payment that is escrowed
    /// - the balance will be refunded when the account unregisters
    /// - timestamp also shows when the account registered
    pub storage_escrow: TimestampedNearBalance,
    /// NEAR balance that is available for withdrawal from the contract
    pub near: Option<TimestampedNearBalance>,
    /// account STAKE token balance
    pub stake: Option<TimestampedStakeBalance>,
    /// NEAR funds that have been deposited to be staked when the batch is run
    pub stake_batch: Option<StakeBatch>,
    /// While batches are running, the contract is locked. The account can still deposit NEAR funds
    /// to stake into the next batch while the contract is locked.
    pub next_stake_batch: Option<StakeBatch>,
    /// STAKE tokens that have been set aside to be redeemed in the next batch
    pub redeem_stake_batch: Option<RedeemStakeBatch>,
    /// While batches are running, the contract is locked. The account can still submit requests
    /// to redeem STAKE tokens into the next batch while the contract is locked.
    pub next_redeem_stake_batch: Option<RedeemStakeBatch>,
    /// only applies if the account has a [RedeemStakeBatch](crate::domain::RedeemStakeBatch) with a
    /// [RedeemStakeBatchReceipt](crate::domain::RedeemStakeBatchReceipt) that is pending withdrawal
    /// from the staking pool. If the contract has liquidity, then this returns the current liquidity
    /// that is available to withdraw against the redeemed STAKE. The account is not guaranteed the
    /// funds because other accounts might have withdrawn them first.
    ///
    /// returns None if there is currently no NEAR liquidity to withdraw against
    pub contract_near_liquidity: Option<YoctoNear>,
}
|
// Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use common_exception::Result;
use common_expression::BlockEntry;
use common_expression::DataBlock;
use common_expression::DataSchemaRef;
use common_expression::Evaluator;
use common_expression::Expr;
use common_expression::FunctionContext;
use common_functions::BUILTIN_FUNCTIONS;
use crate::pipelines::processors::port::InputPort;
use crate::pipelines::processors::port::OutputPort;
use crate::pipelines::processors::processor::ProcessorPtr;
use crate::pipelines::processors::transforms::transform::Transform;
use crate::pipelines::processors::transforms::transform::Transformer;
/// TransformRuntimeCastSchema is used to cast block to the specified schema.
/// Different from `TransformCastSchema`, it is used at the runtime
pub struct TransformRuntimeCastSchema {
    // Function evaluation context used when running the cast expressions.
    func_ctx: FunctionContext,
    // Target schema incoming blocks are cast to, column by column.
    insert_schema: DataSchemaRef,
}
impl TransformRuntimeCastSchema
where Self: Transform
{
    /// Build the processor, wiring this cast transform between the two ports.
    pub fn try_create(
        input_port: Arc<InputPort>,
        output_port: Arc<OutputPort>,
        insert_schema: DataSchemaRef,
        func_ctx: FunctionContext,
    ) -> Result<ProcessorPtr> {
        let transform = Self {
            func_ctx,
            insert_schema,
        };
        let transformer = Transformer::create(input_port, output_port, transform);
        Ok(ProcessorPtr::create(transformer))
    }
}
impl Transform for TransformRuntimeCastSchema {
    const NAME: &'static str = "CastSchemaTransform";
    /// Cast each column of `data_block` to the corresponding field type of
    /// `insert_schema`, leaving columns whose types already match untouched.
    fn transform(&mut self, data_block: DataBlock) -> Result<DataBlock> {
        // Pair each incoming column with its target field (positionally) and
        // build either a plain column reference or a wrapping Cast expression.
        let exprs: Vec<Expr> = data_block
            .columns()
            .iter()
            .zip(self.insert_schema.fields().iter().enumerate())
            .map(|(from, (index, to))| {
                let expr = Expr::ColumnRef {
                    span: None,
                    id: index,
                    data_type: from.data_type.clone(),
                    display_name: to.name().clone(),
                };
                if &from.data_type != to.data_type() {
                    // Non-`try` cast: a failed conversion surfaces as an error
                    // rather than producing NULL.
                    Expr::Cast {
                        span: None,
                        is_try: false,
                        expr: Box::new(expr),
                        dest_type: to.data_type().clone(),
                    }
                } else {
                    expr
                }
            })
            .collect();
        let mut columns = Vec::with_capacity(exprs.len());
        // NOTE(review): `self.func_ctx` is moved out of `&mut self`, so
        // FunctionContext is presumably Copy — confirm if changing this type.
        let evaluator = Evaluator::new(&data_block, self.func_ctx, &BUILTIN_FUNCTIONS);
        for (field, expr) in self.insert_schema.fields().iter().zip(exprs.iter()) {
            let value = evaluator.run(expr)?;
            let column = BlockEntry {
                data_type: field.data_type().clone(),
                value,
            };
            columns.push(column);
        }
        // Row count is unchanged; only column types/values were rewritten.
        Ok(DataBlock::new(columns, data_block.num_rows()))
    }
}
|
#![feature(path_ext)]
#![feature(fs_walk)]
#![feature(convert)]
extern crate rustc_serialize;
use std::path::Path;
use std::fs::PathExt;
use std::env;
use std::fs;
use std::process;
use std::fs::File;
use std::io::Write;
use std::io::Read;
use rustc_serialize::json;
// Persisted configuration: extra command-line arguments passed to ffmpeg.
#[derive(RustcDecodable, RustcEncodable)]
pub struct Config {
    // Each entry is one ffmpeg argument (e.g. "-b:a", "320k").
    config: Vec<String>,
}
// Name of the JSON file (in the current directory) holding ffmpeg arguments.
const CONFIG : &'static str = ".ffbc-config.json";
/// Entry point: ensure a config file exists, then dispatch on the CLI args.
fn main() {
    let arguments: Vec<String> = env::args().collect();
    first_run();
    handle_arguments(arguments);
}
/// Create a default (empty) config file on the very first run so that later
/// calls to `load_config` can assume the file exists.
fn first_run() {
    let config_file = Path::new("./").join(CONFIG);
    // Idiomatic negation instead of `== false`.
    if !config_file.exists() {
        let default_config = Config {
            config: vec!["".to_string()],
        };
        save_config(default_config);
    }
}
/// Dispatch on the command line: flags (`-c`/`-h`) are handled first;
/// everything else is forwarded to `handle_directories` as a flat list of
/// [directory, old_ext, new_ext] triples.
fn handle_arguments(arguments: Vec<String>) {
    // Guard: `arguments[0]` is the program name; with nothing after it the
    // original code panicked on `arguments[1]`.
    if arguments.len() < 2 {
        println!("You didn't supply enough arguments.
Did you forget to specify the old extension and the new extension for the files?");
        return;
    }
    match arguments[1].as_ref() {
        "-c" | "--config" => { change_config(&arguments); return;},
        "-h" | "--help" => help(),
        _ => (),
    }
    // Every argument after the program name is part of the dir/ext list.
    // (The original branched on index parity here, but every branch pushed
    // the argument unchanged, so a straight copy is equivalent.)
    let dirs_and_exts: Vec<&String> = arguments[1..].iter().collect();
    if dirs_and_exts.len() != 0 {
        handle_directories(dirs_and_exts);
    } else {
        println!("You didn't supply enough arguments.
Did you forget to specify the old extension and the new extension for the files?");
        return;
    }
}
/// Convert `file` from `ext_from` to `ext_to` by shelling out to ffmpeg,
/// appending any extra arguments stored in the config.
fn convert(file: String, ext_from: &String, ext_to: &String, options: &Config) {
    let source_path = Path::new(file.as_str());
    let target_path = Path::with_extension(source_path, ext_to);
    let target = target_path.as_path().to_str().unwrap();
    // Only act on files that actually carry the source extension.
    if source_path.extension().unwrap().to_str().unwrap() == ext_from {
        let mut ffmpeg = process::Command::new("ffmpeg");
        ffmpeg.args(&["-i", file.as_ref()]);
        // User-configured ffmpeg arguments (e.g. bitrate) go between the
        // input and the output file.
        for extra in &options.config {
            ffmpeg.arg(extra);
        }
        ffmpeg.arg(target);
        match ffmpeg.output() {
            Ok(r) => {
                // ffmpeg writes its progress to stderr; empty stdout is
                // treated as a successful conversion.
                if String::from_utf8_lossy(r.stdout.as_ref()) == "" {
                    println!("Converted: {}", file);
                } else {
                    println!("{}", String::from_utf8_lossy(r.stdout.as_ref()));
                }
            },
            Err(e) => panic!("Failed to convert, here's why: {}", e),
        }
    }
}
/// Walk each requested directory recursively and convert every matching file.
///
/// `dir` is a flat list of triples: [directory, ext_from, ext_to, ...], so a
/// new triple starts at every index divisible by 3.
fn handle_directories(dir: Vec<&String>) {
    let options = load_config();
    for d in 0..dir.len() {
        // BUGFIX: the original checked `d % 2 == 0`, which misaligns with the
        // 3-element triples and silently skipped every directory after the
        // first (e.g. `./ffbc dir1 wav flac dir2 wav flac` never saw dir2).
        // The `d + 2 < dir.len()` guard also prevents an out-of-bounds panic
        // on a trailing incomplete triple.
        if d % 3 == 0 && d + 2 < dir.len() {
            let path = Path::new(dir[d]);
            let mut fls: Vec<_> = vec!();
            if path.is_dir() {
                match fs::walk_dir(&path) {
                    Err(why) => println!("! {:?}", why.kind()),
                    Ok(paths) => for path in paths {
                        fls.push(path.unwrap().path());
                    },
                }
                for x in fls {
                    // dir[d + 1] is the extension to convert FROM,
                    // dir[d + 2] is the extension to convert TO.
                    let file = x.to_str().unwrap().to_string();
                    convert(file, dir[d + 1], dir[d + 2], &options);
                }
            }
        }
    }
}
/*
fn convert_temp(file: String, ext_from: &String, ext_to: &String, options: &Config) {
println!("{} : {} : {}", file, ext_from, ext_to);
//let option = options.clone();
for x in &options.config {
println!("{}", x);
}
}
* */
/// Replace the stored ffmpeg arguments with everything after `-c`/`--config`.
/// TODO: support setting several named options at once, e.g.
/// ./ffbc -c opt1 true opt2 false
fn change_config(args: &Vec<String>) {
    let new_arguments: Vec<String> = args[2..].iter().cloned().collect();
    let mut current_config: Config = load_config();
    current_config.config = new_arguments;
    save_config(current_config);
}
/// Read the config file, decode its JSON, and return the resulting Config.
///
/// Panics if the file cannot be read or decoded, since the rest of the
/// program cannot operate without a configuration.
fn load_config() -> Config {
    let config_file = Path::new("./").join(CONFIG);
    let mut file = File::open(&config_file).unwrap();
    let mut contents = "".to_string();
    // Method-call form instead of the original UFCS
    // `File::read_to_string(&mut f, ...)`.
    match file.read_to_string(&mut contents) {
        Ok(_) => (),
        Err(e) => panic!("HELP: {}", e),
    }
    let current_config: Config = json::decode(&contents).unwrap();
    current_config
}
/// Encode the given Config as JSON and write it to the config file.
/// Write errors are deliberately ignored (best-effort persistence).
fn save_config(new_config: Config) {
    let config_file = Path::new("./").join(CONFIG);
    let encoded = json::encode(&new_config).unwrap();
    let mut file = File::create(&config_file).unwrap();
    let _ = file.write_all(encoded.as_bytes());
}
/// Print the full usage text to stdout.
fn help() {
    println!("Usage: ./ffbc [DIRECTORY] [OLD_EXT] [NEW_EXT]
[OLD_EXT] and [NEW_EXT] are file extensions.
USAGE
./ffbc some_directory flac mp3
The first argument that gets passed is the target directory (some_directory).
The second argument that gets passed is the current file extension you want to convert FROM (flac).
The third argument that gets passed is the new file extension you want to convert TO (mp3).
This will convert everything in that directory (recursively, so directories in the first directory will also be targeted).
It targets flac files and converts them to mp3 using ffmpeg.
You can also use the -c argument to pass some options to ffmpeg, like -b:a 320k to set the audio quality to 320kbps.
./ffbc -c -b:a 320k
./ffbc directory wav ogg
The file formats you can convert from and to is limited by ffmpeg.
The original files will NOT be replaced.
Technically, you're supposed to be able to be able to supply the program with several directories and it will work.
I haven't tried this though. ./ffbc dir1 wav flac dir2 wav flac dir3 mkv mp4
OPTIONS
-h, --help
Prints out this help message.
-c, --config
Changes the ffmpeg command-line arguments,
See example 1,
EXAMPLES
Example 1
./ffbc -c b:a 320k
This will set the audio quality to 320kbps.
After you've set that option, do this: ./ffbc some_directory flac mp3");
    return;
}
|
mod backend;
mod file_handle;
pub use file_handle::FileHandle;
mod dialog;
#[cfg(not(target_arch = "wasm32"))]
pub use dialog::FileDialog;
pub use dialog::AsyncFileDialog;
pub use dialog::{AsyncMessageDialog, MessageButtons, MessageDialog, MessageLevel};
|
//
// atom2.rs
// Copyright (C) 2019 Malcolm Ramsay <malramsay64@gmail.com>
// Distributed under terms of the MIT license.
//
use std::fmt;
use nalgebra::Point2;
use serde::{Deserialize, Serialize};
use crate::traits::Intersect;
/// A circular "atom" in 2D: a centre position plus a radius.
#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
pub struct Atom2 {
    pub position: Point2<f64>,
    pub radius: f64,
}
impl Intersect for Atom2 {
    /// Two atoms intersect when their centre-to-centre distance is smaller
    /// than the sum of their radii; compared in squared form to avoid a sqrt.
    fn intersects(&self, other: &Self) -> bool {
        let combined_radius = self.radius + other.radius;
        let separation = self.position - other.position;
        separation.norm_squared() < combined_radius.powi(2)
    }

    /// Area of the circle: pi * r^2.
    fn area(&self) -> f64 {
        std::f64::consts::PI * self.radius.powi(2)
    }
}
impl fmt::Display for Atom2 {
    /// Human-readable form: `Atom2 { x, y, radius }`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "Atom2 {{ {}, {}, {} }}",
            self.position.x, self.position.y, self.radius
        )
    }
}
impl Atom2 {
    /// Construct an atom centred at `(x, y)` with the given radius.
    pub fn new(x: f64, y: f64, radius: f64) -> Self {
        let position = Point2::new(x, y);
        Atom2 { position, radius }
    }
}
#[cfg(test)]
mod test {
    use approx::assert_abs_diff_eq;
    use super::*;
    // Constructor stores position and radius unchanged.
    #[test]
    fn init_test() {
        let a = Atom2::new(0., 0., 1.);
        assert_abs_diff_eq!(a.position.x, 0.);
        assert_abs_diff_eq!(a.position.y, 0.);
        assert_abs_diff_eq!(a.radius, 1.);
    }
    // Squared distance between centres matches the intersection test input.
    #[test]
    fn distance_squared_test() {
        let a0 = Atom2::new(0., 0., 1.);
        let a1 = Atom2::new(0.5, 0., 1.);
        assert_abs_diff_eq!((a0.position - a1.position).norm_squared(), 0.25);
        assert!(a0.intersects(&a1));
    }
    // Overlapping pairs intersect; well-separated pairs do not.
    #[test]
    fn intersection_test() {
        let a0 = Atom2::new(0., 0., 1.);
        let a1 = Atom2::new(1., 0., 1.);
        let a2 = Atom2::new(0.5, 0.5, 1.);
        let a3 = Atom2::new(1.5, 1.5, 1.);
        assert!(a0.intersects(&a1));
        assert!(a1.intersects(&a2));
        assert!(a3.intersects(&a2));
        assert!(!a0.intersects(&a3));
    }
    // Boundary behaviour: combined radius exactly sqrt(2) apart touches but
    // a radius shrunk by a couple of EPSILON no longer intersects.
    #[test]
    fn intersection_calculation_test() {
        let a0 = Atom2::new(0., 0., f64::sqrt(2.) / 2.);
        let a1 = Atom2::new(1., 1., f64::sqrt(2.) / 2.);
        let a2 = Atom2::new(1., 1., f64::sqrt(2.) / 2. - 2. * std::f64::EPSILON);
        println!("Radii: {}", a0.radius * a0.radius + a1.radius * a1.radius);
        assert!(a0.intersects(&a1));
        assert!(a1.intersects(&a2));
        assert!(!a0.intersects(&a2));
    }
}
|
use crate::prelude::*;
use crate::api;
use crate::config;
use crate::repo;
use crate::models;
use chrono::prelude::*;
use actix_web::{get, web, App, HttpServer, HttpResponse, Responder};
/// A cluster node: serves a healthcheck endpoint and periodically writes
/// heartbeat status records to the shared repository.
#[derive(Clone)]
pub struct Node {
    conf: config::Settings,
    repo: repo::Repo,
    // This node's advertised IP (discovered from instance metadata, or faked).
    ip: String,
}
// EC2-style instance-metadata endpoint used to discover the local IPv4 address.
const METADATA_URL: &str = "http://169.254.169.254/latest/meta-data/local-ipv4";
impl Node {
    /// Connect to the repository and discover this node's IP address.
    pub async fn new(conf: config::Settings) -> Result<Self> {
        let repo = repo::Repo::new(&conf).await?;
        let ip = Self::local_ip().await;
        Ok(Node{ conf, repo, ip })
    }
    /// Start the heartbeat loop in the background, then run the HTTP service
    /// (which blocks until the server shuts down).
    pub async fn run(&self) -> Result<()> {
        let clone = self.clone();
        actix_rt::spawn(async move {
            clone.run_heartbeats().await
        });
        self.run_service().await
    }
    /// Discover the local IP, falling back to a generated fake address so the
    /// node can still run outside the cloud environment.
    async fn local_ip() -> String {
        match Self::discover_ip().await {
            Ok(ip) => {
                log::info!("Discovered local ip: {}", ip);
                ip
            },
            Err(error) => {
                let fake = fakedata_generator::gen_ipv4();
                log::warn!("Failed to discover local ip: {}, going to use fake ip {}", error, fake);
                fake
            }
        }
    }
    /// Query the instance-metadata endpoint for our IPv4 address.
    /// Uses a 1s timeout so startup is not blocked off-cloud.
    async fn discover_ip() -> Result<String> {
        log::debug!("Going to send ip discovery request");
        let client = reqwest::ClientBuilder::new()
            .timeout(std::time::Duration::from_millis(1000))
            .build()?;
        let ip = client.get(METADATA_URL)
            .send()
            .await?
            .text()
            .await?;
        log::debug!("Successfully discovered ip {}", ip);
        Ok(ip)
    }
    /// Infinite loop: write one heartbeat roughly every second.
    async fn run_heartbeats(&self) -> ! {
        loop {
            tokio::time::delay_for(std::time::Duration::from_millis(1000)).await;
            self.send_heartbeat().await;
        }
    }
    /// Best-effort heartbeat write; failures are logged, never fatal.
    async fn send_heartbeat(&self) {
        match self.repo.update_status(models::NodeStatus::new(self.ip.clone())).await {
            Ok(()) => log::info!("Successfully sent heartbeat"),
            Err(err) => log::warn!("Failed to send heartbeat: {}", err),
        };
    }
    /// Run the actix-web server exposing the healthcheck route.
    async fn run_service(&self) -> Result<()> {
        log::info!("Starting node service at {}", &self.conf.bind_address);
        let clone = self.clone();
        HttpServer::new(move || App::new().data(clone.clone()).service(healthcheck))
            .bind(self.conf.bind_address)?
            .run()
            .await?;
        Ok(())
    }
    /// Build the HTTP response for a healthcheck: 200 with the status on
    /// success, 503 with the error message otherwise.
    async fn healthcheck(&self) -> impl Responder {
        match self.process_healthcheck().await {
            Ok(status) => HttpResponse::Ok().json(status),
            Err(e) => {
                log::error!("Healthcheck failed: {:?}", e);
                HttpResponse::ServiceUnavailable().json(api::Error{ error: e.to_string() })
            },
        }
    }
    /// Collect the statuses of all nodes seen within the last 10 seconds.
    async fn process_healthcheck(&self) -> Result<api::Status> {
        let first_timepoint = Utc::now() - chrono::Duration::seconds(10);
        let res = self
            .repo
            .list_statuses()
            .await?
            .into_iter()
            .filter(|status| status.timestamp > first_timepoint)
            .map(|status| api::Service{
                ip: status.ip,
                status: status.status,
            })
            .collect::<Vec<_>>();
        Ok(api::Status{
            ip: self.ip.clone(),
            services: res,
        })
    }
}
/// actix-web route handler; delegates to the shared `Node` instance.
#[get("/healthcheck")]
async fn healthcheck(node: web::Data<Node>) -> impl Responder {
    node.healthcheck().await
}
|
use std::cell::{Cell, RefCell};
use std::cmp;
use std::convert::TryFrom;
use std::fs;
use std::io::prelude::*;
use std::io::{self, SeekFrom};
use std::marker;
use std::path::Path;
use crate::entry::{EntryFields, EntryIo};
use crate::error::TarError;
use crate::other;
use crate::pax::*;
use crate::{Entry, GnuExtSparseHeader, GnuSparseHeader, Header};
/// A top-level representation of an archive file.
///
/// This archive can have an entry added to it and it can be iterated over.
pub struct Archive<R: ?Sized + Read> {
    inner: ArchiveInner<R>,
}
pub struct ArchiveInner<R: ?Sized> {
    // Current read position within the archive stream, in bytes.
    pos: Cell<u64>,
    // Permission-bit mask applied (inverted, umask-style) when unpacking.
    mask: u32,
    unpack_xattrs: bool,
    preserve_permissions: bool,
    preserve_ownerships: bool,
    preserve_mtime: bool,
    overwrite: bool,
    // When set, zeroed headers are skipped instead of ending iteration
    // (supports concatenated archives).
    ignore_zeros: bool,
    // `obj` is last so `R: ?Sized` (unsized) is allowed for this tail field.
    obj: RefCell<R>,
}
/// An iterator over the entries of an archive.
pub struct Entries<'a, R: 'a + Read> {
    fields: EntriesFields<'a>,
    // Ties the iterator's type to the concrete reader without storing it.
    _ignored: marker::PhantomData<&'a Archive<R>>,
}
// Internal marker trait for readers that can also seek, letting iteration
// skip file contents efficiently. Blanket-implemented for all `Read + Seek`.
trait SeekRead: Read + Seek {}
impl<R: Read + Seek> SeekRead for R {}
// Type-erased iteration state shared by `entries` and `entries_with_seek`.
struct EntriesFields<'a> {
    archive: &'a Archive<dyn Read + 'a>,
    // Present only when the underlying reader supports seeking.
    seekable_archive: Option<&'a Archive<dyn SeekRead + 'a>>,
    // Absolute offset of the next header to read.
    next: u64,
    done: bool,
    // When true, entries are yielded without GNU long-name/long-link handling.
    raw: bool,
}
impl<R: Read> Archive<R> {
    /// Create a new archive with the underlying object as the reader.
    pub fn new(obj: R) -> Archive<R> {
        Archive {
            inner: ArchiveInner {
                // Defaults: no mask, no xattrs/permissions/ownership
                // preservation; mtime preservation and overwrite are on.
                mask: u32::MIN,
                unpack_xattrs: false,
                preserve_permissions: false,
                preserve_ownerships: false,
                preserve_mtime: true,
                overwrite: true,
                ignore_zeros: false,
                obj: RefCell::new(obj),
                pos: Cell::new(0),
            },
        }
    }
    /// Unwrap this archive, returning the underlying object.
    pub fn into_inner(self) -> R {
        self.inner.obj.into_inner()
    }
    /// Construct an iterator over the entries in this archive.
    ///
    /// Note that care must be taken to consider each entry within an archive in
    /// sequence. If entries are processed out of sequence (from what the
    /// iterator returns), then the contents read for each entry may be
    /// corrupted.
    pub fn entries(&mut self) -> io::Result<Entries<R>> {
        // Unsize to the type-erased archive so the non-generic helper does
        // the actual work (keeps monomorphized code small).
        let me: &mut Archive<dyn Read> = self;
        me._entries(None).map(|fields| Entries {
            fields: fields,
            _ignored: marker::PhantomData,
        })
    }
    /// Unpacks the contents tarball into the specified `dst`.
    ///
    /// This function will iterate over the entire contents of this tarball,
    /// extracting each file in turn to the location specified by the entry's
    /// path name.
    ///
    /// This operation is relatively sensitive in that it will not write files
    /// outside of the path specified by `dst`. Files in the archive which have
    /// a '..' in their path are skipped during the unpacking process.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use std::fs::File;
    /// use tar::Archive;
    ///
    /// let mut ar = Archive::new(File::open("foo.tar").unwrap());
    /// ar.unpack("foo").unwrap();
    /// ```
    pub fn unpack<P: AsRef<Path>>(&mut self, dst: P) -> io::Result<()> {
        let me: &mut Archive<dyn Read> = self;
        me._unpack(dst.as_ref())
    }
    /// Set the mask of the permission bits when unpacking this entry.
    ///
    /// The mask will be inverted when applying against a mode, similar to how
    /// `umask` works on Unix. In logical notation it looks like:
    ///
    /// ```text
    /// new_mode = old_mode & (~mask)
    /// ```
    ///
    /// The mask is 0 by default and is currently only implemented on Unix.
    pub fn set_mask(&mut self, mask: u32) {
        self.inner.mask = mask;
    }
    /// Indicate whether extended file attributes (xattrs on Unix) are preserved
    /// when unpacking this archive.
    ///
    /// This flag is disabled by default and is currently only implemented on
    /// Unix using xattr support. This may eventually be implemented for
    /// Windows, however, if other archive implementations are found which do
    /// this as well.
    pub fn set_unpack_xattrs(&mut self, unpack_xattrs: bool) {
        self.inner.unpack_xattrs = unpack_xattrs;
    }
    /// Indicate whether extended permissions (like suid on Unix) are preserved
    /// when unpacking this entry.
    ///
    /// This flag is disabled by default and is currently only implemented on
    /// Unix.
    pub fn set_preserve_permissions(&mut self, preserve: bool) {
        self.inner.preserve_permissions = preserve;
    }
    /// Indicate whether numeric ownership ids (like uid and gid on Unix)
    /// are preserved when unpacking this entry.
    ///
    /// This flag is disabled by default and is currently only implemented on
    /// Unix.
    pub fn set_preserve_ownerships(&mut self, preserve: bool) {
        self.inner.preserve_ownerships = preserve;
    }
    /// Indicate whether files and symlinks should be overwritten on extraction.
    pub fn set_overwrite(&mut self, overwrite: bool) {
        self.inner.overwrite = overwrite;
    }
    /// Indicate whether access time information is preserved when unpacking
    /// this entry.
    ///
    /// This flag is enabled by default.
    pub fn set_preserve_mtime(&mut self, preserve: bool) {
        self.inner.preserve_mtime = preserve;
    }
    /// Ignore zeroed headers, which would otherwise indicate to the archive that it has no more
    /// entries.
    ///
    /// This can be used in case multiple tar archives have been concatenated together.
    pub fn set_ignore_zeros(&mut self, ignore_zeros: bool) {
        self.inner.ignore_zeros = ignore_zeros;
    }
}
impl<R: Seek + Read> Archive<R> {
    /// Construct an iterator over the entries in this archive for a seekable
    /// reader. Seek will be used to efficiently skip over file contents.
    ///
    /// Note that care must be taken to consider each entry within an archive in
    /// sequence. If entries are processed out of sequence (from what the
    /// iterator returns), then the contents read for each entry may be
    /// corrupted.
    pub fn entries_with_seek(&mut self) -> io::Result<Entries<R>> {
        // Two unsized views of the same archive: the plain reader view drives
        // iteration, the seekable view enables content skipping.
        let me: &Archive<dyn Read> = self;
        let me_seekable: &Archive<dyn SeekRead> = self;
        me._entries(Some(me_seekable)).map(|fields| Entries {
            fields: fields,
            _ignored: marker::PhantomData,
        })
    }
}
impl Archive<dyn Read + '_> {
    /// Creates the shared iteration state over this archive's entries.
    ///
    /// `seekable_archive`, when provided, is a seekable view of the same
    /// archive that lets `skip` use `seek` instead of reading and discarding
    /// bytes.
    ///
    /// # Errors
    ///
    /// Fails unless the underlying reader is still at position 0, because
    /// entry offsets are tracked as absolute positions from the archive start.
    fn _entries<'a>(
        &'a self,
        seekable_archive: Option<&'a Archive<dyn SeekRead + 'a>>,
    ) -> io::Result<EntriesFields<'a>> {
        if self.inner.pos.get() != 0 {
            return Err(other(
                "cannot call entries unless archive is at \
                 position 0",
            ));
        }
        Ok(EntriesFields {
            archive: self,
            seekable_archive,
            done: false,
            next: 0,
            raw: false,
        })
    }
    /// Unpacks the full contents of this archive into the directory `dst`,
    /// creating it first if it does not exist.
    fn _unpack(&mut self, dst: &Path) -> io::Result<()> {
        if dst.symlink_metadata().is_err() {
            fs::create_dir_all(&dst)
                .map_err(|e| TarError::new(format!("failed to create `{}`", dst.display()), e))?;
        }
        // Canonicalizing the dst directory will prepend the path with '\\?\'
        // on windows which will allow windows APIs to treat the path as an
        // extended-length path with a 32,767 character limit. Otherwise all
        // unpacked paths over 260 characters will fail on creation with a
        // NotFound exception.
        //
        // Use the lazy `unwrap_or_else` form so the fallback `PathBuf` is only
        // allocated when canonicalization actually fails.
        let dst = &dst.canonicalize().unwrap_or_else(|_| dst.to_path_buf());
        // Delay any directory entries until the end (they will be created if needed by
        // descendants), to ensure that directory permissions do not interfere with descendant
        // extraction.
        let mut directories = Vec::new();
        for entry in self._entries(None)? {
            let mut file = entry.map_err(|e| TarError::new("failed to iterate over archive", e))?;
            if file.header().entry_type() == crate::EntryType::Directory {
                directories.push(file);
            } else {
                file.unpack_in(dst)?;
            }
        }
        for mut dir in directories {
            dir.unpack_in(dst)?;
        }
        Ok(())
    }
}
impl<'a, R: Read> Entries<'a, R> {
    /// Indicates whether this iterator will return raw entries or not.
    ///
    /// If the raw list of entries are returned, then no preprocessing happens
    /// on account of this library, for example taking into account GNU long name
    /// or long link archive members. Raw iteration is disabled by default.
    pub fn raw(self, raw: bool) -> Entries<'a, R> {
        // Rebuild the iterator with only the `raw` flag toggled.
        let mut fields = self.fields;
        fields.raw = raw;
        Entries {
            fields,
            _ignored: marker::PhantomData,
        }
    }
}
impl<'a, R: Read> Iterator for Entries<'a, R> {
    type Item = io::Result<Entry<'a, R>>;
    fn next(&mut self) -> Option<io::Result<Entry<'a, R>>> {
        // Pull the next raw entry and re-wrap it with the typed reader `R`.
        let result = self.fields.next()?;
        Some(result.map(|raw| EntryFields::from(raw).into_entry()))
    }
}
impl<'a> EntriesFields<'a> {
    /// Reads the next 512-byte header from the archive (skipping ahead to
    /// `self.next` first) and materializes it as an `Entry`, without any
    /// GNU-longname/longlink or PAX post-processing.
    ///
    /// `pax_extensions` holds the PAX extension bytes collected for this
    /// member so far; when present they may override the size/uid/gid stored
    /// in the fixed-width header fields.
    fn next_entry_raw(
        &mut self,
        pax_extensions: Option<&[u8]>,
    ) -> io::Result<Option<Entry<'a, io::Empty>>> {
        let mut header = Header::new_old();
        let mut header_pos = self.next;
        loop {
            // Seek to the start of the next header in the archive
            let delta = self.next - self.archive.inner.pos.get();
            self.skip(delta)?;
            // EOF is an indicator that we are at the end of the archive.
            if !try_read_all(&mut &self.archive.inner, header.as_mut_bytes())? {
                return Ok(None);
            }
            // If a header is not all zeros, we have another valid header.
            // Otherwise, check if we are ignoring zeros and continue, or break as if this is the
            // end of the archive.
            if !header.as_bytes().iter().all(|i| *i == 0) {
                self.next += 512;
                break;
            }
            if !self.archive.inner.ignore_zeros {
                return Ok(None);
            }
            self.next += 512;
            header_pos = self.next;
        }
        // Make sure the checksum is ok
        //
        // The tar checksum is defined with the 8-byte checksum field itself
        // treated as ASCII spaces (0x20 == 32); bytes 148..156 are skipped in
        // the sum and accounted for by the `+ 8 * 32` term.
        let sum = header.as_bytes()[..148]
            .iter()
            .chain(&header.as_bytes()[156..])
            .fold(0, |a, b| a + (*b as u32))
            + 8 * 32;
        let cksum = header.cksum()?;
        if sum != cksum {
            return Err(other("archive header checksum mismatch"));
        }
        // PAX extensions may carry values that override the fixed-width
        // header fields (size, uid, gid).
        let mut pax_size: Option<u64> = None;
        if let Some(pax_extensions_ref) = &pax_extensions {
            pax_size = pax_extensions_value(pax_extensions_ref, PAX_SIZE);
            if let Some(pax_uid) = pax_extensions_value(pax_extensions_ref, PAX_UID) {
                header.set_uid(pax_uid);
            }
            if let Some(pax_gid) = pax_extensions_value(pax_extensions_ref, PAX_GID) {
                header.set_gid(pax_gid);
            }
        }
        let file_pos = self.next;
        let mut size = header.entry_size()?;
        // A zero header size with a PAX size present means the real size is
        // the PAX one (used for files larger than the octal field allows).
        if size == 0 {
            if let Some(pax_size) = pax_size {
                size = pax_size;
            }
        }
        let ret = EntryFields {
            size: size,
            header_pos: header_pos,
            file_pos: file_pos,
            data: vec![EntryIo::Data((&self.archive.inner).take(size))],
            header: header,
            long_pathname: None,
            long_linkname: None,
            pax_extensions: None,
            mask: self.archive.inner.mask,
            unpack_xattrs: self.archive.inner.unpack_xattrs,
            preserve_permissions: self.archive.inner.preserve_permissions,
            preserve_mtime: self.archive.inner.preserve_mtime,
            overwrite: self.archive.inner.overwrite,
            preserve_ownerships: self.archive.inner.preserve_ownerships,
        };
        // Store where the next entry is, rounding up by 512 bytes (the size of
        // a header);
        let size = size
            .checked_add(511)
            .ok_or_else(|| other("size overflow"))?;
        self.next = self
            .next
            .checked_add(size & !(512 - 1))
            .ok_or_else(|| other("size overflow"))?;
        Ok(Some(ret.into_entry()))
    }
    /// Reads the next *logical* entry, consuming any GNU long-name/long-link
    /// and PAX extension members that describe it and attaching their data to
    /// the returned entry. In raw mode this is just `next_entry_raw`.
    fn next_entry(&mut self) -> io::Result<Option<Entry<'a, io::Empty>>> {
        if self.raw {
            return self.next_entry_raw(None);
        }
        let mut gnu_longname = None;
        let mut gnu_longlink = None;
        let mut pax_extensions = None;
        // Counts raw members consumed for this logical entry, so EOF right
        // after a meta-entry can be diagnosed as a truncated archive.
        let mut processed = 0;
        loop {
            processed += 1;
            let entry = match self.next_entry_raw(pax_extensions.as_deref())? {
                Some(entry) => entry,
                None if processed > 1 => {
                    return Err(other(
                        "members found describing a future member \
                         but no future member found",
                    ));
                }
                None => return Ok(None),
            };
            let is_recognized_header =
                entry.header().as_gnu().is_some() || entry.header().as_ustar().is_some();
            if is_recognized_header && entry.header().entry_type().is_gnu_longname() {
                if gnu_longname.is_some() {
                    return Err(other(
                        "two long name entries describing \
                         the same member",
                    ));
                }
                gnu_longname = Some(EntryFields::from(entry).read_all()?);
                continue;
            }
            if is_recognized_header && entry.header().entry_type().is_gnu_longlink() {
                if gnu_longlink.is_some() {
                    return Err(other(
                        "two long name entries describing \
                         the same member",
                    ));
                }
                gnu_longlink = Some(EntryFields::from(entry).read_all()?);
                continue;
            }
            if is_recognized_header && entry.header().entry_type().is_pax_local_extensions() {
                if pax_extensions.is_some() {
                    return Err(other(
                        "two pax extensions entries describing \
                         the same member",
                    ));
                }
                pax_extensions = Some(EntryFields::from(entry).read_all()?);
                continue;
            }
            // A regular member: attach whatever meta-data was collected above.
            let mut fields = EntryFields::from(entry);
            fields.long_pathname = gnu_longname;
            fields.long_linkname = gnu_longlink;
            fields.pax_extensions = pax_extensions;
            self.parse_sparse_header(&mut fields)?;
            return Ok(Some(fields.into_entry()));
        }
    }
    /// For GNU sparse entries, replaces `entry.data` with the list of
    /// data/zero-pad blocks described by the (possibly extended) sparse
    /// headers, validating ordering, alignment, and total sizes.
    fn parse_sparse_header(&mut self, entry: &mut EntryFields<'a>) -> io::Result<()> {
        if !entry.header.entry_type().is_gnu_sparse() {
            return Ok(());
        }
        let gnu = match entry.header.as_gnu() {
            Some(gnu) => gnu,
            None => return Err(other("sparse entry type listed but not GNU header")),
        };
        // Sparse files are represented internally as a list of blocks that are
        // read. Blocks are either a bunch of 0's or they're data from the
        // underlying archive.
        //
        // Blocks of a sparse file are described by the `GnuSparseHeader`
        // structure, some of which are contained in `GnuHeader` but some of
        // which may also be contained after the first header in further
        // headers.
        //
        // We read off all the blocks here and use the `add_block` function to
        // incrementally add them to the list of I/O block (in `entry.data`).
        // The `add_block` function also validates that each chunk comes after
        // the previous, we don't overrun the end of the file, and each block is
        // aligned to a 512-byte boundary in the archive itself.
        //
        // At the end we verify that the sparse file size (`Header::size`) is
        // the same as the current offset (described by the list of blocks) as
        // well as the amount of data read equals the size of the entry
        // (`Header::entry_size`).
        entry.data.truncate(0);
        // `cur` is the logical offset reached so far; `remaining` is archive
        // data not yet accounted for by a block.
        let mut cur = 0;
        let mut remaining = entry.size;
        {
            let data = &mut entry.data;
            let reader = &self.archive.inner;
            let size = entry.size;
            let mut add_block = |block: &GnuSparseHeader| -> io::Result<_> {
                if block.is_empty() {
                    return Ok(());
                }
                let off = block.offset()?;
                let len = block.length()?;
                if len != 0 && (size - remaining) % 512 != 0 {
                    return Err(other(
                        "previous block in sparse file was not \
                         aligned to 512-byte boundary",
                    ));
                } else if off < cur {
                    return Err(other(
                        "out of order or overlapping sparse \
                         blocks",
                    ));
                } else if cur < off {
                    // Gap between blocks: synthesize zeros instead of reading.
                    let block = io::repeat(0).take(off - cur);
                    data.push(EntryIo::Pad(block));
                }
                cur = off
                    .checked_add(len)
                    .ok_or_else(|| other("more bytes listed in sparse file than u64 can hold"))?;
                remaining = remaining.checked_sub(len).ok_or_else(|| {
                    other(
                        "sparse file consumed more data than the header \
                         listed",
                    )
                })?;
                data.push(EntryIo::Data(reader.take(len)));
                Ok(())
            };
            for block in gnu.sparse.iter() {
                add_block(block)?
            }
            if gnu.is_extended() {
                // Seed the loop: pretend the previous header was marked
                // extended so at least one extension header is read.
                let mut ext = GnuExtSparseHeader::new();
                ext.isextended[0] = 1;
                while ext.is_extended() {
                    if !try_read_all(&mut &self.archive.inner, ext.as_mut_bytes())? {
                        return Err(other("failed to read extension"));
                    }
                    self.next += 512;
                    for block in ext.sparse.iter() {
                        add_block(block)?;
                    }
                }
            }
        }
        if cur != gnu.real_size()? {
            return Err(other(
                "mismatch in sparse file chunks and \
                 size in header",
            ));
        }
        // The entry's logical size is the expanded (sparse) size.
        entry.size = cur;
        if remaining > 0 {
            return Err(other(
                "mismatch in sparse file chunks and \
                 entry size in header",
            ));
        }
        Ok(())
    }
    /// Advances the archive position by `amt` bytes, seeking when a seekable
    /// view is available and otherwise reading into a scratch buffer.
    fn skip(&mut self, mut amt: u64) -> io::Result<()> {
        if let Some(seekable_archive) = self.seekable_archive {
            let pos = io::SeekFrom::Current(
                i64::try_from(amt).map_err(|_| other("seek position out of bounds"))?,
            );
            (&seekable_archive.inner).seek(pos)?;
        } else {
            let mut buf = [0u8; 4096 * 8];
            while amt > 0 {
                let n = cmp::min(amt, buf.len() as u64);
                let n = (&self.archive.inner).read(&mut buf[..n as usize])?;
                if n == 0 {
                    return Err(other("unexpected EOF during skip"));
                }
                amt -= n as u64;
            }
        }
        Ok(())
    }
}
impl<'a> Iterator for EntriesFields<'a> {
    type Item = io::Result<Entry<'a, io::Empty>>;
    fn next(&mut self) -> Option<io::Result<Entry<'a, io::Empty>>> {
        // Once exhausted (or after an error) the iterator is fused.
        if self.done {
            return None;
        }
        match self.next_entry() {
            Ok(Some(entry)) => Some(Ok(entry)),
            Ok(None) => {
                self.done = true;
                None
            }
            Err(err) => {
                self.done = true;
                Some(Err(err))
            }
        }
    }
}
impl<'a, R: ?Sized + Read> Read for &'a ArchiveInner<R> {
    fn read(&mut self, into: &mut [u8]) -> io::Result<usize> {
        // Delegate to the wrapped reader, then account for the consumed bytes
        // in the shared position counter.
        let n = self.obj.borrow_mut().read(into)?;
        self.pos.set(self.pos.get() + n as u64);
        Ok(n)
    }
}
impl<'a, R: ?Sized + Seek> Seek for &'a ArchiveInner<R> {
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        // Delegate the seek, then record the resulting absolute position.
        let new_pos = self.obj.borrow_mut().seek(pos)?;
        self.pos.set(new_pos);
        Ok(new_pos)
    }
}
/// Try to fill the buffer from the reader.
///
/// If the reader reaches its end before filling the buffer at all, returns `false`.
/// Otherwise returns `true`.
fn try_read_all<R: Read>(r: &mut R, buf: &mut [u8]) -> io::Result<bool> {
    let mut filled = 0;
    while filled < buf.len() {
        let n = r.read(&mut buf[filled..])?;
        if n == 0 {
            // EOF: fine on a block boundary, an error mid-block.
            return if filled == 0 {
                Ok(false)
            } else {
                Err(other("failed to read entire block"))
            };
        }
        filled += n;
    }
    Ok(true)
}
|
#[doc = r"Value read from the register"]
pub struct R {
    // Raw 32-bit snapshot of the register contents.
    bits: u32,
}
#[doc = r"Value to write to the register"]
pub struct W {
    // Raw 32-bit value accumulated by the field proxies before being written.
    bits: u32,
}
impl super::HB8CFG2 {
    #[doc = r"Modifies the contents of the register"]
    #[inline(always)]
    pub fn modify<F>(&self, f: F)
    where
        for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
    {
        // Read-modify-write: the closure gets the current value (`R`) and a
        // writer (`W`) seeded with the same bits, so untouched fields keep
        // their current values.
        let bits = self.register.get();
        self.register.set(f(&R { bits }, &mut W { bits }).bits);
    }
    #[doc = r"Reads the contents of the register"]
    #[inline(always)]
    pub fn read(&self) -> R {
        R {
            bits: self.register.get(),
        }
    }
    #[doc = r"Writes to the register"]
    #[inline(always)]
    pub fn write<F>(&self, f: F)
    where
        F: FnOnce(&mut W) -> &mut W,
    {
        // Unlike `modify`, the writer starts from the reset value, so any
        // field the closure does not set is written with its reset state.
        self.register.set(
            f(&mut W {
                bits: Self::reset_value(),
            })
            .bits,
        );
    }
    #[doc = r"Reset value of the register"]
    #[inline(always)]
    pub const fn reset_value() -> u32 {
        0
    }
    #[doc = r"Writes the reset value to the register"]
    #[inline(always)]
    pub fn reset(&self) {
        self.register.set(Self::reset_value())
    }
}
/// Possible values of the field `EPI_HB8CFG2_MODE`
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EPI_HB8CFG2_MODER {
    /// ADMUX - AD\[7:0\]
    EPI_HB8CFG2_MODE_ADMUX,
    /// ADNONMUX - D\[7:0\]
    EPI_HB8CFG2_MODE_AD,
    /// Reserved
    _Reserved(u8),
}
impl EPI_HB8CFG2_MODER {
    /// Value of the field as raw bits
    #[inline(always)]
    pub fn bits(&self) -> u8 {
        use EPI_HB8CFG2_MODER::*;
        match self {
            EPI_HB8CFG2_MODE_ADMUX => 0,
            EPI_HB8CFG2_MODE_AD => 1,
            _Reserved(raw) => *raw,
        }
    }
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline(always)]
    pub fn _from(value: u8) -> EPI_HB8CFG2_MODER {
        // Unlisted encodings are preserved verbatim as `_Reserved`.
        match value {
            0 => EPI_HB8CFG2_MODER::EPI_HB8CFG2_MODE_ADMUX,
            1 => EPI_HB8CFG2_MODER::EPI_HB8CFG2_MODE_AD,
            other => EPI_HB8CFG2_MODER::_Reserved(other),
        }
    }
    /// Checks if the value of the field is `EPI_HB8CFG2_MODE_ADMUX`
    #[inline(always)]
    pub fn is_epi_hb8cfg2_mode_admux(&self) -> bool {
        matches!(self, EPI_HB8CFG2_MODER::EPI_HB8CFG2_MODE_ADMUX)
    }
    /// Checks if the value of the field is `EPI_HB8CFG2_MODE_AD`
    #[inline(always)]
    pub fn is_epi_hb8cfg2_mode_ad(&self) -> bool {
        matches!(self, EPI_HB8CFG2_MODER::EPI_HB8CFG2_MODE_AD)
    }
}
/// Values that can be written to the field `EPI_HB8CFG2_MODE`
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EPI_HB8CFG2_MODEW {
    /// ADMUX - AD\[7:0\]
    EPI_HB8CFG2_MODE_ADMUX,
    /// ADNONMUX - D\[7:0\]
    EPI_HB8CFG2_MODE_AD,
}
impl EPI_HB8CFG2_MODEW {
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline(always)]
    pub fn _bits(&self) -> u8 {
        // Fieldless enum with default discriminants 0 and 1, so the
        // discriminant cast yields exactly the encoding table.
        *self as u8
    }
}
/// Proxy
pub struct _EPI_HB8CFG2_MODEW<'a> {
    w: &'a mut W,
}
impl<'a> _EPI_HB8CFG2_MODEW<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // 2-bit field at offset 0: clear, then merge the masked value.
        const OFFSET: u32 = 0;
        const MASK: u32 = 3;
        self.w.bits = (self.w.bits & !(MASK << OFFSET)) | (((value as u32) & MASK) << OFFSET);
        self.w
    }
    /// Writes `variant` to the field
    #[inline(always)]
    pub fn variant(self, variant: EPI_HB8CFG2_MODEW) -> &'a mut W {
        // Safe because every `EPI_HB8CFG2_MODEW` variant encodes to a value
        // within the field's mask.
        unsafe { self.bits(variant._bits()) }
    }
    /// ADMUX - AD\[7:0\]
    #[inline(always)]
    pub fn epi_hb8cfg2_mode_admux(self) -> &'a mut W {
        self.variant(EPI_HB8CFG2_MODEW::EPI_HB8CFG2_MODE_ADMUX)
    }
    /// ADNONMUX - D\[7:0\]
    #[inline(always)]
    pub fn epi_hb8cfg2_mode_ad(self) -> &'a mut W {
        self.variant(EPI_HB8CFG2_MODEW::EPI_HB8CFG2_MODE_AD)
    }
}
/// Possible values of the field `EPI_HB8CFG2_RDWS`
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EPI_HB8CFG2_RDWSR {
    /// Active RDn is 2 EPI clocks
    EPI_HB8CFG2_RDWS_2,
    /// Active RDn is 4 EPI clocks
    EPI_HB8CFG2_RDWS_4,
    /// Active RDn is 6 EPI clocks
    EPI_HB8CFG2_RDWS_6,
    /// Active RDn is 8 EPI clocks
    EPI_HB8CFG2_RDWS_8,
}
impl EPI_HB8CFG2_RDWSR {
    /// Value of the field as raw bits
    #[inline(always)]
    pub fn bits(&self) -> u8 {
        // Fieldless enum with default discriminants 0..=3 matching the
        // hardware encoding, so the cast is the lookup table.
        *self as u8
    }
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline(always)]
    pub fn _from(value: u8) -> EPI_HB8CFG2_RDWSR {
        use EPI_HB8CFG2_RDWSR::*;
        match value {
            0 => EPI_HB8CFG2_RDWS_2,
            1 => EPI_HB8CFG2_RDWS_4,
            2 => EPI_HB8CFG2_RDWS_6,
            3 => EPI_HB8CFG2_RDWS_8,
            // Callers always pass a masked 2-bit value.
            _ => unreachable!(),
        }
    }
    /// Checks if the value of the field is `EPI_HB8CFG2_RDWS_2`
    #[inline(always)]
    pub fn is_epi_hb8cfg2_rdws_2(&self) -> bool {
        matches!(self, EPI_HB8CFG2_RDWSR::EPI_HB8CFG2_RDWS_2)
    }
    /// Checks if the value of the field is `EPI_HB8CFG2_RDWS_4`
    #[inline(always)]
    pub fn is_epi_hb8cfg2_rdws_4(&self) -> bool {
        matches!(self, EPI_HB8CFG2_RDWSR::EPI_HB8CFG2_RDWS_4)
    }
    /// Checks if the value of the field is `EPI_HB8CFG2_RDWS_6`
    #[inline(always)]
    pub fn is_epi_hb8cfg2_rdws_6(&self) -> bool {
        matches!(self, EPI_HB8CFG2_RDWSR::EPI_HB8CFG2_RDWS_6)
    }
    /// Checks if the value of the field is `EPI_HB8CFG2_RDWS_8`
    #[inline(always)]
    pub fn is_epi_hb8cfg2_rdws_8(&self) -> bool {
        matches!(self, EPI_HB8CFG2_RDWSR::EPI_HB8CFG2_RDWS_8)
    }
}
/// Values that can be written to the field `EPI_HB8CFG2_RDWS`
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EPI_HB8CFG2_RDWSW {
    /// Active RDn is 2 EPI clocks
    EPI_HB8CFG2_RDWS_2,
    /// Active RDn is 4 EPI clocks
    EPI_HB8CFG2_RDWS_4,
    /// Active RDn is 6 EPI clocks
    EPI_HB8CFG2_RDWS_6,
    /// Active RDn is 8 EPI clocks
    EPI_HB8CFG2_RDWS_8,
}
impl EPI_HB8CFG2_RDWSW {
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline(always)]
    pub fn _bits(&self) -> u8 {
        // Default discriminants 0..=3 equal the hardware encoding.
        *self as u8
    }
}
/// Proxy
pub struct _EPI_HB8CFG2_RDWSW<'a> {
    w: &'a mut W,
}
impl<'a> _EPI_HB8CFG2_RDWSW<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bits(self, value: u8) -> &'a mut W {
        // 2-bit field at offset 4: clear, then merge the masked value.
        const OFFSET: u32 = 4;
        const MASK: u32 = 3;
        self.w.bits = (self.w.bits & !(MASK << OFFSET)) | (((value as u32) & MASK) << OFFSET);
        self.w
    }
    /// Writes `variant` to the field
    #[inline(always)]
    pub fn variant(self, variant: EPI_HB8CFG2_RDWSW) -> &'a mut W {
        self.bits(variant._bits())
    }
    /// Active RDn is 2 EPI clocks
    #[inline(always)]
    pub fn epi_hb8cfg2_rdws_2(self) -> &'a mut W {
        self.variant(EPI_HB8CFG2_RDWSW::EPI_HB8CFG2_RDWS_2)
    }
    /// Active RDn is 4 EPI clocks
    #[inline(always)]
    pub fn epi_hb8cfg2_rdws_4(self) -> &'a mut W {
        self.variant(EPI_HB8CFG2_RDWSW::EPI_HB8CFG2_RDWS_4)
    }
    /// Active RDn is 6 EPI clocks
    #[inline(always)]
    pub fn epi_hb8cfg2_rdws_6(self) -> &'a mut W {
        self.variant(EPI_HB8CFG2_RDWSW::EPI_HB8CFG2_RDWS_6)
    }
    /// Active RDn is 8 EPI clocks
    #[inline(always)]
    pub fn epi_hb8cfg2_rdws_8(self) -> &'a mut W {
        self.variant(EPI_HB8CFG2_RDWSW::EPI_HB8CFG2_RDWS_8)
    }
}
/// Possible values of the field `EPI_HB8CFG2_WRWS`
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EPI_HB8CFG2_WRWSR {
    /// Active WRn is 2 EPI clocks
    EPI_HB8CFG2_WRWS_2,
    /// Active WRn is 4 EPI clocks
    EPI_HB8CFG2_WRWS_4,
    /// Active WRn is 6 EPI clocks
    EPI_HB8CFG2_WRWS_6,
    /// Active WRn is 8 EPI clocks
    EPI_HB8CFG2_WRWS_8,
}
impl EPI_HB8CFG2_WRWSR {
    /// Value of the field as raw bits
    #[inline(always)]
    pub fn bits(&self) -> u8 {
        // Default discriminants 0..=3 equal the hardware encoding.
        *self as u8
    }
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline(always)]
    pub fn _from(value: u8) -> EPI_HB8CFG2_WRWSR {
        use EPI_HB8CFG2_WRWSR::*;
        match value {
            0 => EPI_HB8CFG2_WRWS_2,
            1 => EPI_HB8CFG2_WRWS_4,
            2 => EPI_HB8CFG2_WRWS_6,
            3 => EPI_HB8CFG2_WRWS_8,
            // Callers always pass a masked 2-bit value.
            _ => unreachable!(),
        }
    }
    /// Checks if the value of the field is `EPI_HB8CFG2_WRWS_2`
    #[inline(always)]
    pub fn is_epi_hb8cfg2_wrws_2(&self) -> bool {
        matches!(self, EPI_HB8CFG2_WRWSR::EPI_HB8CFG2_WRWS_2)
    }
    /// Checks if the value of the field is `EPI_HB8CFG2_WRWS_4`
    #[inline(always)]
    pub fn is_epi_hb8cfg2_wrws_4(&self) -> bool {
        matches!(self, EPI_HB8CFG2_WRWSR::EPI_HB8CFG2_WRWS_4)
    }
    /// Checks if the value of the field is `EPI_HB8CFG2_WRWS_6`
    #[inline(always)]
    pub fn is_epi_hb8cfg2_wrws_6(&self) -> bool {
        matches!(self, EPI_HB8CFG2_WRWSR::EPI_HB8CFG2_WRWS_6)
    }
    /// Checks if the value of the field is `EPI_HB8CFG2_WRWS_8`
    #[inline(always)]
    pub fn is_epi_hb8cfg2_wrws_8(&self) -> bool {
        matches!(self, EPI_HB8CFG2_WRWSR::EPI_HB8CFG2_WRWS_8)
    }
}
/// Values that can be written to the field `EPI_HB8CFG2_WRWS`
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EPI_HB8CFG2_WRWSW {
    /// Active WRn is 2 EPI clocks
    EPI_HB8CFG2_WRWS_2,
    /// Active WRn is 4 EPI clocks
    EPI_HB8CFG2_WRWS_4,
    /// Active WRn is 6 EPI clocks
    EPI_HB8CFG2_WRWS_6,
    /// Active WRn is 8 EPI clocks
    EPI_HB8CFG2_WRWS_8,
}
impl EPI_HB8CFG2_WRWSW {
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline(always)]
    pub fn _bits(&self) -> u8 {
        // Default discriminants 0..=3 equal the hardware encoding.
        *self as u8
    }
}
/// Proxy
pub struct _EPI_HB8CFG2_WRWSW<'a> {
    w: &'a mut W,
}
impl<'a> _EPI_HB8CFG2_WRWSW<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bits(self, value: u8) -> &'a mut W {
        // 2-bit field at offset 6: clear, then merge the masked value.
        const OFFSET: u32 = 6;
        const MASK: u32 = 3;
        self.w.bits = (self.w.bits & !(MASK << OFFSET)) | (((value as u32) & MASK) << OFFSET);
        self.w
    }
    /// Writes `variant` to the field
    #[inline(always)]
    pub fn variant(self, variant: EPI_HB8CFG2_WRWSW) -> &'a mut W {
        self.bits(variant._bits())
    }
    /// Active WRn is 2 EPI clocks
    #[inline(always)]
    pub fn epi_hb8cfg2_wrws_2(self) -> &'a mut W {
        self.variant(EPI_HB8CFG2_WRWSW::EPI_HB8CFG2_WRWS_2)
    }
    /// Active WRn is 4 EPI clocks
    #[inline(always)]
    pub fn epi_hb8cfg2_wrws_4(self) -> &'a mut W {
        self.variant(EPI_HB8CFG2_WRWSW::EPI_HB8CFG2_WRWS_4)
    }
    /// Active WRn is 6 EPI clocks
    #[inline(always)]
    pub fn epi_hb8cfg2_wrws_6(self) -> &'a mut W {
        self.variant(EPI_HB8CFG2_WRWSW::EPI_HB8CFG2_WRWS_6)
    }
    /// Active WRn is 8 EPI clocks
    #[inline(always)]
    pub fn epi_hb8cfg2_wrws_8(self) -> &'a mut W {
        self.variant(EPI_HB8CFG2_WRWSW::EPI_HB8CFG2_WRWS_8)
    }
}
/// Value of the field
pub struct EPI_HB8CFG2_ALEHIGHR {
    bits: bool,
}
impl EPI_HB8CFG2_ALEHIGHR {
    /// Value of the field as raw bits
    #[inline(always)]
    pub fn bit(&self) -> bool {
        self.bits
    }
    /// Returns `true` if the bit is set (1)
    #[inline(always)]
    pub fn bit_is_set(&self) -> bool {
        self.bits
    }
    /// Returns `true` if the bit is clear (0)
    #[inline(always)]
    pub fn bit_is_clear(&self) -> bool {
        !self.bits
    }
}
/// Proxy
pub struct _EPI_HB8CFG2_ALEHIGHW<'a> {
    w: &'a mut W,
}
impl<'a> _EPI_HB8CFG2_ALEHIGHW<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Single-bit field at offset 19.
        const OFFSET: u32 = 19;
        self.w.bits = (self.w.bits & !(1 << OFFSET)) | (((value as u32) & 1) << OFFSET);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Value of the field
pub struct EPI_HB8CFG2_RDHIGHR {
    bits: bool,
}
impl EPI_HB8CFG2_RDHIGHR {
    /// Value of the field as raw bits
    #[inline(always)]
    pub fn bit(&self) -> bool {
        self.bits
    }
    /// Returns `true` if the bit is set (1)
    #[inline(always)]
    pub fn bit_is_set(&self) -> bool {
        self.bits
    }
    /// Returns `true` if the bit is clear (0)
    #[inline(always)]
    pub fn bit_is_clear(&self) -> bool {
        !self.bits
    }
}
/// Proxy
pub struct _EPI_HB8CFG2_RDHIGHW<'a> {
    w: &'a mut W,
}
impl<'a> _EPI_HB8CFG2_RDHIGHW<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Single-bit field at offset 20.
        const OFFSET: u32 = 20;
        self.w.bits = (self.w.bits & !(1 << OFFSET)) | (((value as u32) & 1) << OFFSET);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Value of the field
pub struct EPI_HB8CFG2_WRHIGHR {
    bits: bool,
}
impl EPI_HB8CFG2_WRHIGHR {
    /// Value of the field as raw bits
    #[inline(always)]
    pub fn bit(&self) -> bool {
        self.bits
    }
    /// Returns `true` if the bit is set (1)
    #[inline(always)]
    pub fn bit_is_set(&self) -> bool {
        self.bits
    }
    /// Returns `true` if the bit is clear (0)
    #[inline(always)]
    pub fn bit_is_clear(&self) -> bool {
        !self.bits
    }
}
/// Proxy
pub struct _EPI_HB8CFG2_WRHIGHW<'a> {
    w: &'a mut W,
}
impl<'a> _EPI_HB8CFG2_WRHIGHW<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Single-bit field at offset 21.
        const OFFSET: u32 = 21;
        self.w.bits = (self.w.bits & !(1 << OFFSET)) | (((value as u32) & 1) << OFFSET);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Possible values of the field `EPI_HB8CFG2_CSCFG`
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EPI_HB8CFG2_CSCFGR {
    /// ALE Configuration
    EPI_HB8CFG2_CSCFG_ALE,
    /// CSn Configuration
    EPI_HB8CFG2_CSCFG_CS,
    /// Dual CSn Configuration
    EPI_HB8CFG2_CSCFG_DCS,
    /// ALE with Dual CSn Configuration
    EPI_HB8CFG2_CSCFG_ADCS,
}
impl EPI_HB8CFG2_CSCFGR {
    /// Value of the field as raw bits
    #[inline(always)]
    pub fn bits(&self) -> u8 {
        // Default discriminants 0..=3 equal the hardware encoding.
        *self as u8
    }
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline(always)]
    pub fn _from(value: u8) -> EPI_HB8CFG2_CSCFGR {
        use EPI_HB8CFG2_CSCFGR::*;
        match value {
            0 => EPI_HB8CFG2_CSCFG_ALE,
            1 => EPI_HB8CFG2_CSCFG_CS,
            2 => EPI_HB8CFG2_CSCFG_DCS,
            3 => EPI_HB8CFG2_CSCFG_ADCS,
            // Callers always pass a masked 2-bit value.
            _ => unreachable!(),
        }
    }
    /// Checks if the value of the field is `EPI_HB8CFG2_CSCFG_ALE`
    #[inline(always)]
    pub fn is_epi_hb8cfg2_cscfg_ale(&self) -> bool {
        matches!(self, EPI_HB8CFG2_CSCFGR::EPI_HB8CFG2_CSCFG_ALE)
    }
    /// Checks if the value of the field is `EPI_HB8CFG2_CSCFG_CS`
    #[inline(always)]
    pub fn is_epi_hb8cfg2_cscfg_cs(&self) -> bool {
        matches!(self, EPI_HB8CFG2_CSCFGR::EPI_HB8CFG2_CSCFG_CS)
    }
    /// Checks if the value of the field is `EPI_HB8CFG2_CSCFG_DCS`
    #[inline(always)]
    pub fn is_epi_hb8cfg2_cscfg_dcs(&self) -> bool {
        matches!(self, EPI_HB8CFG2_CSCFGR::EPI_HB8CFG2_CSCFG_DCS)
    }
    /// Checks if the value of the field is `EPI_HB8CFG2_CSCFG_ADCS`
    #[inline(always)]
    pub fn is_epi_hb8cfg2_cscfg_adcs(&self) -> bool {
        matches!(self, EPI_HB8CFG2_CSCFGR::EPI_HB8CFG2_CSCFG_ADCS)
    }
}
/// Values that can be written to the field `EPI_HB8CFG2_CSCFG`
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EPI_HB8CFG2_CSCFGW {
    /// ALE Configuration
    EPI_HB8CFG2_CSCFG_ALE,
    /// CSn Configuration
    EPI_HB8CFG2_CSCFG_CS,
    /// Dual CSn Configuration
    EPI_HB8CFG2_CSCFG_DCS,
    /// ALE with Dual CSn Configuration
    EPI_HB8CFG2_CSCFG_ADCS,
}
impl EPI_HB8CFG2_CSCFGW {
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline(always)]
    pub fn _bits(&self) -> u8 {
        // Default discriminants 0..=3 equal the hardware encoding.
        *self as u8
    }
}
/// Proxy
pub struct _EPI_HB8CFG2_CSCFGW<'a> {
    w: &'a mut W,
}
impl<'a> _EPI_HB8CFG2_CSCFGW<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bits(self, value: u8) -> &'a mut W {
        // 2-bit field at offset 24: clear, then merge the masked value.
        const OFFSET: u32 = 24;
        const MASK: u32 = 3;
        self.w.bits = (self.w.bits & !(MASK << OFFSET)) | (((value as u32) & MASK) << OFFSET);
        self.w
    }
    /// Writes `variant` to the field
    #[inline(always)]
    pub fn variant(self, variant: EPI_HB8CFG2_CSCFGW) -> &'a mut W {
        self.bits(variant._bits())
    }
    /// ALE Configuration
    #[inline(always)]
    pub fn epi_hb8cfg2_cscfg_ale(self) -> &'a mut W {
        self.variant(EPI_HB8CFG2_CSCFGW::EPI_HB8CFG2_CSCFG_ALE)
    }
    /// CSn Configuration
    #[inline(always)]
    pub fn epi_hb8cfg2_cscfg_cs(self) -> &'a mut W {
        self.variant(EPI_HB8CFG2_CSCFGW::EPI_HB8CFG2_CSCFG_CS)
    }
    /// Dual CSn Configuration
    #[inline(always)]
    pub fn epi_hb8cfg2_cscfg_dcs(self) -> &'a mut W {
        self.variant(EPI_HB8CFG2_CSCFGW::EPI_HB8CFG2_CSCFG_DCS)
    }
    /// ALE with Dual CSn Configuration
    #[inline(always)]
    pub fn epi_hb8cfg2_cscfg_adcs(self) -> &'a mut W {
        self.variant(EPI_HB8CFG2_CSCFGW::EPI_HB8CFG2_CSCFG_ADCS)
    }
}
/// Value of the field
pub struct EPI_HB8CFG2_CSBAUDR {
    bits: bool,
}
impl EPI_HB8CFG2_CSBAUDR {
    /// Value of the field as raw bits
    #[inline(always)]
    pub fn bit(&self) -> bool {
        self.bits
    }
    /// Returns `true` if the bit is set (1)
    #[inline(always)]
    pub fn bit_is_set(&self) -> bool {
        self.bits
    }
    /// Returns `true` if the bit is clear (0)
    #[inline(always)]
    pub fn bit_is_clear(&self) -> bool {
        !self.bits
    }
}
/// Proxy
pub struct _EPI_HB8CFG2_CSBAUDW<'a> {
    w: &'a mut W,
}
impl<'a> _EPI_HB8CFG2_CSBAUDW<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Single-bit field at offset 26.
        const OFFSET: u32 = 26;
        self.w.bits = (self.w.bits & !(1 << OFFSET)) | (((value as u32) & 1) << OFFSET);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Value of the field
pub struct EPI_HB8CFG2_CSCFGEXTR {
    bits: bool,
}
impl EPI_HB8CFG2_CSCFGEXTR {
    /// Value of the field as raw bits
    #[inline(always)]
    pub fn bit(&self) -> bool {
        self.bits
    }
    /// Returns `true` if the bit is set (1)
    #[inline(always)]
    pub fn bit_is_set(&self) -> bool {
        self.bits
    }
    /// Returns `true` if the bit is clear (0)
    #[inline(always)]
    pub fn bit_is_clear(&self) -> bool {
        !self.bits
    }
}
/// Proxy
pub struct _EPI_HB8CFG2_CSCFGEXTW<'a> {
    w: &'a mut W,
}
impl<'a> _EPI_HB8CFG2_CSCFGEXTW<'a> {
    /// Writes raw bits to the field
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Single-bit field at offset 27.
        const OFFSET: u32 = 27;
        self.w.bits = (self.w.bits & !(1 << OFFSET)) | (((value as u32) & 1) << OFFSET);
        self.w
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
impl R {
    #[doc = r"Value of the register as raw bits"]
    #[inline(always)]
    pub fn bits(&self) -> u32 {
        self.bits
    }
    // Multi-bit fields: shift to bit 0, mask to field width, decode via `_from`.
    #[doc = "Bits 0:1 - CS1n Host Bus Sub-Mode"]
    #[inline(always)]
    pub fn epi_hb8cfg2_mode(&self) -> EPI_HB8CFG2_MODER {
        EPI_HB8CFG2_MODER::_from(((self.bits >> 0) & 3) as u8)
    }
    #[doc = "Bits 4:5 - CS1n Read Wait States"]
    #[inline(always)]
    pub fn epi_hb8cfg2_rdws(&self) -> EPI_HB8CFG2_RDWSR {
        EPI_HB8CFG2_RDWSR::_from(((self.bits >> 4) & 3) as u8)
    }
    #[doc = "Bits 6:7 - CS1n Write Wait States"]
    #[inline(always)]
    pub fn epi_hb8cfg2_wrws(&self) -> EPI_HB8CFG2_WRWSR {
        EPI_HB8CFG2_WRWSR::_from(((self.bits >> 6) & 3) as u8)
    }
    // Single-bit fields: extract the bit and wrap it in the field's reader type.
    #[doc = "Bit 19 - CS1n ALE Strobe Polarity"]
    #[inline(always)]
    pub fn epi_hb8cfg2_alehigh(&self) -> EPI_HB8CFG2_ALEHIGHR {
        let bits = ((self.bits >> 19) & 1) != 0;
        EPI_HB8CFG2_ALEHIGHR { bits }
    }
    #[doc = "Bit 20 - CS1n READ Strobe Polarity"]
    #[inline(always)]
    pub fn epi_hb8cfg2_rdhigh(&self) -> EPI_HB8CFG2_RDHIGHR {
        let bits = ((self.bits >> 20) & 1) != 0;
        EPI_HB8CFG2_RDHIGHR { bits }
    }
    #[doc = "Bit 21 - CS1n WRITE Strobe Polarity"]
    #[inline(always)]
    pub fn epi_hb8cfg2_wrhigh(&self) -> EPI_HB8CFG2_WRHIGHR {
        let bits = ((self.bits >> 21) & 1) != 0;
        EPI_HB8CFG2_WRHIGHR { bits }
    }
    #[doc = "Bits 24:25 - Chip Select Configuration"]
    #[inline(always)]
    pub fn epi_hb8cfg2_cscfg(&self) -> EPI_HB8CFG2_CSCFGR {
        EPI_HB8CFG2_CSCFGR::_from(((self.bits >> 24) & 3) as u8)
    }
    #[doc = "Bit 26 - Chip Select Baud Rate and Multiple Sub-Mode Configuration enable"]
    #[inline(always)]
    pub fn epi_hb8cfg2_csbaud(&self) -> EPI_HB8CFG2_CSBAUDR {
        let bits = ((self.bits >> 26) & 1) != 0;
        EPI_HB8CFG2_CSBAUDR { bits }
    }
    #[doc = "Bit 27 - Chip Select Extended Configuration"]
    #[inline(always)]
    pub fn epi_hb8cfg2_cscfgext(&self) -> EPI_HB8CFG2_CSCFGEXTR {
        let bits = ((self.bits >> 27) & 1) != 0;
        EPI_HB8CFG2_CSCFGEXTR { bits }
    }
}
impl W {
    #[doc = r"Writes raw bits to the register"]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
    // Each method below returns a field-writer proxy borrowing this `W`;
    // the proxy's `bit`/`bits` methods merge the field into `self.bits`.
    #[doc = "Bits 0:1 - CS1n Host Bus Sub-Mode"]
    #[inline(always)]
    pub fn epi_hb8cfg2_mode(&mut self) -> _EPI_HB8CFG2_MODEW {
        _EPI_HB8CFG2_MODEW { w: self }
    }
    #[doc = "Bits 4:5 - CS1n Read Wait States"]
    #[inline(always)]
    pub fn epi_hb8cfg2_rdws(&mut self) -> _EPI_HB8CFG2_RDWSW {
        _EPI_HB8CFG2_RDWSW { w: self }
    }
    #[doc = "Bits 6:7 - CS1n Write Wait States"]
    #[inline(always)]
    pub fn epi_hb8cfg2_wrws(&mut self) -> _EPI_HB8CFG2_WRWSW {
        _EPI_HB8CFG2_WRWSW { w: self }
    }
    #[doc = "Bit 19 - CS1n ALE Strobe Polarity"]
    #[inline(always)]
    pub fn epi_hb8cfg2_alehigh(&mut self) -> _EPI_HB8CFG2_ALEHIGHW {
        _EPI_HB8CFG2_ALEHIGHW { w: self }
    }
    #[doc = "Bit 20 - CS1n READ Strobe Polarity"]
    #[inline(always)]
    pub fn epi_hb8cfg2_rdhigh(&mut self) -> _EPI_HB8CFG2_RDHIGHW {
        _EPI_HB8CFG2_RDHIGHW { w: self }
    }
    #[doc = "Bit 21 - CS1n WRITE Strobe Polarity"]
    #[inline(always)]
    pub fn epi_hb8cfg2_wrhigh(&mut self) -> _EPI_HB8CFG2_WRHIGHW {
        _EPI_HB8CFG2_WRHIGHW { w: self }
    }
    #[doc = "Bits 24:25 - Chip Select Configuration"]
    #[inline(always)]
    pub fn epi_hb8cfg2_cscfg(&mut self) -> _EPI_HB8CFG2_CSCFGW {
        _EPI_HB8CFG2_CSCFGW { w: self }
    }
    #[doc = "Bit 26 - Chip Select Baud Rate and Multiple Sub-Mode Configuration enable"]
    #[inline(always)]
    pub fn epi_hb8cfg2_csbaud(&mut self) -> _EPI_HB8CFG2_CSBAUDW {
        _EPI_HB8CFG2_CSBAUDW { w: self }
    }
    #[doc = "Bit 27 - Chip Select Extended Configuration"]
    #[inline(always)]
    pub fn epi_hb8cfg2_cscfgext(&mut self) -> _EPI_HB8CFG2_CSCFGEXTW {
        _EPI_HB8CFG2_CSCFGEXTW { w: self }
    }
}
|
//! Defines an interface for register-like actors (via [`RegisterMsg`]) and also provides
//! [`RegisterActor`] for model checking.
#[cfg(doc)]
use crate::actor::ActorModel;
use crate::actor::{Actor, Envelope, Id, Out};
use crate::semantics::register::{Register, RegisterOp, RegisterRet};
use crate::semantics::ConsistencyTester;
use std::borrow::Cow;
use std::fmt::Debug;
use std::hash::Hash;
/// Defines an interface for a register-like actor.
///
/// `RequestId` correlates a `Put`/`Get` with its `PutOk`/`GetOk` response,
/// `Value` is the register's element type, and `InternalMsg` carries the
/// register implementation's own protocol messages.
#[derive(
    Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd, serde::Serialize, serde::Deserialize,
)]
pub enum RegisterMsg<RequestId, Value, InternalMsg> {
    /// A message specific to the register system's internal protocol.
    Internal(InternalMsg),
    /// Indicates that a value should be written.
    Put(RequestId, Value),
    /// Indicates that a value should be retrieved.
    Get(RequestId),
    /// Indicates a successful `Put`. Analogous to an HTTP 2XX.
    PutOk(RequestId),
    /// Indicates a successful `Get`. Analogous to an HTTP 2XX.
    GetOk(RequestId, Value),
}
use RegisterMsg::*;
impl<RequestId, Value, InternalMsg> RegisterMsg<RequestId, Value, InternalMsg> {
    /// This is a helper for configuring an [`ActorModel`] parameterized by a
    /// [`ConsistencyTester`] for its history. Simply pass this method to
    /// [`ActorModel::record_msg_out`]. Records [`RegisterOp::Read`] upon
    /// [`RegisterMsg::Get`] and [`RegisterOp::Write`] upon
    /// [`RegisterMsg::Put`].
    pub fn record_invocations<C, H>(
        _cfg: &C,
        history: &H,
        env: Envelope<&RegisterMsg<RequestId, Value, InternalMsg>>,
    ) -> Option<H>
    where
        H: Clone + ConsistencyTester<Id, Register<Value>>,
        Value: Clone + Debug + PartialEq,
    {
        // Currently throws away useful information about invalid histories.
        // Ideally checking would continue, but the property would be labeled
        // with an error.
        match env.msg {
            Get(_) => {
                let mut updated = history.clone();
                let _ = updated.on_invoke(env.src, RegisterOp::Read);
                Some(updated)
            }
            Put(_req_id, value) => {
                let mut updated = history.clone();
                let _ = updated.on_invoke(env.src, RegisterOp::Write(value.clone()));
                Some(updated)
            }
            _ => None,
        }
    }
    /// This is a helper for configuring an [`ActorModel`] parameterized by a
    /// [`ConsistencyTester`] for its history. Simply pass this method to
    /// [`ActorModel::record_msg_in`]. Records [`RegisterRet::ReadOk`] upon
    /// [`RegisterMsg::GetOk`] and [`RegisterRet::WriteOk`] upon
    /// [`RegisterMsg::PutOk`].
    pub fn record_returns<C, H>(
        _cfg: &C,
        history: &H,
        env: Envelope<&RegisterMsg<RequestId, Value, InternalMsg>>,
    ) -> Option<H>
    where
        H: Clone + ConsistencyTester<Id, Register<Value>>,
        Value: Clone + Debug + PartialEq,
    {
        // Currently throws away useful information about invalid histories.
        // Ideally checking would continue, but the property would be labeled
        // with an error.
        if let GetOk(_, v) = env.msg {
            let mut updated = history.clone();
            let _ = updated.on_return(env.dst, RegisterRet::ReadOk(v.clone()));
            Some(updated)
        } else if let PutOk(_) = env.msg {
            let mut updated = history.clone();
            let _ = updated.on_return(env.dst, RegisterRet::WriteOk);
            Some(updated)
        } else {
            None
        }
    }
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum RegisterActor<ServerActor> {
    /// A client that [`RegisterMsg::Put`]s a message and upon receiving a
    /// corresponding [`RegisterMsg::PutOk`] follows up with a
    /// [`RegisterMsg::Get`].
    Client {
        // Number of Puts to issue before the final Get.
        put_count: usize,
        // Number of server actors; used to derive server destination IDs.
        server_count: usize,
    },
    /// A server actor being validated.
    Server(ServerActor),
}
#[derive(Clone, Debug, Eq, Hash, PartialEq, serde::Serialize)]
pub enum RegisterActorState<ServerState, RequestId> {
    /// A client that sends a sequence of [`RegisterMsg::Put`] messages before sending a
    /// [`RegisterMsg::Get`].
    Client {
        // Request ID whose response the client is currently waiting on, if any.
        awaiting: Option<RequestId>,
        // Count of operations issued so far.
        op_count: u64,
    },
    /// Wraps the state of a server actor.
    Server(ServerState),
}
// This implementation assumes the servers are at the beginning of the list of
// actors in the system under test so that an arbitrary server destination ID
// can be derived from `(client_id.0 + k) % server_count` for any `k`.
impl<ServerActor, InternalMsg> Actor for RegisterActor<ServerActor>
where
    ServerActor: Actor<Msg = RegisterMsg<u64, char, InternalMsg>>,
    InternalMsg: Clone + Debug + Eq + Hash,
{
    type Msg = RegisterMsg<u64, char, InternalMsg>;
    type State = RegisterActorState<ServerActor::State, u64>;
    type Timer = ServerActor::Timer;
    // Request IDs are `op_count * index`; `1 * index` is kept (and the lint
    // silenced) to make that pattern visible.
    #[allow(clippy::identity_op)]
    fn on_start(&self, id: Id, o: &mut Out<Self>) -> Self::State {
        match self {
            RegisterActor::Client {
                put_count,
                server_count,
            } => {
                let server_count = *server_count as u64;
                let index = id.0;
                // Clients must be numbered after all servers (see the comment
                // preceding this impl about ID derivation).
                if index < server_count {
                    panic!("RegisterActor clients must be added to the model after servers.");
                }
                if *put_count == 0 {
                    // Nothing to write: start (and stay) idle.
                    RegisterActorState::Client {
                        awaiting: None,
                        op_count: 0,
                    }
                } else {
                    let unique_request_id = 1 * index; // next will be 2 * index
                    // First write: each client uses a distinct letter counting
                    // up from 'A' based on its position after the servers.
                    let value = (b'A' + (index - server_count) as u8) as char;
                    o.send(
                        Id((index + 0) % server_count),
                        Put(unique_request_id, value),
                    );
                    RegisterActorState::Client {
                        awaiting: Some(unique_request_id),
                        op_count: 1,
                    }
                }
            }
            RegisterActor::Server(server_actor) => {
                // Delegate to the wrapped server and forward its outputs.
                let mut server_out = Out::new();
                let state = RegisterActorState::Server(server_actor.on_start(id, &mut server_out));
                o.append(&mut server_out);
                state
            }
        }
    }
    fn on_msg(
        &self,
        id: Id,
        state: &mut Cow<Self::State>,
        src: Id,
        msg: Self::Msg,
        o: &mut Out<Self>,
    ) {
        use RegisterActor as A;
        use RegisterActorState as S;
        match (self, &**state) {
            (
                A::Client {
                    put_count,
                    server_count,
                },
                S::Client {
                    awaiting: Some(awaiting),
                    op_count,
                },
            ) => {
                let server_count = *server_count as u64;
                match msg {
                    // The outstanding Put was acknowledged: either issue the
                    // next Put, or the final Get once put_count is reached.
                    RegisterMsg::PutOk(request_id) if &request_id == awaiting => {
                        let index = id.0;
                        let unique_request_id = (op_count + 1) * index;
                        if *op_count < *put_count as u64 {
                            // Subsequent writes count down from 'Z'.
                            let value = (b'Z' - (index - server_count) as u8) as char;
                            o.send(
                                Id((index + op_count) % server_count),
                                Put(unique_request_id, value),
                            );
                        } else {
                            o.send(
                                Id((index + op_count) % server_count),
                                Get(unique_request_id),
                            );
                        }
                        *state = Cow::Owned(RegisterActorState::Client {
                            awaiting: Some(unique_request_id),
                            op_count: op_count + 1,
                        });
                    }
                    // The final Get was answered: the client is done.
                    RegisterMsg::GetOk(request_id, _value) if &request_id == awaiting => {
                        *state = Cow::Owned(RegisterActorState::Client {
                            awaiting: None,
                            op_count: op_count + 1,
                        });
                    }
                    // Stale responses and unrelated messages are ignored.
                    _ => {}
                }
            }
            (A::Server(server_actor), S::Server(server_state)) => {
                // Delegate; only replace our state if the server state
                // actually changed (i.e. became Cow::Owned).
                let mut server_state = Cow::Borrowed(server_state);
                let mut server_out = Out::new();
                server_actor.on_msg(id, &mut server_state, src, msg, &mut server_out);
                if let Cow::Owned(server_state) = server_state {
                    *state = Cow::Owned(RegisterActorState::Server(server_state))
                }
                o.append(&mut server_out);
            }
            _ => {}
        }
    }
    fn on_timeout(
        &self,
        id: Id,
        state: &mut Cow<Self::State>,
        timer: &Self::Timer,
        o: &mut Out<Self>,
    ) {
        use RegisterActor as A;
        use RegisterActorState as S;
        match (self, &**state) {
            // Clients set no timers; nothing to do.
            (A::Client { .. }, S::Client { .. }) => {}
            (A::Server(server_actor), S::Server(server_state)) => {
                let mut server_state = Cow::Borrowed(server_state);
                let mut server_out = Out::new();
                server_actor.on_timeout(id, &mut server_state, timer, &mut server_out);
                if let Cow::Owned(server_state) = server_state {
                    *state = Cow::Owned(RegisterActorState::Server(server_state))
                }
                o.append(&mut server_out);
            }
            _ => {}
        }
    }
}
|
extern crate rustl;
use rustl::foo;
fn main() {
    // Build the demo struct and exercise its Display impl, then call the
    // crate's free functions.
    let demo = rustl::display::AStructInYoRust { zeroth: 0 };
    println!("{}", demo);
    foo::foo();
    rustl::lol(10);
}
|
// Discriminant demo: all variants must be unit-like for `as` casts to yield
// the discriminant value.
enum A {
    Zero,
    One,
    // Was `Two()`: a field-less *tuple* variant's path is a constructor
    // function, so `A::Two as usize` cast the function pointer's address
    // instead of the discriminant. As a unit variant it casts to 2.
    Two,
}
fn main() {
    // Prints "0 1 2".
    println!("{} {} {}", A::Zero as usize, A::One as usize, A::Two as usize);
}
|
pub mod display;
pub mod pixel;
pub use sdl2;
#[cfg(test)]
mod tests {
extern crate rand;
use super::*;
use display::DisplayBuilder;
use rand::prelude::*;
use sdl2::audio::{AudioCallback, AudioSpecDesired};
use sdl2::event::Event;
use sdl2::keyboard::Keycode;
// Square-wave generator state for the SDL2 audio callback.
struct SquareWave {
    // Phase advance per sample (frequency / sample rate).
    phase_inc: f32,
    // Current position in the cycle, kept in [0, 1).
    phase: f32,
    // Peak amplitude of the wave.
    volume: f32,
}
impl AudioCallback for SquareWave {
    type Channel = f32;
    /// Fills `out` with a square wave: +volume during the first half of each
    /// phase cycle, -volume during the second half.
    fn callback(&mut self, out: &mut [f32]) {
        for sample in out.iter_mut() {
            let level = if self.phase <= 0.5 {
                self.volume
            } else {
                -self.volume
            };
            *sample = level;
            // Advance and wrap the phase back into [0, 1).
            self.phase = (self.phase + self.phase_inc) % 1.0;
        }
    }
}
#[test]
fn test_rand_pix() {
    // Opens a 64x32 window at 10x scale and redraws a fixed 2x2 white square
    // every ~20ms until Escape is pressed.
    let mut display = DisplayBuilder::new("Display", 64, 32, 10)
        .with_margin(5, 5)
        .build()
        .unwrap();
    // NOTE(review): `rng` is never used below — the buffer is hard-coded even
    // though the test's name suggests random pixels. Confirm intent.
    let mut rng = rand::thread_rng();
    'main: loop {
        // Drain pending SDL events; Escape exits the loop.
        for event in display.get_event_pump().poll_iter() {
            match event {
                Event::KeyDown {
                    keycode: Some(Keycode::Escape),
                    ..
                } => {
                    println!("escape received");
                    break 'main;
                }
                _ => {}
            }
        }
        // One (r, g, b) tuple per pixel, row-major with 64 pixels per row, so
        // indices 0, 1, 64, 65 form a 2x2 square at the top-left corner.
        let mut buffer = [(0, 0, 0); 64 * 32];
        buffer[0] = (255, 255, 255);
        buffer[1] = (255, 255, 255);
        buffer[64] = (255, 255, 255);
        buffer[65] = (255, 255, 255);
        display.from_buffer(&buffer);
        display.refresh();
        // Roughly caps the loop at 50 frames per second.
        std::thread::sleep(std::time::Duration::from_millis(20));
    }
}
#[test]
fn test_sound() {
    // A window is still needed to obtain the SDL context for audio.
    let mut display = DisplayBuilder::new("Display", 64, 32, 10)
        .with_margin(5, 5)
        .build()
        .unwrap();
    let audio_subsystem = display.context.audio().unwrap();
    // Mono output at 44.1kHz; `samples: None` lets SDL pick the buffer size.
    let desired_spec = AudioSpecDesired {
        freq: Some(44100),
        channels: Some(1),
        samples: None,
    };
    // 440 Hz square wave at quarter volume.
    let device = audio_subsystem
        .open_playback(None, &desired_spec, |spec| SquareWave {
            phase_inc: 440.0 / spec.freq as f32,
            phase: 0.0,
            volume: 0.25,
        })
        .unwrap();
    // Audible pattern: 2s tone, 2s silence, 2s tone.
    device.resume();
    std::thread::sleep(std::time::Duration::from_millis(2000));
    device.pause();
    std::thread::sleep(std::time::Duration::from_millis(2000));
    device.resume();
    std::thread::sleep(std::time::Duration::from_millis(2000));
}
}
|
use std::{io, error};
mod fibonacci;
mod temperature;
mod carol;
mod collections;
// Menu entries offered by the main loop; one variant per sub-program.
enum Options {
    Temperature,
    Fibonacci,
    GoldenFibonacci,
    Carol,
    IntegerList,
    PigLatin,
    Employee,
}
/// Shows the program menu in a loop, running the chosen sub-program each
/// time, until the user enters 0.
fn main() -> Result<(), Box<dyn error::Error>> {
    loop {
        println!("Choose one of the following programs:");
        println!("1. Temperature Conversion");
        println!("2. Fibonacci Sequence");
        println!("3. Golden Ratio Fibonacci Sequence");
        println!("4. The Twelve Days of Christmas");
        println!("5. Integer List Math");
        println!("6. Pig Latin");
        println!("7. Employees");
        println!("0. Exit");
        let mut line = String::new();
        io::stdin().read_line(&mut line)?;
        println!("===========================");
        // Anything non-numeric or out of range just re-displays the menu.
        let selection = match line.trim().parse::<i32>() {
            Ok(0) => break,
            Ok(1) => Options::Temperature,
            Ok(2) => Options::Fibonacci,
            Ok(3) => Options::GoldenFibonacci,
            Ok(4) => Options::Carol,
            Ok(5) => Options::IntegerList,
            Ok(6) => Options::PigLatin,
            Ok(7) => Options::Employee,
            _ => continue,
        };
        // Dispatch to the chosen sub-program; the last two can fail with I/O
        // errors, which propagate out of main.
        match selection {
            Options::Temperature => temperature::temperature_program(),
            Options::Fibonacci => fibonacci::fibonacci_program(),
            Options::GoldenFibonacci => fibonacci::golden_ratio_fibonacci(),
            Options::Carol => carol::carol_program(),
            Options::IntegerList => collections::integer_list_program(),
            Options::PigLatin => collections::pig_latin_program()?,
            Options::Employee => collections::employee_program()?,
        }
    }
    Ok(())
}
|
extern crate iron;
extern crate router;
extern crate markdown;
extern crate rustc_serialize;
use std::error::Error;
use std::fs::File;
use std::path::Path;
use std::convert::From;
use rustc_serialize::{
Decodable,
Decoder
};
use iron::prelude::*;
use iron::status;
use iron::headers::{
ContentType
};
use iron::middleware::Handler;
use iron::modifiers::Header;
use router::Router;
use rustc_serialize::json;
use std::env::args;
// One entry of config.json: the page's display title, the markdown file's
// location on disk (relative to the content prefix), and the URL path it is
// served under.
#[derive(RustcEncodable,RustcDecodable,Debug)]
struct Page
{
    title: String,
    location: String,
    url: String
}
// Placeholder route handler; intentionally empty and currently unused.
fn index() {
}
fn get_config(path:&str) -> Result<Vec<Page>,Box<Error>> {
let mut config = try!{File::open(path)};
let structure = try!{json::Json::from_reader(&mut config)};
Ok(try!{<Vec<Page>>::decode(&mut json::Decoder::new(structure))})
}
struct PageHandler {
prefix: String
}
impl Handler for PageHandler {
    /// Serves a markdown page: rebuilds the request path, looks it up in
    /// config.json under the content prefix, and renders the matching file
    /// to HTML; any path without a config entry gets a 404.
    fn handle(&self, req: &mut Request) -> IronResult<Response> {
        //let ref path = req.extensions.get::<Router>().unwrap().find("path").unwrap_or("/");
        // NOTE(review): the config is re-read (and unwrapped) on every
        // request — a missing/invalid config.json panics the handler.
        let config = get_config(&format!("{}/config.json",self.prefix)).unwrap();
        // Rebuild "/a/b"-style path from the URL's fragments.
        let mut path = String::new();
        for fragment in &req.url.path {
            path.push_str("/");
            path.push_str(&fragment);
        }
        println!("PATH ! {}",path);
        println!("{:?}",config);
        println!("{:?}",req);
        // let mut headers = Headers::new();
        // headers.set(ContentType::html());
        // Linear scan over configured pages for an exact URL match.
        for page in config {
            if page.url == path {
                println!("MATCH!");
                // Render the markdown file to HTML and serve it as text/html.
                return Ok(Response::with((status::Ok, markdown::file_to_html(Path::new(&format!("{}/{}",self.prefix,page.location))).unwrap(),Header(ContentType::html()))))
            }
        }
        println!("HELLO!");
        return Ok(Response::with(status::NotFound))
    }
}
fn main() {
    // First CLI argument: directory containing config.json and the pages.
    let content_root = args().nth(1).expect("error, content location not specified");
    let handler = PageHandler { prefix: content_root };
    // Serve until killed; binding failure panics.
    Iron::new(handler).http("localhost:4000").unwrap();
}
|
//! Error types for startuppong apis
extern crate hyper;
extern crate rustc_serialize;
use std::io;
use std::fmt;
use std::convert::From;
use std::error::Error;
use rustc_serialize::json;
/// The error type returned in a startuppong `Result`.
///
/// Wrapped variants are constructed via the `From` impls below, so `try!`/`?`
/// conversions work at call sites.
#[derive(Debug)]
pub enum ApiError {
    /// An ID was not found for the given player name in a call to `get_player_ids`
    PlayerNotFound(String),
    /// Something went wrong during the request
    Http(hyper::error::Error),
    /// Error reading response
    Io(io::Error),
    /// Response JSON could not be decoded
    JsonDecoding(json::DecoderError)
}
impl Error for ApiError {
    // NOTE(review): `description`/`cause` are the pre-1.42/1.33 Error trait
    // methods, consistent with this crate's hyper 0.x / rustc_serialize era.
    fn description(&self) -> &str {
        match *self {
            ApiError::PlayerNotFound(_) => "Could not match player name to id",
            // Wrapped variants defer to the inner error's description.
            ApiError::Http(ref err) => err.description(),
            ApiError::Io(ref err) => err.description(),
            ApiError::JsonDecoding(ref err) => err.description(),
        }
    }
    // Expose the wrapped error as the cause where one exists.
    fn cause(&self) -> Option<&Error> {
        match *self {
            ApiError::PlayerNotFound(_) => None,
            ApiError::Http(ref err) => Some(err),
            ApiError::Io(ref err) => Some(err),
            ApiError::JsonDecoding(ref err) => Some(err),
        }
    }
}
impl fmt::Display for ApiError {
    // User-facing rendering: variant name plus the inner error/message.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            ApiError::PlayerNotFound(ref s) => write!(f, "PlayerNotFound: {}", s),
            ApiError::Http(ref err) => write!(f, "Http error: {}", err),
            ApiError::Io(ref err) => write!(f, "Io error: {}", err),
            ApiError::JsonDecoding(ref err) => write!(f, "JsonDecoding error: {}", err),
        }
    }
}
// Conversions that let `try!`/`?` lift underlying errors into `ApiError`.
impl From<hyper::error::Error> for ApiError {
    fn from(err: hyper::error::Error) -> ApiError {
        ApiError::Http(err)
    }
}
impl From<io::Error> for ApiError {
    fn from(err: io::Error) -> ApiError {
        ApiError::Io(err)
    }
}
impl From<json::DecoderError> for ApiError {
    fn from(err: json::DecoderError) -> ApiError {
        ApiError::JsonDecoding(err)
    }
}
|
pub struct Solution;
impl Solution {
    /// Counts battleships on the board (LeetCode 419).
    ///
    /// A cell starts a ship exactly when it is 'X' and has no 'X' directly
    /// above or to its left, so counting those "head" cells counts ships.
    pub fn count_battleships(board: Vec<Vec<char>>) -> i32 {
        board
            .iter()
            .enumerate()
            .map(|(i, row)| {
                row.iter()
                    .enumerate()
                    .filter(|&(j, &cell)| {
                        cell == 'X'
                            && (i == 0 || board[i - 1][j] == '.')
                            && (j == 0 || board[i][j - 1] == '.')
                    })
                    .count() as i32
            })
            .sum()
    }
}
#[test]
fn test0419() {
    // Helper: build a char grid from string rows and check the ship count.
    fn case(rows: Vec<&str>, want: i32) {
        let grid: Vec<Vec<char>> = rows.iter().map(|s| s.chars().collect()).collect();
        assert_eq!(Solution::count_battleships(grid), want);
    }
    case(vec!["X..X", "...X", "...X"], 2);
}
|
use crate::{source::Source, CACHE};
use serde::{Deserialize, Serialize};
use std::{
collections::{btree_map::Entry, BTreeMap},
fs::File,
hash::{Hash, Hasher},
io::Write,
ops::Deref,
path::PathBuf,
u8,
};
impl Default for Library {
    /// An empty library rooted at `<CACHE>/library`.
    fn default() -> Self {
        Self {
            books: BTreeMap::new(),
            location: PathBuf::from(CACHE).join("library"),
        }
    }
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Library {
    // All books keyed by name; BTreeMap keeps them name-sorted.
    pub books: BTreeMap<BookName, Book>,
    // On-disk location of the library cache.
    pub location: PathBuf,
}
#[derive(Default, Ord, PartialOrd, Eq, Debug, Clone, Serialize, Deserialize)]
pub struct Book {
    pub name: BookName,
    // Source of the book's index page.
    pub index: Source,
    // Chapters keyed by chapter number (see `Chapter::num`).
    chapters: BTreeMap<u16, Chapter>,
    // Presumably whether the book is image-based vs text (derived from
    // `index.check_visual()` in `set_visual`); None = undetermined.
    visual: Option<bool>,
    // Current reading position (chapter number), moved by `seek`.
    pub pos: u16,
}
//TODO: implement Default Chapter
#[derive(
    Default, Eq, PartialEq, Ord, PartialOrd, Debug, Clone, Serialize, Deserialize,
)]
pub struct Chapter {
    // Source of the chapter's page.
    pub page: Source,
    // Page contents keyed by page number.
    content: BTreeMap<u16, Content>,
    // Current reading position (page number), moved by `seek`.
    pub pos: u16,
}
// Newtype over the book's title string (derefs to `String`).
#[derive(
    Hash,
    Eq,
    PartialEq,
    Ord,
    PartialOrd,
    Default,
    Debug,
    Clone,
    Serialize,
    Deserialize,
)]
pub struct BookName(String);
//TODO: implement Default for Content
// Page number plus the directory its file is saved under (see `Content::save`).
#[derive(
    Default, Eq, PartialEq, Ord, PartialOrd, Debug, Clone, Serialize, Deserialize,
)]
pub struct Content(pub u16, pub PathBuf);
impl Library {
    /// Looks up a book by name.
    pub async fn get(
        &self,
        book: &BookName,
    ) -> Option<&Book> {
        self.books.get(book)
    }
    /// Renames `old` to `new_name`, re-keying the map entry; no-op when
    /// `old` is absent.
    pub fn rename_book(
        &mut self,
        old: &BookName,
        new_name: BookName,
    ) {
        // Single map lookup: `remove` both tests for and extracts the entry
        // (previously contains_key + remove + unwrap did two lookups).
        if let Some(mut book) = self.books.remove(old) {
            book.name = new_name.clone();
            self.books.insert(new_name, book);
        }
    }
    /// Adds a book (fetching its index when a source site is given) and
    /// returns a mutable reference to it; an existing entry is kept as-is.
    pub async fn add_book(
        &mut self,
        book: BookName,
        site: Option<Source>,
    ) -> &mut Book {
        if let Some(src) = site {
            // The index fetch happens before `entry` because it awaits;
            // `or_insert_with` cannot take an async closure.
            let b = Book {
                name: book.clone(),
                index: src.refresh().await.index().await,
                ..Default::default()
            };
            self.books.entry(book).or_insert(b)
        } else {
            self.books.entry(book).or_default()
        }
    }
    /// Removes a book; unknown names are silently ignored.
    pub async fn remove_book(
        &mut self,
        book: BookName,
    ) {
        self.books.remove(&book);
    }
    /// Updates an existing book's index source from `url`; unknown books are
    /// ignored.
    pub async fn set_source(
        &mut self,
        book: BookName,
        url: Option<String>,
    ) {
        match (self.books.entry(book), url) {
            (Entry::Occupied(mut e), Some(url)) => {
                e.get_mut().index = Source::from(url).refresh().await;
            }
            // NOTE(review): `None` resets the entire Book (chapters and all),
            // not just its source — confirm that is intended.
            (Entry::Occupied(mut e), None) => *e.get_mut() = Default::default(),
            _ => {}
        }
    }
}
impl Book {
pub fn set_visual(
&mut self,
visual: Option<bool>,
) {
match visual {
Some(_) => self.visual = visual,
None => self.visual = self.index.check_visual(),
}
}
pub fn visual(&self) -> Option<bool> { self.visual }
pub async fn add_chapter(
&mut self,
ch: Chapter,
) -> Option<Chapter> {
self.chapters.insert(ch.num(), ch)
}
pub fn remove_chapter(
&mut self,
ch: Chapter,
) -> Option<Chapter> {
self.chapters.remove(&ch.num())
}
pub fn get(
&mut self,
ch: u16,
) -> Option<&Chapter> {
self.chapters.get(&ch)
}
pub fn seek(
&mut self,
chapter: u16,
) -> Option<Chapter> {
let e = self.chapters.get(&chapter).cloned();
e.is_some().then(|| self.pos = chapter);
e
}
pub fn prev(&mut self) -> Chapter {
match self.seek(self.pos.saturating_sub(1)) {
Some(c) => c,
None => Chapter::default(),
}
}
pub fn next(&mut self) -> Chapter {
match self.seek(self.pos.saturating_add(1)) {
Some(c) => c,
None => Chapter::default(),
}
}
}
impl Chapter {
pub fn add_content(
&mut self,
content: Content,
) -> Option<Content> {
self.content.insert(content.0, content)
}
pub fn remove_content(
&mut self,
content: Content,
) -> Option<Content> {
self.content.remove(&content.0)
}
pub fn get(
&mut self,
p: u16,
) -> Option<&Content> {
self.content.get(&p)
}
pub fn num(&self) -> u16 { self.page.place.1 }
pub fn seek(
&mut self,
page: u16,
) -> Option<Content> {
let e = self.content.get(&page).cloned();
e.is_some().then(|| self.pos = page);
e
}
pub fn prev(&mut self) -> Content {
match self.seek(self.pos.saturating_sub(1)) {
Some(c) => c,
None => Content::default(),
}
}
pub fn next(&mut self) -> Content {
match self.seek(self.pos.saturating_add(1)) {
Some(c) => c,
None => Content::default(),
}
}
}
impl Content {
pub fn save(
&self,
data: &[u8],
) {
let pb = &self.1;
std::fs::create_dir_all(pb).unwrap();
let pb = &pb.join(format!("{:04}.jpg", self.0));
File::with_options()
.write(true)
.create(true)
.open(pb)
.unwrap()
.write(data)
.unwrap();
}
pub fn file(&self) -> File {
let pb = &self.1;
std::fs::create_dir_all(pb).unwrap();
let pb = &pb.join(format!("{}", self.0));
std::fs::create_dir_all(pb).unwrap();
File::with_options()
.write(true)
.create(true)
.open(pb)
.unwrap()
}
}
// Book equality and hashing are by name only; all other fields are ignored,
// keeping the two consistent with each other.
impl PartialEq for Book {
    fn eq(
        &self,
        other: &Self,
    ) -> bool {
        self.name == other.name
    }
}
impl Hash for Book {
    fn hash<H: Hasher>(
        &self,
        state: &mut H,
    ) {
        self.name.hash(state);
    }
}
// A Book can be built from a bare title (String or BookName); every other
// field takes its Default value.
impl From<String> for Book {
    fn from(name: String) -> Self {
        Self {
            name: name.into(),
            ..Default::default()
        }
    }
}
impl From<BookName> for Book {
    fn from(name: BookName) -> Self {
        Self {
            name: name.into(),
            ..Default::default()
        }
    }
}
impl From<String> for BookName {
    fn from(name: String) -> Self { Self(name) }
}
/// A `Book` dereferences to its name.
impl Deref for Book {
    type Target = BookName;
    fn deref(&self) -> &BookName {
        &self.name
    }
}
/// A `BookName` dereferences to its underlying string.
impl Deref for BookName {
    type Target = String;
    fn deref(&self) -> &String {
        &self.0
    }
}
|
use super::*;
#[test]
pub fn test_engine_disasm() {
    // One disassembly fixture: engine configuration plus expected output.
    struct Test {
        arch: Arch,
        mode: Mode,
        opts: Vec<(Opt, usize)>,
        code: Vec<u8>,
        insn: Vec<Insn>,
    }
    let tests = vec![
        Test{
            arch: Arch::X86,
            mode: MODE_16,
            opts: vec![],
            code: vec![0x8d, 0x4c, 0x32, 0x08, 0x01, 0xd8, 0x81, 0xc6, 0x34, 0x12, 0x00, 0x00],
            insn: vec![Insn{
                addr: 0x1000,
                bytes: vec![0x8d, 0x4c, 0x32, ],
                mnemonic: "lea".to_string(),
                op_str: "cx, word ptr [si + 0x32]".to_string(),
            }, Insn{
                addr: 0x1003,
                bytes: vec![0x08, 0x01, ],
                mnemonic: "or".to_string(),
                op_str: "byte ptr [bx + di], al".to_string(),
            }, Insn{
                addr: 0x1005,
                bytes: vec![0xd8, 0x81, 0xc6, 0x34, ],
                mnemonic: "fadd".to_string(),
                op_str: "dword ptr [bx + di + 0x34c6]".to_string(),
            }, Insn{
                addr: 0x1009,
                bytes: vec![0x12, 0x00, ],
                mnemonic: "adc".to_string(),
                op_str: "al, byte ptr [bx + si]".to_string(),
            }],
        }, Test{
            arch: Arch::X86,
            mode: MODE_32,
            opts: vec![(Opt::Syntax, 2 /*ATT*/)],
            code: vec![0x8d, 0x4c, 0x32, 0x08, 0x01, 0xd8, 0x81, 0xc6, 0x34, 0x12, 0x00, 0x00],
            insn: vec![Insn{
                addr: 0x1000,
                bytes: vec![0x8d, 0x4c, 0x32, 0x08, ],
                mnemonic: "leal".to_string(),
                op_str: "8(%edx, %esi), %ecx".to_string(),
            }, Insn{
                addr: 0x1004,
                bytes: vec![0x01, 0xd8, ],
                mnemonic: "addl".to_string(),
                op_str: "%ebx, %eax".to_string(),
            }, Insn{
                addr: 0x1006,
                bytes: vec![0x81, 0xc6, 0x34, 0x12, 0x00, 0x00, ],
                mnemonic: "addl".to_string(),
                op_str: "$0x1234, %esi".to_string(),
            }],
        }, Test{
            arch: Arch::X86,
            mode: MODE_32,
            opts: vec![],
            code: vec![0x8d, 0x4c, 0x32, 0x08, 0x01, 0xd8, 0x81, 0xc6, 0x34, 0x12, 0x00, 0x00],
            insn: vec![Insn{
                addr: 0x1000,
                bytes: vec![0x8d, 0x4c, 0x32, 0x08, ],
                mnemonic: "lea".to_string(),
                op_str: "ecx, dword ptr [edx + esi + 8]".to_string(),
            }, Insn{
                addr: 0x1004,
                bytes: vec![0x01, 0xd8, ],
                mnemonic: "add".to_string(),
                op_str: "eax, ebx".to_string(),
            }, Insn{
                addr: 0x1006,
                bytes: vec![0x81, 0xc6, 0x34, 0x12, 0x00, 0x00, ],
                mnemonic: "add".to_string(),
                op_str: "esi, 0x1234".to_string(),
            }],
        }, Test{
            arch: Arch::X86,
            mode: MODE_64,
            opts: vec![],
            code: vec![0x55, 0x48, 0x8b, 0x05, 0xb8, 0x13, 0x00, 0x00],
            insn: vec![Insn{
                addr: 0x1000,
                bytes: vec![0x55, ],
                mnemonic: "push".to_string(),
                op_str: "rbp".to_string(),
            }, Insn{
                addr: 0x1001,
                bytes: vec![0x48, 0x8b, 0x05, 0xb8, 0x13, 0x00, 0x00, ],
                mnemonic: "mov".to_string(),
                op_str: "rax, qword ptr [rip + 0x13b8]".to_string(),
            }],
        }, Test{
            arch: Arch::Arm,
            mode: MODE_ARM|MODE_LITTLE_ENDIAN,
            opts: vec![],
            code: vec![0xed, 0xff, 0xff, 0xeb, 0x04, 0xe0, 0x2d, 0xe5, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x83, 0x22, 0xe5, 0xf1, 0x02, 0x03, 0x0e, 0x00, 0x00, 0xa0, 0xe3, 0x02, 0x30, 0xc1, 0xe7, 0x00, 0x00, 0x53, 0xe3],
            insn: vec![Insn{
                addr: 0x1000,
                bytes: vec![0xed, 0xff, 0xff, 0xeb, ],
                mnemonic: "bl".to_string(),
                op_str: "#0xfbc".to_string(),
            }, Insn{
                addr: 0x1004,
                bytes: vec![0x04, 0xe0, 0x2d, 0xe5, ],
                mnemonic: "str".to_string(),
                op_str: "lr, [sp, #-4]!".to_string(),
            }, Insn{
                addr: 0x1008,
                bytes: vec![0x00, 0x00, 0x00, 0x00, ],
                mnemonic: "andeq".to_string(),
                op_str: "r0, r0, r0".to_string(),
            }, Insn{
                addr: 0x100c,
                bytes: vec![0xe0, 0x83, 0x22, 0xe5, ],
                mnemonic: "str".to_string(),
                op_str: "r8, [r2, #-0x3e0]!".to_string(),
            }, Insn{
                addr: 0x1010,
                bytes: vec![0xf1, 0x02, 0x03, 0x0e, ],
                mnemonic: "mcreq".to_string(),
                op_str: "p2, #0, r0, c3, c1, #7".to_string(),
            }, Insn{
                addr: 0x1014,
                bytes: vec![0x00, 0x00, 0xa0, 0xe3, ],
                mnemonic: "mov".to_string(),
                op_str: "r0, #0".to_string(),
            }, Insn{
                addr: 0x1018,
                bytes: vec![0x02, 0x30, 0xc1, 0xe7, ],
                mnemonic: "strb".to_string(),
                op_str: "r3, [r1, r2]".to_string(),
            }, Insn{
                addr: 0x101c,
                bytes: vec![0x00, 0x00, 0x53, 0xe3, ],
                mnemonic: "cmp".to_string(),
                op_str: "r3, #0".to_string(),
            }],
        }, Test{
            arch: Arch::Arm,
            mode: MODE_THUMB,
            opts: vec![],
            code: vec![0x4f, 0xf0, 0x00, 0x01, 0xbd, 0xe8, 0x00, 0x88, 0xd1, 0xe8, 0x00, 0xf0],
            insn: vec![Insn{
                addr: 0x1000,
                bytes: vec![0x4f, 0xf0, 0x00, 0x01, ],
                mnemonic: "mov.w".to_string(),
                op_str: "r1, #0".to_string(),
            }, Insn{
                addr: 0x1004,
                bytes: vec![0xbd, 0xe8, 0x00, 0x88, ],
                mnemonic: "pop.w".to_string(),
                op_str: "{fp, pc}".to_string(),
            }, Insn{
                addr: 0x1008,
                bytes: vec![0xd1, 0xe8, 0x00, 0xf0, ],
                mnemonic: "tbb".to_string(),
                op_str: "[r1, r0]".to_string(),
            }],
        }, Test{
            arch: Arch::Arm,
            mode: MODE_ARM,
            opts: vec![],
            // ARM: Cortex-A15 + NEON,
            code: vec![0x10, 0xf1, 0x10, 0xe7, 0x11, 0xf2, 0x31, 0xe7, 0xdc, 0xa1, 0x2e, 0xf3, 0xe8, 0x4e, 0x62, 0xf3],
            insn: vec![Insn{
                addr: 0x1000,
                bytes: vec![0x10, 0xf1, 0x10, 0xe7, ],
                mnemonic: "sdiv".to_string(),
                op_str: "r0, r0, r1".to_string(),
            }, Insn{
                addr: 0x1004,
                bytes: vec![0x11, 0xf2, 0x31, 0xe7, ],
                mnemonic: "udiv".to_string(),
                op_str: "r1, r1, r2".to_string(),
            }, Insn{
                addr: 0x1008,
                bytes: vec![0xdc, 0xa1, 0x2e, 0xf3, ],
                mnemonic: "vbit".to_string(),
                op_str: "q5, q15, q6".to_string(),
            }, Insn{
                addr: 0x100c,
                bytes: vec![0xe8, 0x4e, 0x62, 0xf3, ],
                mnemonic: "vcgt.f32".to_string(),
                op_str: "q10, q9, q12".to_string(),
            }],
        }, Test{
            arch: Arch::Arm,
            mode: MODE_THUMB,
            opts: vec![],
            // THUMB,
            code: vec![0x70, 0x47, 0xeb, 0x46, 0x83, 0xb0, 0xc9, 0x68],
            insn: vec![Insn{
                addr: 0x1000,
                bytes: vec![0x70, 0x47, ],
                mnemonic: "bx".to_string(),
                op_str: "lr".to_string(),
            }, Insn{
                addr: 0x1002,
                bytes: vec![0xeb, 0x46, ],
                mnemonic: "mov".to_string(),
                op_str: "fp, sp".to_string(),
            }, Insn{
                addr: 0x1004,
                bytes: vec![0x83, 0xb0, ],
                mnemonic: "sub".to_string(),
                op_str: "sp, #0xc".to_string(),
            }, Insn{
                addr: 0x1006,
                bytes: vec![0xc9, 0x68, ],
                mnemonic: "ldr".to_string(),
                op_str: "r1, [r1, #0xc]".to_string(),
            }],
        }, Test{
            arch: Arch::MIPS,
            mode: MODE_32| MODE_BIG_ENDIAN,
            opts: vec![],
            // MIPS-32 (Big-endian),
            code: vec![0x0c, 0x10, 0x00, 0x97, 0x00, 0x00, 0x00, 0x00, 0x24, 0x02, 0x00, 0x0c, 0x8f, 0xa2, 0x00, 0x00, 0x34, 0x21, 0x34, 0x56],
            insn: vec![Insn{
                addr: 0x1000,
                bytes: vec![0x0c, 0x10, 0x00, 0x97, ],
                mnemonic: "jal".to_string(),
                op_str: "0x40025c".to_string(),
            }, Insn{
                addr: 0x1004,
                bytes: vec![0x00, 0x00, 0x00, 0x00, ],
                mnemonic: "nop".to_string(),
                op_str: "".to_string(),
            }, Insn{
                addr: 0x1008,
                bytes: vec![0x24, 0x02, 0x00, 0x0c, ],
                mnemonic: "addiu".to_string(),
                op_str: "$v0, $zero, 0xc".to_string(),
            }, Insn{
                addr: 0x100c,
                bytes: vec![0x8f, 0xa2, 0x00, 0x00, ],
                mnemonic: "lw".to_string(),
                op_str: "$v0, ($sp)".to_string(),
            }, Insn{
                addr: 0x1010,
                bytes: vec![0x34, 0x21, 0x34, 0x56, ],
                mnemonic: "ori".to_string(),
                op_str: "$at, $at, 0x3456".to_string(),
            }],
        }, Test{
            arch: Arch::MIPS,
            mode: MODE_64| MODE_LITTLE_ENDIAN,
            opts: vec![],
            // MIPS-64-EL (Little-endian),
            code: vec![0x56, 0x34, 0x21, 0x34, 0xc2, 0x17, 0x01, 0x00],
            insn: vec![Insn{
                addr: 0x1000,
                bytes: vec![0x56, 0x34, 0x21, 0x34, ],
                mnemonic: "ori".to_string(),
                op_str: "$at, $at, 0x3456".to_string(),
            }, Insn{
                addr: 0x1004,
                bytes: vec![0xc2, 0x17, 0x01, 0x00, ],
                mnemonic: "srl".to_string(),
                op_str: "$v0, $at, 0x1f".to_string(),
            }],
        }, Test{
            arch: Arch::Arm64,
            mode: MODE_ARM,
            opts: vec![],
            code: vec![0x21, 0x7c, 0x02, 0x9b, 0x21, 0x7c, 0x00, 0x53, 0x00, 0x40, 0x21, 0x4b, 0xe1, 0x0b, 0x40, 0xb9, 0x10, 0x20, 0x21, 0x1e],
            insn: vec![Insn{
                addr: 0x1000,
                bytes: vec![0x21, 0x7c, 0x02, 0x9b, ],
                mnemonic: "mul".to_string(),
                op_str: "x1, x1, x2".to_string(),
            }, Insn{
                addr: 0x1004,
                bytes: vec![0x21, 0x7c, 0x00, 0x53, ],
                mnemonic: "lsr".to_string(),
                op_str: "w1, w1, #0".to_string(),
            }, Insn{
                addr: 0x1008,
                bytes: vec![0x00, 0x40, 0x21, 0x4b, ],
                mnemonic: "sub".to_string(),
                op_str: "w0, w0, w1, uxtw".to_string(),
            }, Insn{
                addr: 0x100c,
                bytes: vec![0xe1, 0x0b, 0x40, 0xb9, ],
                mnemonic: "ldr".to_string(),
                op_str: "w1, [sp, #8]".to_string(),
            }, Insn{
                addr: 0x1010,
                bytes: vec![0x10, 0x20, 0x21, 0x1e, ],
                mnemonic: "fcmpe".to_string(),
                op_str: "s0, s1".to_string(),
            }],
        }, Test{
            arch: Arch::PowerPC,
            mode: MODE_BIG_ENDIAN,
            opts: vec![],
            code: vec![0x80, 0x20, 0x00, 0x00, 0x80, 0x3f, 0x00, 0x00, 0x10, 0x43, 0x23, 0x0e, 0xd0, 0x44, 0x00, 0x80, 0x4c, 0x43, 0x22, 0x02, 0x2d, 0x03, 0x00, 0x80, 0x7c, 0x43, 0x20, 0x14, 0x7c, 0x43, 0x20, 0x93, 0x4f, 0x20, 0x00, 0x21, 0x4c, 0xc8, 0x00, 0x21],
            insn: vec![Insn{
                addr: 0x1000,
                bytes: vec![0x80, 0x20, 0x00, 0x00, ],
                mnemonic: "lwz".to_string(),
                op_str: "r1, (0)".to_string(),
            }, Insn{
                addr: 0x1004,
                bytes: vec![0x80, 0x3f, 0x00, 0x00, ],
                mnemonic: "lwz".to_string(),
                op_str: "r1, (r31)".to_string(),
            }, Insn{
                addr: 0x1008,
                bytes: vec![0x10, 0x43, 0x23, 0x0e, ],
                mnemonic: "vpkpx".to_string(),
                op_str: "v2, v3, v4".to_string(),
            }, Insn{
                addr: 0x100c,
                bytes: vec![0xd0, 0x44, 0x00, 0x80, ],
                mnemonic: "stfs".to_string(),
                op_str: "f2, 0x80(r4)".to_string(),
            }, Insn{
                addr: 0x1010,
                bytes: vec![0x4c, 0x43, 0x22, 0x02, ],
                mnemonic: "crand".to_string(),
                op_str: "2, 3, 4".to_string(),
            }, Insn{
                addr: 0x1014,
                bytes: vec![0x2d, 0x03, 0x00, 0x80, ],
                mnemonic: "cmpwi".to_string(),
                op_str: "cr2, r3, 0x80".to_string(),
            }, Insn{
                addr: 0x1018,
                bytes: vec![0x7c, 0x43, 0x20, 0x14, ],
                mnemonic: "addc".to_string(),
                op_str: "r2, r3, r4".to_string(),
            }, Insn{
                addr: 0x101c,
                bytes: vec![0x7c, 0x43, 0x20, 0x93, ],
                mnemonic: "mulhd.".to_string(),
                op_str: "r2, r3, r4".to_string(),
            }, Insn{
                addr: 0x1020,
                bytes: vec![0x4f, 0x20, 0x00, 0x21, ],
                mnemonic: "bdnzlrl+".to_string(),
                op_str: "".to_string(),
            }, Insn{
                addr: 0x1024,
                bytes: vec![0x4c, 0xc8, 0x00, 0x21, ],
                mnemonic: "bgelrl-".to_string(),
                op_str: "cr2".to_string(),
            }],
        },
    ];
    for (i, test) in tests.iter().enumerate() {
        println!("test case #{} / {:?} {:?}", i, test.arch, test.mode);
        // Capstone can be built without some architectures; skip those.
        // (Message typo fixed: was "no supported".)
        if !supports(test.arch) {
            println!("skipped - arch {:?} not supported", test.arch);
            continue;
        }
        match Engine::new(test.arch, test.mode) {
            Ok(e) => {
                for &(opt, val) in test.opts.iter() {
                    match e.set_option(opt, val) {
                        Ok(_) => (),
                        Err(err) => panic!("#{} Engine::set_option({:?}, {:?}) failed: {:?}\n", i, opt, val, err),
                    }
                }
                match e.disasm(test.code.as_ref(), 0x1000, 0) {
                    Ok(insns) => {
                        // assert_eq! reports both sides on failure, unlike
                        // the original assert!(a == b).
                        assert_eq!(insns.len(), test.insn.len());
                        for (out, expected) in insns.iter().zip(test.insn.iter()) {
                            println!("out: {:x}\t{}\t{}", out.addr, out.mnemonic, out.op_str);
                            println!("exp: {:x}\t{}\t{}", expected.addr, expected.mnemonic, expected.op_str);
                            assert_eq!(out.addr, expected.addr);
                            assert_eq!(out.bytes, expected.bytes);
                            assert_eq!(out.mnemonic, expected.mnemonic);
                            assert_eq!(out.op_str, expected.op_str);
                        }
                    }
                    Err(err) => {
                        panic!("#{} Engine::disasm failed: {:?} {:?}", i, err.code, err.desc);
                    }
                }
            },
            Err(err) => {
                panic!("#{} Engine::new failed: {:?} {:?}", i, err.code, err.desc);
            }
        }
    }
}
|
//! Diffie-Hellman key exchange
use crate::keys::{PublicKey, SecretKey};
use mohan::tools::RistrettoBoth;
/// Alias type for a shared secret after ECDH
pub type SharedSecret = RistrettoBoth;
/// Perform a Diffie-Hellman key agreement to produce a `SharedSecret`.
///
/// Computes `secret * their_public` on the Ristretto group; both parties
/// arrive at the same point (see the `alice_and_bob` test below).
/// NOTE(review): this is the raw DH group element — callers likely should
/// pass it through a KDF before using it as key material; confirm upstream.
pub fn diffie_hellman(secret: &SecretKey, their_public: &PublicKey) -> SharedSecret {
    RistrettoBoth::from_point(secret.as_scalar() * their_public.as_point())
}
#[cfg(test)]
mod test {
    use super::diffie_hellman;
    use crate::Keypair;
    // DH symmetry check: alice's secret with bob's public must equal bob's
    // secret with alice's public.
    #[test]
    fn alice_and_bob() {
        let mut csprng = rand::thread_rng();
        let alice: Keypair = Keypair::generate(&mut csprng);
        let bob: Keypair = Keypair::generate(&mut csprng);
        let alice_shared_secret = diffie_hellman(&alice.secret, &bob.public);
        let bob_shared_secret = diffie_hellman(&bob.secret, &alice.public);
        assert_eq!(alice_shared_secret.as_bytes(), bob_shared_secret.as_bytes());
    }
}
|
use join::Join;
use next_permutation::NextPermutation;
use procon_reader::ProconReader;
/// Reads a string `s` and a 1-based rank `k`, then prints the k-th
/// lexicographic permutation of `s`'s characters.
fn main() {
    let stdin = std::io::stdin();
    let mut reader = ProconReader::new(stdin.lock());
    let s: String = reader.get();
    let k: usize = reader.get();
    // Start from the lexicographically smallest arrangement...
    let mut letters: Vec<char> = s.chars().collect();
    letters.sort();
    // ...then advance k-1 permutations (the input guarantees they exist).
    for _ in 0..(k - 1) {
        assert!(letters.next_permutation());
    }
    println!("{}", letters.iter().join(""));
}
|
use crate::diesel::QueryDsl;
use crate::diesel::RunQueryDsl;
use crate::helpers::{email, email_template};
use crate::model::{Event, Space, SpaceUser, User};
use crate::schema::events::dsl::*;
use crate::schema::spaces::dsl::*;
use crate::schema::users::dsl::*;
use crate::Pool;
use actix::prelude::*;
use chrono::prelude::*;
use chrono::Local;
use cron::Schedule;
use std::{str::FromStr, time::Duration};
use diesel::prelude::*;
use diesel::r2d2::{self, ConnectionManager};
// Define actor
// Unit actor that periodically runs the daily reminder job; see
// `Scheduler::schedule_task`.
pub struct Scheduler;
// Provide Actor implementation for our actor
impl Actor for Scheduler {
    type Context = Context<Self>;
    // On startup, arm a one-shot timer for the next cron tick.
    fn started(&mut self, ctx: &mut Context<Self>) {
        println!("Actor is alive");
        ctx.run_later(duration_until_next(), move |this, ctx| {
            this.schedule_task(ctx)
        });
    }
    fn stopped(&mut self, _ctx: &mut Context<Self>) {
        println!("Actor is stopped");
    }
}
impl Scheduler {
    /// Runs the daily job, then re-arms the timer for the next cron firing.
    ///
    /// NOTE(review): a fresh r2d2 pool is built on every invocation; consider
    /// building it once and storing it on the actor — confirm intent.
    fn schedule_task(&self, ctx: &mut Context<Self>) {
        println!("schedule_task event - {:?}", Local::now());
        let database_url = std::env::var("DATABASE_URL").expect("DATABASE_URL must be set");
        // create db connection pool
        let manager = ConnectionManager::<PgConnection>::new(database_url);
        let pool: Pool = r2d2::Pool::builder()
            .build(manager)
            .expect("Failed to create pool.");
        run_job(pool);
        // Schedule the next run.
        ctx.run_later(duration_until_next(), move |this, ctx| {
            this.schedule_task(ctx)
        });
    }
}
/// Duration from now until the next cron firing (midnight, local time).
///
/// The 7-field expression is `sec min hour day-of-month month day-of-week year`.
/// `upcoming()` yields only future instants, so the signed duration is
/// non-negative and the `as u64` cast is safe here.
pub fn duration_until_next() -> Duration {
    let cron_expression = "0 0 0 * * * *"; //every 24 hrs
    let cron_schedule = Schedule::from_str(cron_expression).unwrap();
    let now = Local::now();
    let next = cron_schedule.upcoming(Local).next().unwrap();
    let duration_until = next.signed_duration_since(now);
    Duration::from_millis(duration_until.num_milliseconds() as u64)
}
//cron job to remind space members of any set event
fn run_job(db: Pool) {
    let conn = db.get().unwrap();
    println!("Running daily job...");
    //get all pending events (not yet reminded)
    let items: Vec<Event> = events
        .filter(reminded.eq(false))
        .load::<Event>(&conn)
        .unwrap();
    //get today's date
    let today: DateTime<Local> = Local::now();
    let other_email_address = std::env::var("EMAIL_ADDRESS").expect("EMAIL ADDRESS not set");
    let other_email_password = std::env::var("EMAIL_PASSWORD").expect("EMAIL PASSWORD not set");
    let other_email_provider = std::env::var("EMAIL_PROVIDER").expect("EMAIL PROVIDER not set");
    for val in items.iter() {
        //get date of the current event
        let set_event_date: NaiveDateTime = val.event_date;
        // Only remind for events whose calendar date is today (local time).
        if (today.year(), today.month(), today.day())
            == (
                set_event_date.year(),
                set_event_date.month(),
                set_event_date.day(),
            )
        {
            // NOTE(review): the event is flagged `reminded` before any email is
            // sent, and the update result is discarded — a send failure will
            // not be retried on the next run. Confirm this is intended.
            let _update_event = diesel::update(events.find(val.id))
                .set(reminded.eq(&true))
                .execute(&conn);
            //get all users in event channel
            let space = spaces.find(val.space_id).first::<Space>(&conn).unwrap();
            let user_spaces: Vec<_> = SpaceUser::belonging_to(&space)
                .inner_join(users)
                .load::<(SpaceUser, User)>(&conn)
                .unwrap();
            //send email to all members
            let template = email_template::send_reminder(
                &val.event_name,
                &space.spaces_name,
                &val.event_description,
            );
            for a in user_spaces.iter() {
                email::send_email(
                    &a.1.email,
                    &a.1.username,
                    &"Event reminder".to_string(),
                    &template,
                    &other_email_address,
                    &other_email_password,
                    &other_email_provider,
                );
            }
        }
    }
}
|
#[doc = "Reader of register TBPS"]
pub type R = crate::R<u32, super::TBPS>;
#[doc = "Reader of field `PSS`"]
pub type PSS_R = crate::R<u16, u16>;
impl R {
    // NOTE(review): this file reads the Timer B prescale register (TBPS); the
    // field doc previously said "Timer A", which looks like a copy/paste slip
    // from the TAPS register — corrected to Timer B.
    #[doc = "Bits 0:15 - GPTM Timer B Prescaler Value"]
    #[inline(always)]
    pub fn pss(&self) -> PSS_R {
        // Low 16 bits of the register hold the prescaler value.
        PSS_R::new((self.bits & 0xffff) as u16)
    }
}
|
use std::cmp::Ord;
use std::cmp::Ordering;
use std::convert::From;
use std::slice::Iter;
/// Result of an [`OrderedVecMap::insert`] operation.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum InsertionResult {
    /// A new entry was created for the key.
    Create,
    /// The key already existed and its entry was replaced.
    Overwrite,
    /// The key was rejected (reserved for validating callers; not produced here).
    InvalidKey,
}

/// Result of an [`OrderedVecMap::remove`] operation.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum RemovalResult {
    /// No entry with the given key was present.
    KeyNotFound,
    /// The entry was found and removed.
    Removed,
}

// Provides an ordered map with a method to query for partial matches.
// This is useful for disambiguation.
#[derive(Debug, PartialEq)]
pub struct OrderedVecMap<K, T>
where
    K: Ord,
{
    // Entries kept sorted by key so lookups can binary-search.
    data: Vec<(K, T)>,
}

impl<K, T> OrderedVecMap<K, T>
where
    K: Ord,
{
    /// Creates an empty map.
    pub fn new() -> Self {
        OrderedVecMap { data: Vec::new() }
    }

    /// Returns `true` when the map holds no entries.
    pub fn is_empty(&self) -> bool {
        self.data.is_empty()
    }

    /// Number of entries in the map.
    pub fn len(&self) -> usize {
        self.data.len()
    }

    /// Inserts `datum`, keeping the backing vector sorted by key, and
    /// reports whether the entry was created or overwrote an existing one.
    pub fn insert(&mut self, datum: (K, T)) -> InsertionResult {
        match self.data.binary_search_by(|probe| probe.0.cmp(&datum.0)) {
            Ok(idx) => {
                // Key present: replace the pair in place.
                self.data[idx] = datum;
                InsertionResult::Overwrite
            }
            Err(idx) => {
                // Key absent: `idx` is the sorted insertion point.
                self.data.insert(idx, datum);
                InsertionResult::Create
            }
        }
    }

    /// Removes the entry with `key`, if any.
    pub fn remove(&mut self, key: &K) -> RemovalResult {
        match self.data.binary_search_by(|probe| probe.0.cmp(key)) {
            Ok(idx) => {
                self.data.remove(idx);
                RemovalResult::Removed
            }
            Err(_) => RemovalResult::KeyNotFound,
        }
    }

    /// Iterates over `(key, value)` pairs in ascending key order.
    pub fn iter(&self) -> Iter<(K, T)> {
        self.data.iter()
    }

    /// Positional access; entries are in ascending key order.
    pub fn get(&self, idx: usize) -> Option<&(K, T)> {
        self.data.get(idx)
    }

    /// Binary-searches for `query`: `Ok(idx)` on a hit, `Err(idx)` gives the
    /// insertion point, matching `slice::binary_search_by`.
    pub fn find_idx(&self, query: &K) -> Result<usize, usize> {
        self.data.binary_search_by(|probe| probe.0.cmp(query))
    }

    /// Looks up the entry with key `query`.
    pub fn find(&self, query: &K) -> Option<&(K, T)> {
        self.find_idx(query).ok().and_then(|idx| self.data.get(idx))
    }

    /// Looks up an entry using a caller-supplied comparator `f`; `f` must be
    /// consistent with the map's key order for the binary search to be
    /// meaningful.
    pub fn find_by<'a, F>(&self, f: F) -> Option<&(K, T)>
    where
        F: Fn(&(K, T)) -> Ordering,
        K: 'a,
        T: 'a,
    {
        self.data
            .binary_search_by(f)
            .ok()
            .and_then(|idx| self.data.get(idx))
    }
}

impl<K, T> From<Vec<(K, T)>> for OrderedVecMap<K, T>
where
    K: Ord,
{
    /// Builds a map by (stably) sorting `data` by key.
    ///
    /// NOTE(review): duplicate keys are kept side by side and lookups will
    /// return one of them arbitrarily — confirm callers never pass duplicates.
    fn from(mut data: Vec<(K, T)>) -> OrderedVecMap<K, T> {
        data.sort_by(|a, b| a.0.cmp(&b.0));
        OrderedVecMap { data }
    }
}
#[cfg(test)]
mod test {
    use super::*;

    /// A new map starts empty.
    #[test]
    fn insert_empty_len() {
        let x = OrderedVecMap::<u8, u8>::new();
        assert_eq!(0, x.len());
        assert_eq!(true, x.is_empty());
    }

    /// First insert never reports Overwrite and bumps the length.
    #[test]
    fn insert_one_len() {
        let mut x = OrderedVecMap::<u8, u8>::new();
        match x.insert((4u8, 2u8)) {
            InsertionResult::Overwrite => {
                assert!(false);
            }
            _ => {}
        };
        assert_eq!(1, x.len());
        assert_eq!(false, x.is_empty());
    }

    /// Distinct keys each create an entry.
    #[test]
    fn insert_two_len() {
        let mut x = OrderedVecMap::<u8, u8>::new();
        assert_eq!(InsertionResult::Create, x.insert((4u8, 2u8)));
        assert_eq!(InsertionResult::Create, x.insert((3u8, 3u8)));
        assert_eq!(2, x.len());
    }

    /// Re-inserting an existing key overwrites without growing the map.
    #[test]
    fn insert_same_key_len() {
        let mut x = OrderedVecMap::<u8, u8>::new();
        assert_eq!(InsertionResult::Create, x.insert((3u8, 3u8)));
        assert_eq!(InsertionResult::Overwrite, x.insert((3u8, 3u8)));
        assert_eq!(1, x.len());
    }

    /// Removing a present key reports Removed and shrinks the map.
    #[test]
    fn remove() {
        let mut x = OrderedVecMap::<u8, u8>::new();
        assert_eq!(InsertionResult::Create, x.insert((3u8, 3u8)));
        assert_eq!(RemovalResult::Removed, x.remove(&3u8));
        assert_eq!(0, x.len());
    }

    /// Removing an absent key reports KeyNotFound and leaves the map intact.
    #[test]
    fn removal_failure() {
        let mut x = OrderedVecMap::<u8, u8>::new();
        assert_eq!(InsertionResult::Create, x.insert((3u8, 3u8)));
        assert_eq!(RemovalResult::KeyNotFound, x.remove(&1u8));
        assert_eq!(1, x.len());
    }

    #[test]
    fn find_one() {
        let mut x = OrderedVecMap::<u8, u8>::new();
        x.insert((4u8, 2u8));
        assert_eq!(Some(&(4u8, 2u8)), x.find(&4u8));
    }

    #[test]
    fn find_one_by() {
        let mut x = OrderedVecMap::<u8, u8>::new();
        x.insert((4u8, 2u8));
        assert_eq!(Some(&(4u8, 2u8)), x.find_by(|probe| probe.0.cmp(&4u8)));
    }

    /// Lookup still works when entries were inserted out of key order.
    #[test]
    fn find() {
        let mut x = OrderedVecMap::<u8, u8>::new();
        x.insert((4u8, 2u8));
        x.insert((3u8, 3u8));
        assert_eq!(Some(&(4u8, 2u8)), x.find(&4u8));
    }

    #[test]
    fn find_by() {
        let mut x = OrderedVecMap::<u8, u8>::new();
        x.insert((3u8, 3u8));
        x.insert((4u8, 2u8));
        assert_eq!(Some(&(4u8, 2u8)), x.find_by(|probe| probe.0.cmp(&4u8)));
    }

    /// From<Vec<..>> sorts the input so lookups work afterwards.
    #[test]
    fn from_vec() {
        let v = vec![(1u8, 3u8), (2u8, 2u8), (3u8, 1u8)];
        let x = OrderedVecMap::<u8, u8>::from(v);
        assert_eq!(3, x.len());
        assert_eq!(Some(&(1u8, 3u8)), x.find(&1u8));
    }
}
|
use rust_mal_lib::env::{Env, Environment};
use rust_mal_lib::types::MalError;
use rust_mal_steps::scaffold::*;
/// READ step: a no-op at step 0 — the raw input line is the "AST".
fn read(string: String) -> String {
    string
}

/// EVAL step: a no-op at step 0.
fn eval(ast: String) -> String {
    ast
}

/// PRINT step: a no-op at step 0.
fn print(expr: String) -> String {
    expr
}

/// Step 0 of the MAL tutorial: echo input back through read → eval → print.
struct Step0Repl;

impl InterpreterScaffold<Env> for Step0Repl {
    const STEP_NAME: &'static str = "step0_repl";

    /// No bindings are needed yet; an empty root environment suffices.
    fn create_env() -> Result<Env, MalError> {
        Ok(Environment::new(None))
    }

    /// Read-eval-print for one input line; empty lines are reported as
    /// `ErrEmptyLine` so the driver can skip them.
    fn rep(input: &str, _: &Env) -> Result<String, MalError> {
        if input.is_empty() {
            Err(MalError::ErrEmptyLine)
        } else {
            Ok(print(eval(read(input.into()))))
        }
    }
}

/// Runs the interactive REPL loop for step 0.
fn main() -> Result<(), String> {
    cli_loop::<Env, Step0Repl>()
}
#[cfg(test)]
mod tests {
    use super::*;

    /// The step-0 REPL must satisfy the reference spec file.
    #[test]
    fn test_step0_spec() {
        assert_eq!(
            validate_against_spec::<Env, Step0Repl>("step0_repl.mal"),
            Ok(())
        );
    }
}
|
//main
//calls game from game/mod.rs
extern crate quicksilver;
pub mod game;
/// Entry point: pins HIDPI to 1.0, configures scaling, then hands control
/// to the quicksilver event loop running `game::Game`.
fn main() {
    // call copied from qs:
    // NOTE: Set HIDPI to 1.0 to get pixel-perfect rendering.
    // Otherwise the window resizes to whatever value the OS sets and
    // scales the contents.
    // https://docs.rs/glutin/0.19.0/glutin/dpi/index.html
    std::env::set_var("WINIT_HIDPI_FACTOR", "1.0");
    let settings = quicksilver::lifecycle::Settings {
        // If the graphics do need to be scaled (e.g. using
        // `with_center`), blur them. This looks better with fonts.
        scale: quicksilver::graphics::ImageScaleStrategy::Blur,
        ..Default::default()
    };
    // Window title "NKd", logical size 800x600.
    quicksilver::lifecycle::run::<game::Game>(
        "NKd",
        quicksilver::geom::Vector::new(800, 600), //set size... 60fps
        settings,
    );
}
|
use anyhow::Result;
use clap::Parser;
use qapi::qmp;
use super::{GlobalArgs, QmpStream};
/// `status` subcommand: queries the VM run state over QMP.
#[derive(Parser, Debug)]
pub(crate) struct Status {
}

impl Status {
    /// Executes `query-status` against the QMP stream and pretty-prints the
    /// reply. Returns exit code 0 unless the QMP call itself fails.
    pub async fn run(self, qmp: QmpStream, _args: GlobalArgs) -> Result<i32> {
        let status = qmp.execute(qmp::query_status { }).await?;
        println!("VCPU Status: {:#?}", status);
        Ok(0)
    }
}
|
use anyhow::{Context, Result};
use cargo_metadata::{
Metadata as CargoMetadata, Package as MetadataPackage, PackageId as MetadataId,
};
use std::{
collections::{btree_map, BTreeMap, BTreeSet},
path::{Path, PathBuf},
str::FromStr,
};
/// The minimal amount of package information we care about
///
/// The package's `name` is used to `cargo clean -p` specific crates while the
/// `src_paths` are used to trigger recompiles of packages within the workspace
#[derive(Debug)]
pub struct Package {
    name: String,
    src_paths: Vec<PathBuf>,
}

impl Package {
    /// Crate name as reported by `cargo metadata`.
    pub fn name(&self) -> &str {
        &self.name
    }

    /// Root source file of each target in this package.
    pub fn src_paths(&self) -> &[PathBuf] {
        &self.src_paths
    }
}

impl From<&MetadataPackage> for Package {
    /// Extracts just the name and per-target source paths from the full
    /// `cargo_metadata` package record.
    fn from(package: &MetadataPackage) -> Self {
        let name = package.name.clone();
        let src_paths = package
            .targets
            .iter()
            .map(|target| target.src_path.clone().into_std_path_buf())
            .collect();
        Self { name, src_paths }
    }
}
/// Contains metadata for the current project
pub struct Metadata {
    /// Maps packages metadata id to the package
    ///
    /// Currently `MetadataId` is used over `PkgId` because pkgid is not a UUID
    packages: BTreeMap<MetadataId, Package>,
    /// All of the crates in the current workspace
    workspace_members: Vec<MetadataId>,
    /// Maps each dependency to its set of dependents
    reverse_deps: BTreeMap<MetadataId, BTreeSet<MetadataId>>,
    /// The target directory of the project
    ///
    /// Typically `target` at the workspace root, but can be overridden
    target_directory: PathBuf,
    /// Crate in the current working directory; `None` if run from a
    /// virtual workspace root.
    current_package: Option<Package>,
}
impl Metadata {
    /// Looks up a package by its metadata id.
    pub fn package(&self, id: &MetadataId) -> Option<&Package> {
        self.packages.get(id)
    }

    /// Iterates over all `(id, package)` pairs.
    pub fn entries<'this>(&'this self) -> btree_map::Iter<'this, MetadataId, Package> {
        self.packages.iter()
    }

    /// Ids of all crates in the current workspace.
    pub fn workspace_members(&self) -> &[MetadataId] {
        &self.workspace_members
    }

    /// The project's target directory.
    pub fn target_directory(&self) -> &Path {
        &self.target_directory
    }

    /// Package of the current working directory, if not a virtual root.
    pub fn current_package(&self) -> Option<&Package> {
        self.current_package.as_ref()
    }

    /// Gets all dependents (direct and transitive) of `id`
    pub fn all_dependents_of(&self, id: &MetadataId) -> BTreeSet<&MetadataId> {
        let mut dependents = BTreeSet::new();
        self.all_dependents_of_helper(id, &mut dependents);
        dependents
    }

    /// Depth-first walk of `reverse_deps`; the `insert` return value both
    /// de-duplicates results and terminates on dependency cycles.
    fn all_dependents_of_helper<'this>(
        &'this self,
        id: &MetadataId,
        dependents: &mut BTreeSet<&'this MetadataId>,
    ) {
        if let Some(immediate_dependents) = self.reverse_deps.get(id) {
            for immediate_dependent in immediate_dependents {
                if dependents.insert(immediate_dependent) {
                    self.all_dependents_of_helper(&immediate_dependent, dependents);
                }
            }
        }
    }
}
impl FromStr for Metadata {
    type Err = anyhow::Error;

    /// Parses the JSON output of `cargo metadata` and indexes it: packages by
    /// id, plus a reverse-dependency map built from the resolved graph.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let cargo_metadata: CargoMetadata = serde_json::from_str(s)?;
        // Extract the package in the current working directory, empty if run from a
        // virtual workspace root.
        let current_package: Option<Package> = cargo_metadata.root_package().map(Package::from);
        let CargoMetadata {
            packages: metadata_packages,
            workspace_members,
            resolve,
            target_directory,
            ..
        } = cargo_metadata;
        // Index packages by their metadata id.
        let mut packages = BTreeMap::new();
        for metadata_package in metadata_packages {
            let package = Package::from(&metadata_package);
            packages.insert(metadata_package.id, package);
        }
        // Invert the resolved graph: dependency -> set of dependents.
        let mut reverse_deps: BTreeMap<_, BTreeSet<_>> = BTreeMap::new();
        let resolve =
            resolve.context("Resolving the dependency graph failed (old version of cargo)")?;
        for node in resolve.nodes {
            for dep in node.deps {
                let dependent = node.id.clone();
                let dependency = dep.pkg;
                reverse_deps
                    .entry(dependency)
                    .or_default()
                    .insert(dependent);
            }
        }
        let target_directory = target_directory.into_std_path_buf();
        Ok(Self {
            packages,
            workspace_members,
            reverse_deps,
            target_directory,
            current_package,
        })
    }
}
|
//! Metrics for [`Loader`].
use std::sync::Arc;
use async_trait::async_trait;
use iox_time::TimeProvider;
use metric::{DurationHistogram, U64Counter};
use observability_deps::tracing::warn;
use parking_lot::Mutex;
use pdatastructs::filters::{bloomfilter::BloomFilter, Filter};
use super::Loader;
/// Wraps a [`Loader`] and adds metrics.
pub struct MetricsLoader<L>
where
    L: Loader,
{
    /// The wrapped loader that does the actual work.
    inner: L,
    /// Clock used to time each `load` call.
    time_provider: Arc<dyn TimeProvider>,
    /// Counts loads of keys not seen before.
    metric_calls_new: U64Counter,
    /// Counts loads of keys that were (probably) loaded before.
    metric_calls_probably_reloaded: U64Counter,
    /// Wall-clock duration of inner `load` calls.
    metric_duration: DurationHistogram,
    /// Bloom filter of keys seen so far; false positives make a truly new key
    /// count as "probably reloaded", which is accepted (see `new`).
    seen: Mutex<BloomFilter<L::K>>,
}
impl<L> MetricsLoader<L>
where
    L: Loader,
{
    /// Create new wrapper.
    ///
    /// Registers the call counters and duration histogram under `name`,
    /// and sizes the "seen" bloom filter.
    ///
    /// # Testing
    /// If `testing` is set, the "seen" metrics will NOT be processed correctly because the underlying data structure is
    /// too expensive to create many times a second in an un-optimized debug build.
    pub fn new(
        inner: L,
        name: &'static str,
        time_provider: Arc<dyn TimeProvider>,
        metric_registry: &metric::Registry,
        testing: bool,
    ) -> Self {
        let metric_calls = metric_registry.register_metric::<U64Counter>(
            "cache_load_function_calls",
            "Count how often a cache loader was called.",
        );
        let metric_calls_new = metric_calls.recorder(&[("name", name), ("status", "new")]);
        let metric_calls_probably_reloaded =
            metric_calls.recorder(&[("name", name), ("status", "probably_reloaded")]);
        let metric_duration = metric_registry
            .register_metric::<DurationHistogram>(
                "cache_load_function_duration",
                "Time taken by cache load function calls",
            )
            .recorder(&[("name", name)]);
        let seen = if testing {
            // Tiny (1-bit, 1-hash) stand-in filter; see `# Testing` above.
            BloomFilter::with_params(1, 1)
        } else {
            // Set up bloom filter for "probably reloaded" test:
            //
            // - input size: we expect 10M elements
            // - reliability: probability of false positives should be <= 1%
            // - CPU efficiency: number of hash functions should be <= 10
            // - RAM efficiency: size should be <= 15MB
            //
            //
            // A bloom filter was chosen here because of the following properties:
            //
            // - memory bound: The storage size is bound even when the set of "probably reloaded" entries approaches
            //   infinite sizes.
            // - memory efficiency: We do not need to store the actual keys.
            // - infallible: Inserting new data into the filter never fails (in contrast to for example a CuckooFilter or
            //   QuotientFilter).
            //
            // The fact that a filter can produce false positives (i.e. it classifies an actual new entry as "probably
            // reloaded") is considered to be OK since the metric is more of an estimate and a guide for cache tuning. We
            // might want to use a more efficient (i.e. more modern) filter design at one point though.
            let seen = BloomFilter::with_properties(10_000_000, 1.0 / 100.0);
            const BOUND_HASH_FUNCTIONS: usize = 10;
            assert!(
                seen.k() <= BOUND_HASH_FUNCTIONS,
                "number of hash functions for bloom filter should be <= {} but is {}",
                BOUND_HASH_FUNCTIONS,
                seen.k(),
            );
            const BOUND_SIZE_BYTES: usize = 15_000_000;
            // `m()` is in bits; round up to whole bytes.
            let size_bytes = (seen.m() + 7) / 8;
            assert!(
                size_bytes <= BOUND_SIZE_BYTES,
                "size of bloom filter should be <= {BOUND_SIZE_BYTES} bytes but is {size_bytes} bytes",
            );
            seen
        };
        Self {
            inner,
            time_provider,
            metric_calls_new,
            metric_calls_probably_reloaded,
            metric_duration,
            seen: Mutex::new(seen),
        }
    }
}
impl<L> std::fmt::Debug for MetricsLoader<L>
where
    L: Loader,
{
    /// Renders as `MetricsLoader { .. }`, deliberately omitting all fields.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("MetricsLoader").finish_non_exhaustive()
    }
}
#[async_trait]
impl<L> Loader for MetricsLoader<L>
where
    L: Loader,
{
    type K = L::K;
    type V = L::V;
    type Extra = L::Extra;

    /// Delegates to the inner loader, ticking the "new"/"probably reloaded"
    /// counter and recording the call duration.
    async fn load(&self, k: Self::K, extra: Self::Extra) -> Self::V {
        {
            // Scope the lock so it is released before awaiting the inner load.
            let mut seen_guard = self.seen.lock();
            // `insert` returns true when the key was not in the filter yet.
            if seen_guard.insert(&k).expect("bloom filter cannot fail") {
                &self.metric_calls_new
            } else {
                &self.metric_calls_probably_reloaded
            }
            .inc(1);
        }
        let t_start = self.time_provider.now();
        let v = self.inner.load(k, extra).await;
        let t_end = self.time_provider.now();
        // The clock may step backwards (e.g. mock providers); skip recording
        // rather than panic.
        match t_end.checked_duration_since(t_start) {
            Some(duration) => {
                self.metric_duration.record(duration);
            }
            None => {
                warn!("Clock went backwards, not recording loader duration");
            }
        }
        v
    }
}
#[cfg(test)]
mod tests {
    use std::time::Duration;
    use iox_time::{MockProvider, Time};
    use metric::{Observation, RawReporter};
    use crate::loader::FunctionLoader;
    use super::*;

    /// Loads 42 twice and 1337 once: expects 2 "new" calls, 1 "probably
    /// reloaded", and a duration histogram of 3 samples x 10s each.
    #[tokio::test]
    async fn test_metrics() {
        let time_provider = Arc::new(MockProvider::new(Time::from_timestamp_millis(0).unwrap()));
        let metric_registry = Arc::new(metric::Registry::new());
        let time_provider_captured = Arc::clone(&time_provider);
        let d = Duration::from_secs(10);
        // Inner loader advances the mock clock by `d` per call and echoes the key.
        let inner_loader = FunctionLoader::new(move |x: u64, _extra: ()| {
            let time_provider_captured = Arc::clone(&time_provider_captured);
            async move {
                time_provider_captured.inc(d);
                x.to_string()
            }
        });
        let loader = MetricsLoader::new(
            inner_loader,
            "my_loader",
            time_provider,
            &metric_registry,
            false,
        );
        // Before any loads: both counters are zero and the histogram is empty.
        let mut reporter = RawReporter::default();
        metric_registry.report(&mut reporter);
        for status in ["new", "probably_reloaded"] {
            assert_eq!(
                reporter
                    .metric("cache_load_function_calls")
                    .unwrap()
                    .observation(&[("name", "my_loader"), ("status", status)])
                    .unwrap(),
                &Observation::U64Counter(0)
            );
        }
        if let Observation::DurationHistogram(hist) = reporter
            .metric("cache_load_function_duration")
            .unwrap()
            .observation(&[("name", "my_loader")])
            .unwrap()
        {
            assert_eq!(hist.sample_count(), 0);
            assert_eq!(hist.total, Duration::from_secs(0));
        } else {
            panic!("Wrong observation type");
        }
        // Two distinct keys (new) plus one repeat (probably reloaded).
        assert_eq!(loader.load(42, ()).await, String::from("42"));
        assert_eq!(loader.load(42, ()).await, String::from("42"));
        assert_eq!(loader.load(1337, ()).await, String::from("1337"));
        let mut reporter = RawReporter::default();
        metric_registry.report(&mut reporter);
        assert_eq!(
            reporter
                .metric("cache_load_function_calls")
                .unwrap()
                .observation(&[("name", "my_loader"), ("status", "new")])
                .unwrap(),
            &Observation::U64Counter(2)
        );
        assert_eq!(
            reporter
                .metric("cache_load_function_calls")
                .unwrap()
                .observation(&[("name", "my_loader"), ("status", "probably_reloaded")])
                .unwrap(),
            &Observation::U64Counter(1)
        );
        if let Observation::DurationHistogram(hist) = reporter
            .metric("cache_load_function_duration")
            .unwrap()
            .observation(&[("name", "my_loader")])
            .unwrap()
        {
            assert_eq!(hist.sample_count(), 3);
            assert_eq!(hist.total, 3 * d);
        } else {
            panic!("Wrong observation type");
        }
    }
}
|
pub mod queue;
pub mod user;
|
use piece::*;
use mask::*;
use sided_mask::*;
use std::ops::*;
use rank::*;
/// Compile-time description of one side (color): its piece constants,
/// bitboard mask type, and side-specific ranks.
pub trait Side {
    /// Side-specific bitboard mask type.
    type Mask : SidedMask;
    /// The opposing side (White <-> Black).
    type Opposite : Side;
    const PAWN : Piece;
    const KNIGHT : Piece;
    const BISHOP : Piece;
    const ROOK : Piece;
    const QUEEN : Piece;
    const KING : Piece;
    /// Contiguous index range of this side's pieces (White 0..6, Black 6..12)
    /// — presumably indices into piece-indexed arrays; confirm against users.
    const RANGE : Range<usize>;
    /// Rank associated with en passant for this side (White: _6, Black: _3).
    const EN_PASSANT_RANK : Rank;
    /// Mask of the rank a pawn occupies after a double push
    /// (White: rank 4, Black: rank 5).
    const DOUBLE_PUSH_RANK_MASK : Mask;
}
/// Marker type for the white side.
#[derive(Eq, Copy, Clone, Debug, Default, PartialEq)]
pub struct White;

/// Marker type for the black side.
#[derive(Eq, Copy, Clone, Debug, Default, PartialEq)]
pub struct Black;

impl Side for White {
    type Mask = WhiteMask;
    type Opposite = Black;
    const PAWN : Piece = WHITE_PAWN;
    const KNIGHT : Piece = WHITE_KNIGHT;
    const BISHOP : Piece = WHITE_BISHOP;
    const ROOK : Piece = WHITE_ROOK;
    const QUEEN : Piece = WHITE_QUEEN;
    const KING : Piece = WHITE_KING;
    const RANGE : Range<usize> = 0..6;
    const EN_PASSANT_RANK : Rank = _6;
    const DOUBLE_PUSH_RANK_MASK : Mask = masks::_4;
}

impl Side for Black {
    type Mask = BlackMask;
    type Opposite = White;
    const PAWN : Piece = BLACK_PAWN;
    const KNIGHT : Piece = BLACK_KNIGHT;
    const BISHOP : Piece = BLACK_BISHOP;
    const ROOK : Piece = BLACK_ROOK;
    const QUEEN : Piece = BLACK_QUEEN;
    const KING : Piece = BLACK_KING;
    const RANGE : Range<usize> = 6..12;
    const EN_PASSANT_RANK : Rank = _3;
    const DOUBLE_PUSH_RANK_MASK : Mask = masks::_5;
}
|
//! The Document interface represents any web page loaded in the browser
pub mod body_1;
pub mod create_element_2;
pub mod create_text_node_2;
pub mod get_element_by_id_2;
pub mod new_0;
use std::convert::TryInto;
use std::mem;
use anyhow::*;
use web_sys::Document;
use liblumen_alloc::erts::exception;
use liblumen_alloc::erts::term::prelude::*;
/// Atom naming the Elixir module this native module backs.
pub fn module() -> Atom {
    Atom::try_from_str("Elixir.Lumen.Web.Document").unwrap()
}
// Private

/// Extracts the `web_sys::Document` held by a resource term.
///
/// Errors if `term` is not a resource, or is a resource wrapping something
/// other than a `Document`.
///
/// NOTE(review): the reference is transmuted to `'static`; this appears to
/// rely on the owning resource keeping the `Document` alive for the
/// reference's whole use — confirm that invariant at the call sites.
fn from_term(term: Term) -> Result<&'static Document, exception::Exception> {
    let boxed: Boxed<Resource> = term
        .try_into()
        .with_context(|| format!("{} must be a document resource", term))?;
    let document_reference: Resource = boxed.into();
    match document_reference.downcast_ref() {
        Some(document) => {
            let static_document: &'static Document =
                unsafe { mem::transmute::<&Document, _>(document) };
            Ok(static_document)
        }
        None => Err(TypeError)
            .with_context(|| format!("{} is a resource, but not a document", term))
            .map_err(From::from),
    }
}
|
use std::fmt::Debug;
use std::time::Instant;
/// Runs `f` on `argument` and prints the puzzle `part`, the answer, and the
/// elapsed wall-clock time in milliseconds.
pub fn run_timed<T, X>(f: fn(T) -> X, argument: T, part: u64)
where
    X: Debug,
{
    let started = Instant::now();
    let answer = f(argument);
    let elapsed_ms = started.elapsed().as_millis();
    println!(
        "part {}: {:?}, result found in {} ms",
        part, answer, elapsed_ms
    );
}
/// Returns `(min, max)` of `input`.
///
/// Takes a slice instead of `&Vec<i64>` (clippy `ptr_arg`); existing callers
/// passing `&Vec<i64>` still compile via deref coercion. For an empty slice
/// the fold never runs and the seed `(i64::MAX, i64::MIN)` is returned,
/// preserving the original sentinel behaviour.
pub fn min_max(input: &[i64]) -> (i64, i64) {
    input
        .iter()
        .fold((i64::MAX, i64::MIN), |(lo, hi), &nr| (lo.min(nr), hi.max(nr)))
}
#[cfg(test)]
mod tests {
    use crate::calculator::min_max;

    /// Mixed unsorted input: min is 0, max is 16.
    #[test]
    fn test_min_max() {
        let input: Vec<i64> = vec![16, 1, 2, 0, 4, 2, 7, 1, 2, 14];
        let (min, max) = min_max(&input);
        assert_eq!(0, min);
        assert_eq!(16, max);
    }
}
} |
pub mod component;
pub mod data_manager;
pub mod entity;
pub mod system;
pub mod world;
pub use self::component::Component;
pub use self::data_manager::DataManager;
pub use self::entity::Entity;
pub use self::system::System;
pub use self::world::World; |
/*
* Given an array of meeting time intervals consisting of start and end times [[s1,e1],[s2,e2],...] (si < ei),
* find the minimum number of conference rooms required.
*
* Example 1:
* ----------
* Input: [[0, 30],[5, 10],[15, 20]]
* Output: 2
*
* Example 2:
* -----------
* Input: [[7,10],[2,4]]
* Output: 1
*/
/// Minimum number of rooms so that no two overlapping meetings share one.
///
/// Two-pointer sweep over independently sorted start and end times: a start
/// earlier than the earliest unmatched end needs one more room; otherwise a
/// meeting has finished and a room is freed. The answer is the peak number of
/// rooms simultaneously in use. O(n log n) time, O(n) space.
///
/// The previous `n == 1` special case was redundant: since every start is
/// strictly less than its end, a single meeting yields a peak of 1 anyway.
pub fn min_meeting_rooms(intervals: Vec<Vec<i32>>) -> i32 {
    let n = intervals.len();
    if n == 0 {
        return 0;
    }
    let mut starts: Vec<i32> = intervals.iter().map(|i| i[0]).collect();
    let mut ends: Vec<i32> = intervals.iter().map(|i| i[1]).collect();
    // Order doesn't need to be stable; sort_unstable is faster and allocation-free.
    starts.sort_unstable();
    ends.sort_unstable();
    let mut in_use = 0;
    let mut peak = 0;
    let (mut i, mut j) = (0, 0);
    while i < n && j < n {
        if starts[i] < ends[j] {
            // Meeting i begins before the earliest active meeting ends.
            in_use += 1;
            peak = peak.max(in_use);
            i += 1;
        } else {
            // A meeting ended at or before this start: free a room.
            in_use -= 1;
            j += 1;
        }
    }
    peak
}
#[cfg(test)]
mod test {
    use super::min_meeting_rooms;

    /// [0,30] overlaps both shorter meetings -> 2 rooms.
    #[test]
    fn example1() {
        let input = vec![vec![0, 30], vec![5, 10], vec![15, 20]];
        assert_eq!(min_meeting_rooms(input), 2);
    }

    /// Disjoint meetings -> 1 room.
    #[test]
    fn example2() {
        let input = vec![vec![7,10], vec![2,4]];
        assert_eq!(min_meeting_rooms(input), 1);
    }
}
|
pub(crate) use _sha256::make_module;
#[pymodule]
mod _sha256 {
    use crate::hashlib::_hashlib::{local_sha224, local_sha256, HashArgs};
    use crate::vm::{PyPayload, PyResult, VirtualMachine};

    /// `_sha256.sha224(...)`: delegates to the shared hashlib implementation.
    #[pyfunction]
    fn sha224(args: HashArgs, vm: &VirtualMachine) -> PyResult {
        Ok(local_sha224(args).into_pyobject(vm))
    }

    /// `_sha256.sha256(...)`: delegates to the shared hashlib implementation.
    #[pyfunction]
    fn sha256(args: HashArgs, vm: &VirtualMachine) -> PyResult {
        Ok(local_sha256(args).into_pyobject(vm))
    }
}
|
pub struct Solution;

impl Solution {
    /// Reverses the decimal digits of `x`, returning 0 if the reversed
    /// value would overflow `i32`. Negative inputs keep their sign because
    /// `%` yields negative digits for a negative operand.
    pub fn reverse(x: i32) -> i32 {
        let mut remaining = x;
        let mut reversed: i32 = 0;
        while remaining != 0 {
            let digit = remaining % 10;
            remaining /= 10;
            // Shift the accumulator one decimal place and append the digit,
            // bailing out with 0 on any overflow.
            reversed = match reversed.checked_mul(10).and_then(|r| r.checked_add(digit)) {
                Some(next) => next,
                None => return 0,
            };
        }
        reversed
    }
}
/// LeetCode 7 sample cases, including trailing-zero truncation (120 -> 21).
#[test]
fn test0007() {
    assert_eq!(Solution::reverse(123), 321);
    assert_eq!(Solution::reverse(-123), -321);
    assert_eq!(Solution::reverse(120), 21);
}
|
use super::utills::{get_points, started_check};
use colors::SUCCESS_COLOR;
use prelude::*;
use serenity::framework::standard::CreateGroup;
use store::UsersInfo;
use timeago::Formatter;
/// `info` command: shows the caller's Boogie Points and how long they have
/// been collecting.
struct InfoCommand;

impl Command for InfoCommand {
    fn execute(&self, ctx: &mut Context, msg: &Message, _args: Args) -> CommandResult {
        // Format the author's "collecting since" timestamp as a human-readable
        // age via timeago.
        let collecting_sense = {
            let mut data = ctx.data.lock();
            let users = data.get_mut::<UsersInfo>().unwrap();
            let user = users.get(&msg.author.id.0).unwrap();
            let formatter = Formatter::new();
            formatter.convert(user.collecting_sinse.elapsed().unwrap())
        };
        // Best effort: a failed send is deliberately ignored.
        let _ = msg.channel_id.send_message(|m| {
            m.embed(|e| {
                e.color(SUCCESS_COLOR)
                    .field(
                        "Boogie Points",
                        get_points(ctx, msg.author.id).unwrap().to_string(),
                        true,
                    )
                    .field("Collecting sense", collecting_sense, true)
            })
        });
        Ok(())
    }
}

/// Registers the `info` command, gated by `started_check` so only users who
/// have started collecting can run it.
pub fn register_command(sf: CreateGroup) -> CreateGroup {
    sf.command("info", |c| c.check(started_check).cmd(InfoCommand))
}
|
use super::SkewTContext;
use crate::{
app::config::{self},
coords::{Rect, ScreenCoords, ScreenRect, TPCoords, XYCoords},
gui::{DrawingArgs, PlotContextExt},
};
use gtk::cairo::Context;
use itertools::izip;
use metfor::{Celsius, HectoPascal, Knots, WindSpdDir};
/// Geometry (in user-space units) shared by every wind barb in one draw pass.
struct WindBarbConfig {
    /// Length of the barb shaft.
    shaft_length: f64,
    /// Length of a full barb tick (half barbs use half of it).
    barb_length: f64,
    /// Width of a pennant; also used as the spacing between barbs.
    pennant_width: f64,
    /// X position of the wind-barb column on screen.
    xcoord: f64,
    /// Radius of the station dot at the barb base.
    dot_size: f64,
}
impl WindBarbConfig {
    /// Builds the shared barb geometry from the app config: converts the
    /// configured device-space sizes to user-space and anchors the barb
    /// column just inside the right edge of the plot area.
    fn init(args: DrawingArgs<'_, '_>) -> Self {
        let (ac, cr) = (args.ac, args.cr);
        let config = ac.config.borrow();
        // NOTE(review): the y components are negated before conversion —
        // presumably to compensate for the flipped device y-axis; confirm.
        let (shaft_length, barb_length) = cr
            .device_to_user_distance(config.wind_barb_shaft_length, -config.wind_barb_barb_length)
            .unwrap();
        let (dot_size, pennant_width) = cr
            .device_to_user_distance(config.wind_barb_dot_radius, -config.wind_barb_pennant_width)
            .unwrap();
        let padding = cr
            .device_to_user_distance(config.edge_padding, 0.0)
            .unwrap()
            .0;
        // Clamp to x = 1.0 in XY space so the column stays on the chart.
        let screen_bounds = ac.skew_t.get_plot_area();
        let XYCoords { x: mut xmax, .. } =
            ac.skew_t.convert_screen_to_xy(screen_bounds.upper_right);
        if xmax > 1.0 {
            xmax = 1.0;
        }
        let ScreenCoords { x: xmax, .. } =
            ac.skew_t.convert_xy_to_screen(XYCoords { x: xmax, y: 0.0 });
        // Leave room for the edge padding and the shaft itself.
        let xcoord = xmax - padding - shaft_length;
        WindBarbConfig {
            shaft_length,
            barb_length,
            pennant_width,
            xcoord,
            dot_size,
        }
    }
}
/// Fully resolved screen geometry for a single wind barb, plus how many
/// entries of the fixed-size coordinate arrays are actually in use.
struct WindBarbData {
    /// Station dot position at the barb's pressure level.
    center: ScreenCoords,
    /// Far end of the shaft.
    shaft_end: ScreenCoords,
    /// Number of valid entries in `pennant_coords` (capped at 5).
    num_pennants: usize,
    /// Triangle vertices of each pennant (one pennant per 50 kt).
    pennant_coords: [(ScreenCoords, ScreenCoords, ScreenCoords); 5],
    /// Number of valid entries in `barb_coords` (capped at 5).
    num_barbs: usize,
    /// Endpoints of each barb segment (full barb = 10 kt, half barb = 5 kt).
    barb_coords: [(ScreenCoords, ScreenCoords); 5],
    /// Radius of the station dot.
    point_radius: f64,
}
impl WindBarbData {
    /// Computes all screen-space geometry for one barb: the shaft, pennants
    /// (50 kt each), full barbs (10 kt each) and an optional half barb
    /// (5 kt), after rounding the speed to the nearest 5 kt.
    fn create(
        pressure: HectoPascal,
        wind: WindSpdDir<Knots>,
        barb_config: &WindBarbConfig,
        args: DrawingArgs<'_, '_>,
    ) -> Self {
        let center = SkewTContext::get_wind_barb_center(pressure, barb_config.xcoord, args);
        let WindSpdDir {
            speed: Knots(speed),
            direction,
        } = wind;
        // Convert angle to traditional XY coordinate plane
        let direction_radians = ::std::f64::consts::FRAC_PI_2 - direction.to_radians();
        let dx = barb_config.shaft_length * direction_radians.cos();
        let dy = barb_config.shaft_length * direction_radians.sin();
        let shaft_end = ScreenCoords {
            x: center.x + dx,
            y: center.y + dy,
        };
        // Round the speed to the nearest 5 knots, then decompose it into
        // pennants (50s) and barbs (10s); the remainder drives the half barb.
        let mut rounded_speed = (speed / 10.0 * 2.0).round() / 2.0 * 10.0;
        let mut num_pennants = 0;
        while rounded_speed >= 50.0 {
            num_pennants += 1;
            rounded_speed -= 50.0;
        }
        let mut num_barbs = 0;
        while rounded_speed >= 10.0 {
            num_barbs += 1;
            rounded_speed -= 10.0;
        }
        let mut pennant_coords = [(
            ScreenCoords::origin(),
            ScreenCoords::origin(),
            ScreenCoords::origin(),
        ); 5];
        // Pennants: triangles laid along the shaft from the tip inward.
        // Extras beyond the 5-slot arrays are silently dropped.
        for i in 0..num_pennants {
            if i >= pennant_coords.len() {
                break;
            }
            let mut pos = shaft_end;
            pos.x -= (i + 1) as f64 * barb_config.pennant_width * direction_radians.cos();
            pos.y -= (i + 1) as f64 * barb_config.pennant_width * direction_radians.sin();
            let pnt1 = pos;
            pos.x += barb_config.pennant_width * direction_radians.cos();
            pos.y += barb_config.pennant_width * direction_radians.sin();
            let pnt2 = pos;
            // Third vertex sticks out perpendicular to the shaft.
            let point_angle = direction_radians - ::std::f64::consts::FRAC_PI_2;
            pos.x += barb_config.barb_length * point_angle.cos();
            pos.y += barb_config.barb_length * point_angle.sin();
            let pnt3 = pos;
            pennant_coords[i] = (pnt1, pnt2, pnt3);
        }
        let mut barb_coords = [(ScreenCoords::origin(), ScreenCoords::origin()); 5];
        // Full barbs: ticks continuing inward after the pennants.
        for i in 0..num_barbs {
            if i >= barb_coords.len() {
                break;
            }
            let mut pos = shaft_end;
            pos.x -= num_pennants as f64 * barb_config.pennant_width * direction_radians.cos();
            pos.y -= num_pennants as f64 * barb_config.pennant_width * direction_radians.sin();
            pos.x -= i as f64 * barb_config.pennant_width * direction_radians.cos();
            pos.y -= i as f64 * barb_config.pennant_width * direction_radians.sin();
            let pnt1 = pos;
            let point_angle = direction_radians - ::std::f64::consts::FRAC_PI_2;
            pos.x += barb_config.barb_length * point_angle.cos();
            pos.y += barb_config.barb_length * point_angle.sin();
            let pnt2 = pos;
            barb_coords[i] = (pnt1, pnt2);
        }
        // Add half barb if needed
        if rounded_speed >= 5.0 && num_barbs < barb_coords.len() {
            let mut pos = shaft_end;
            pos.x -= num_pennants as f64 * barb_config.pennant_width * direction_radians.cos();
            pos.y -= num_pennants as f64 * barb_config.pennant_width * direction_radians.sin();
            pos.x -= num_barbs as f64 * barb_config.pennant_width * direction_radians.cos();
            pos.y -= num_barbs as f64 * barb_config.pennant_width * direction_radians.sin();
            let pnt1 = pos;
            let point_angle = direction_radians - ::std::f64::consts::FRAC_PI_2;
            // Same direction as a full barb, half the length.
            pos.x += barb_config.barb_length * point_angle.cos() / 2.0;
            pos.y += barb_config.barb_length * point_angle.sin() / 2.0;
            let pnt2 = pos;
            barb_coords[num_barbs] = (pnt1, pnt2);
            num_barbs += 1;
        }
        let point_radius = barb_config.dot_size;
        WindBarbData {
            center,
            shaft_end,
            num_pennants,
            pennant_coords,
            num_barbs,
            barb_coords,
            point_radius,
        }
    }

    /// Smallest screen rectangle containing the dot, the shaft end, and all
    /// pennant/barb tips — used for on-screen and overlap filtering.
    fn bounding_box(&self) -> ScreenRect {
        // Seed with the station dot's extent.
        let mut bbox = ScreenRect {
            lower_left: ScreenCoords {
                x: self.center.x - self.point_radius,
                y: self.center.y - self.point_radius,
            },
            upper_right: ScreenCoords {
                x: self.center.x + self.point_radius,
                y: self.center.y + self.point_radius,
            },
        };
        bbox.expand_to_fit(self.shaft_end);
        for i in 0..self.num_pennants {
            if i >= self.pennant_coords.len() {
                break;
            }
            bbox.expand_to_fit(self.pennant_coords[i].2);
        }
        for i in 0..self.num_barbs {
            if i >= self.barb_coords.len() {
                break;
            }
            bbox.expand_to_fit(self.barb_coords[i].1);
        }
        bbox
    }

    /// Renders the barb with cairo: filled dot, stroked shaft, filled
    /// pennant triangles, stroked barb segments.
    fn draw(&self, cr: &Context) {
        // Assume color and line width are already taken care of.
        cr.arc(
            self.center.x,
            self.center.y,
            self.point_radius,
            0.0,
            2.0 * ::std::f64::consts::PI,
        );
        cr.fill().unwrap();
        cr.move_to(self.center.x, self.center.y);
        cr.line_to(self.shaft_end.x, self.shaft_end.y);
        cr.stroke().unwrap();
        for (i, &(pnt1, pnt2, pnt3)) in self.pennant_coords.iter().enumerate() {
            if i >= self.num_pennants {
                break;
            }
            cr.move_to(pnt1.x, pnt1.y);
            cr.line_to(pnt2.x, pnt2.y);
            cr.line_to(pnt3.x, pnt3.y);
            cr.close_path();
            cr.fill().unwrap();
        }
        for (i, &(pnt1, pnt2)) in self.barb_coords.iter().enumerate() {
            if i >= self.num_barbs {
                break;
            }
            cr.move_to(pnt1.x, pnt1.y);
            cr.line_to(pnt2.x, pnt2.y);
            cr.stroke().unwrap();
        }
    }
}
impl SkewTContext {
    /// Draws the wind-profile column when enabled in the config: builds barb
    /// geometry for every usable level, filters overlapping/off-screen barbs,
    /// then renders the survivors.
    pub fn draw_wind_profile(args: DrawingArgs<'_, '_>) {
        if args.ac.config.borrow().show_wind_profile {
            let (ac, cr) = (args.ac, args.cr);
            let config = ac.config.borrow();
            // Nothing to draw without a sounding.
            let anal = if let Some(anal) = ac.get_sounding_for_display() {
                anal
            } else {
                return;
            };
            let anal = anal.borrow();
            let snd = anal.sounding();
            let barb_config = WindBarbConfig::init(args);
            let barb_data = Self::gather_wind_data(snd, &barb_config, args);
            let barb_data = Self::filter_wind_data(args, barb_data);
            let rgba = config.wind_rgba;
            cr.set_source_rgba(rgba.0, rgba.1, rgba.2, rgba.3);
            cr.set_line_width(
                cr.device_to_user_distance(config.wind_barb_line_width, 0.0)
                    .unwrap()
                    .0,
            );
            for bdata in &barb_data {
                bdata.draw(cr);
            }
        }
    }

    /// Builds a barb for every level that has both a pressure and a wind
    /// value, skipping levels at or above (<=) the configured minimum pressure.
    fn gather_wind_data(
        snd: &::sounding_analysis::Sounding,
        barb_config: &WindBarbConfig,
        args: DrawingArgs<'_, '_>,
    ) -> Vec<WindBarbData> {
        let wind = snd.wind_profile();
        let pres = snd.pressure_profile();
        izip!(pres, wind)
            .filter_map(|tuple| {
                let (p, w) = (*tuple.0, *tuple.1);
                if let (Some(p), Some(w)) = (p.into(), w.into()) {
                    if p > config::MINP {
                        Some((p, w))
                    } else {
                        None
                    }
                } else {
                    None
                }
            })
            .map(|tuple| {
                let (p, w) = tuple;
                WindBarbData::create(p, w, barb_config, args)
            })
            .collect()
    }

    /// Keeps only barbs that lie inside the plot area and do not overlap the
    /// previously kept barb.
    fn filter_wind_data(
        args: DrawingArgs<'_, '_>,
        barb_data: Vec<WindBarbData>,
    ) -> Vec<WindBarbData> {
        let ac = args.ac;
        // Remove overlapping barbs, or barbs not on the screen
        let mut keepers: Vec<WindBarbData> = vec![];
        let screen_box = ac.skew_t.get_plot_area();
        // Seed far off-screen so the first barb never tests as overlapping.
        let mut last_added_bbox: ScreenRect = ScreenRect {
            lower_left: ScreenCoords {
                x: ::std::f64::MAX,
                y: ::std::f64::MAX,
            },
            upper_right: ScreenCoords {
                x: ::std::f64::MAX,
                y: ::std::f64::MAX,
            },
        };
        for bdata in barb_data {
            let bbox = bdata.bounding_box();
            if !bbox.inside(&screen_box) || bbox.overlaps(&last_added_bbox) {
                continue;
            }
            last_added_bbox = bbox;
            keepers.push(bdata);
        }
        keepers
    }

    /// Screen position of a barb's station dot: the fixed column `xcenter`
    /// plus the y of the given pressure level (0 °C is used only to complete
    /// the T/P coordinate; the resulting x is discarded).
    pub fn get_wind_barb_center(
        pressure: HectoPascal,
        xcenter: f64,
        args: DrawingArgs<'_, '_>,
    ) -> ScreenCoords {
        let ac = args.ac;
        let ScreenCoords { y: yc, .. } = ac.skew_t.convert_tp_to_screen(TPCoords {
            temperature: Celsius(0.0),
            pressure,
        });
        ScreenCoords { x: xcenter, y: yc }
    }
}
|
use std::collections::HashMap;
use std::mem;
use cranelift::codegen::Context;
use cranelift::prelude::*;
use cranelift_module::{Linkage, Module};
use cranelift_simplejit::{SimpleJITBackend, SimpleJITBuilder};
use crate::parser::{Ast, Expresion, OperationType, Statement, VariableID};
/// Shared state for lowering the toy language to Cranelift IR and JITting it.
struct CodegenContext {
    // Maps source-level variables to the SSA value bound by their `let`.
    variable_list: HashMap<VariableID, Value>,
    // The JIT module machine code is defined into.
    module: Module<SimpleJITBackend>,
    // The single value type of the language (a 64-bit integer).
    int: Type,
}
impl CodegenContext {
    /// Create a fresh JIT codegen context with an empty variable table.
    ///
    /// Fix: the previous version also built a throwaway codegen `Context`
    /// here and pushed the return `AbiParam` onto it, then dropped it —
    /// `CodegenContext` never stores that context, and `Ast::build`
    /// creates and configures its own. That dead work is removed.
    fn new() -> Self {
        let builder = SimpleJITBuilder::new(cranelift_module::default_libcall_names());
        let module: Module<SimpleJITBackend> = Module::new(builder);
        // The sole value type this language supports: a 64-bit integer.
        let int: Type = Type::int(64).unwrap();
        CodegenContext {
            variable_list: HashMap::new(),
            module,
            int,
        }
    }
    /// Look up the SSA value bound to `id`.
    ///
    /// Panics on use-before-let (the variable was never defined).
    fn get_value(&self, id: VariableID) -> Value {
        *self.variable_list.get(&id).unwrap()
    }
    /// Bind (or rebind) `id` to `value`.
    fn set_variable_value(&mut self, id: VariableID, value: Value) {
        self.variable_list.insert(id, value);
    }
    /// The integer type used for every value in the language.
    fn int(&self) -> Type {
        self.int
    }
    /// Declare, define, and finalize the `eval` function, returning a
    /// callable pointer to the freshly JIT-compiled machine code.
    fn get_finalized_function(mut self, cranelift_context: &mut Context) -> fn() -> isize {
        let id = self
            .module
            .declare_function("eval", Linkage::Export, &cranelift_context.func.signature)
            .unwrap();
        self.module
            .define_function(
                id,
                cranelift_context,
                &mut codegen::binemit::NullTrapSink {},
            )
            .unwrap();
        self.module.clear_context(cranelift_context);
        self.module.finalize_definitions();
        let code = self.module.get_finalized_function(id);
        // The emitted function has signature `() -> i64`; transmuting the raw
        // code pointer to `fn() -> isize` matches that on 64-bit targets.
        unsafe { mem::transmute::<_, fn() -> isize>(code) }
    }
}
impl Statement {
    /// Lower a statement into Cranelift IR inside the function under
    /// construction, updating the variable table as a side effect.
    fn translate(self, ctx: &mut CodegenContext, builder: &mut FunctionBuilder) {
        match self {
            Statement::LetStatement(var_id, init) => {
                // Evaluate the initializer, then bind the variable to it.
                let init_value = init.translate(ctx, builder);
                ctx.set_variable_value(var_id, init_value);
            }
        }
    }
}
impl Ast {
    /// JIT-compile the program and execute it, returning the final value.
    pub fn run(self) -> isize {
        let func = self.build();
        func()
    }
    /// Lower the whole AST to Cranelift IR, compile to native code, and
    /// return a pointer to the resulting zero-argument function.
    ///
    /// NOTE(review): the entry block is never sealed and the builder is not
    /// finalized before `define_function` — cranelift-frontend normally
    /// requires `seal_block`/`finalize`; confirm this builds and runs on the
    /// pinned cranelift version.
    pub fn build(self) -> fn() -> isize {
        let mut ctx = CodegenContext::new();
        let mut cranelift_context = Context::new();
        // The JITted function returns a single 64-bit integer.
        cranelift_context
            .func
            .signature
            .returns
            .push(AbiParam::new(ctx.int()));
        let mut function_context = FunctionBuilderContext::new();
        let mut builder = FunctionBuilder::new(&mut cranelift_context.func, &mut function_context);
        let block = builder.create_block();
        builder.switch_to_block(block);
        // The clone exists because `get_statement_list` consumes the AST
        // while `get_expresion` is still needed afterwards.
        self.clone()
            .get_statement_list()
            .into_iter()
            .for_each(|statement| statement.translate(&mut ctx, &mut builder));
        // The program's value is the trailing expression.
        let value = self.get_expresion().translate(&mut ctx, &mut builder);
        builder.ins().return_(&[value]);
        ctx.get_finalized_function(&mut cranelift_context)
    }
}
impl Expresion {
    /// Lower an expression tree to Cranelift IR, returning the SSA value
    /// holding its result. Operands are evaluated left-to-right.
    fn translate(self, ctx: &mut CodegenContext, builder: &mut FunctionBuilder) -> Value {
        let int_ty = ctx.int;
        match self {
            // A variable reference reuses the value bound by its `let`.
            Expresion::Variable(id) => ctx.get_value(id),
            // Integer literals become 64-bit constants.
            Expresion::Integer(num) => builder.ins().iconst(int_ty, num as i64),
            Expresion::Operation(optype, terms) => {
                let (lhs, rhs) = *terms;
                let lhs_value = lhs.translate(ctx, builder);
                let rhs_value = rhs.translate(ctx, builder);
                translate_operation(optype, lhs_value, rhs_value, builder)
            }
        }
    }
}
/// Emit the Cranelift instruction for one binary arithmetic operation and
/// return the resulting SSA value.
fn translate_operation(
    optype: OperationType,
    left_value: Value,
    right_value: Value,
    builder: &mut FunctionBuilder,
) -> Value {
    match optype {
        OperationType::Addition => builder.ins().iadd(left_value, right_value),
        OperationType::Subtraction => builder.ins().isub(left_value, right_value),
        OperationType::Multiplication => builder.ins().imul(left_value, right_value),
        // BUG FIX: this language's integers are signed (literals are lowered
        // via `num as i64` and the JITted function returns `isize`), so
        // division must be `sdiv`, not `udiv` — `udiv` reinterprets negative
        // operands as huge unsigned values and produces wrong quotients.
        OperationType::Division => builder.ins().sdiv(left_value, right_value),
    }
}
|
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Scoped attributes should not trigger an unused attributes lint.
#![feature(tool_attributes)]
#![deny(unused_attributes)]
fn main() {
    // The odd `foo ()` spacing is deliberate: this test checks that the
    // scoped `#[rustfmt::skip]` attribute is accepted on a statement without
    // triggering the `unused_attributes` lint denied above.
    #[rustfmt::skip]
    foo ();
}
// Trivial body: this file exercises attribute handling, not behavior.
fn foo() {
    assert!(true);
}
|
use crate::*;
use std::collections::HashMap;
/// A player's score collection, keyed by score identifier.
#[derive(Clone)]
pub struct Scores(HashMap<ScoreId, Score>);
impl Scores {
    /// Wrap an existing id→score map.
    pub fn create_by_map(scores: HashMap<ScoreId, Score>) -> Self {
        Scores(scores)
    }
    /// Number of stored scores.
    pub fn count(&self) -> usize {
        self.0.len()
    }
    /// Look up the score for one id, if present.
    pub fn get(&self, song_id: &ScoreId) -> Option<&Score> {
        self.0.get(song_id)
    }
    /// Borrow the underlying map.
    pub fn get_map(&self) -> &HashMap<ScoreId, Score> {
        &self.0
    }
    /// Build the detail response for this account over every chart in
    /// `tables`, keeping only charts that have a recorded score.
    pub fn table_scores<'a>(
        &self,
        tables: &'a Tables,
        songs: &'a Songs,
        date: &'a UpdatedAt,
        account: &'a Account,
    ) -> DetailResponse<'a> {
        let score = tables
            .get_charts()
            .into_iter()
            .filter_map(|chart| {
                let song = songs.song(chart);
                // `?` drops charts this account has no score for.
                let detail = self.get(&song.song_id())?.make_detail(date);
                Some((chart.md5(), detail))
            })
            .collect();
        DetailResponse {
            user_id: account.user_id(),
            user_name: account.user_name(),
            score,
        }
    }
}
/// Serializable per-user score payload, keyed by chart md5 hashes borrowed
/// from the table data (hence the lifetime).
#[derive(Debug, Clone, Serialize)]
pub struct DetailResponse<'a> {
    // Account identity echoed back to the client.
    user_id: UserId,
    user_name: String,
    // chart md5 -> score detail for that chart.
    score: HashMap<&'a HashMd5, ScoreDetail>,
}
|
extern crate ekiden_tools;
fn main() {
    // Build script: regenerate the `api` module under src/generated, then
    // compile the API definitions.
    ekiden_tools::generate_mod("src/generated", &["api"]);
    ekiden_tools::build_api();
}
|
fn main() {
    let x: i32 = 5;
    let arr = [1, 2, 3];
    // BUG FIX: the comparison originally sat *outside* the loop body, where
    // the loop variable `i` is no longer in scope (a compile error). It
    // belongs inside the loop; `i` is `&i32` here, so deref to compare.
    for i in arr.iter() {
        if *i == 7 {
            println!("{} is equal to 7", i);
        } else {
            println!("{} is smaller than 7", i);
        }
    }
    println!("{:?}", x);
}
|
pub use crate::other::*;
// libc-compatible type aliases as emitted by the C-to-Rust translator; they
// mirror the typedefs the original C sources relied on.
pub type size_t = libc::size_t;
pub type __int32_t = libc::c_int;
pub type pid_t = libc::pid_t;
pub type time_t = libc::time_t;
pub type uint32_t = libc::c_uint;
// Opaque iconv conversion-descriptor handle.
pub type iconv_t = *mut libc::c_void;
pub type dev_t = libc::dev_t;
pub type blkcnt_t = libc::blkcnt_t;
pub type blksize_t = libc::blksize_t;
pub type gid_t = libc::gid_t;
pub type mode_t = libc::mode_t;
// NOTE(review): `libc::uint16_t` is deprecated/removed in newer libc
// releases — confirm the pinned libc version still exports it.
pub type nlink_t = libc::uint16_t;
pub type off_t = libc::off_t;
pub type uid_t = libc::uid_t;
// Raw FFI declarations for the libc functions this module calls; see the
// corresponding man pages for the C-side contracts.
extern "C" {
    pub fn munmap(_: *mut libc::c_void, _: size_t) -> libc::c_int;
    pub fn mmap(
        _: *mut libc::c_void,
        _: size_t,
        _: libc::c_int,
        _: libc::c_int,
        _: libc::c_int,
        _: off_t,
    ) -> *mut libc::c_void;
    pub fn iconv_close(_cd: iconv_t) -> libc::c_int;
    // Converts between character encodings, advancing the in/out pointers
    // and decrementing the byte counters as data is consumed/produced.
    pub fn iconv(
        __cd: iconv_t,
        __inbuf: *mut *mut libc::c_char,
        __inbytesleft: *mut size_t,
        __outbuf: *mut *mut libc::c_char,
        __outbytesleft: *mut size_t,
    ) -> size_t;
    pub fn iconv_open(__tocode: *const libc::c_char, __fromcode: *const libc::c_char) -> iconv_t;
    pub fn getpid() -> pid_t;
    pub fn random() -> libc::c_long;
    // pthread_mutex_t comes from `crate::other` via the glob re-export above.
    pub fn pthread_mutex_unlock(_: *mut pthread_mutex_t) -> libc::c_int;
    pub fn pthread_mutex_lock(_: *mut pthread_mutex_t) -> libc::c_int;
}
|
/// A tour of borrowing, moves, mutable references, and string slices.
fn main() {
    let s1 = String::from("hello");
    let len = calculate_length(&s1); // shared borrow: s1 stays usable
    println!("The length of '{}' is {}", s1, len);
    let mut s2 = s1; // ownership moves here; clone() would copy instead
    change(&mut s2);
    let len = calculate_length(&s2);
    println!("The length of '{}' is {}", s2, len);
    {
        // A mutable borrow confined to an inner scope is fine.
        let _inner_mut = &mut s2;
    }
    let _outer_mut = &mut s2;
    // A second simultaneous `&mut s2` here would be rejected.
    let mut s = String::from("hello");
    {
        let _shared_a = &s;
        let _shared_b = &s;
        // `&mut s` is forbidden while the shared borrows above are live.
    }
    let _excl = &mut s;
    _excl.push_str(", world");
    let _s = String::from("hello world");
    let s1 = String::from("hello world");
    println!("{}", _excl);
    let _hello: &str = &s1[0..5];
    let _world = &s1[6..11];
    let _hello = &s1[0..=4]; // inclusive-range spelling of the same slice
    let _world = &s1[6..=10];
    // Mutating s1 (clear/push_str) would invalidate the slices above.
    let string_literal: &str = "Hello, I am a slice too";
    let _wd = accept_literals_and_strings(string_literal);
    let _wd = accept_literals_and_strings(&s1[..]);
    let _wd = accept_literals_and_strings(&string_literal[..]);
}
/// Accepts both owned `String`s (via deref coercion) and string literals,
/// and hands the full slice straight back.
fn accept_literals_and_strings(s: &str) -> &str {
    s
}
/// Length of the string in bytes.
///
/// Improvement: takes `&str` instead of `&String` — the idiomatic borrowed
/// string type. Every existing `calculate_length(&some_string)` call site
/// still compiles unchanged via deref coercion, and string literals are now
/// accepted too.
fn calculate_length(s: &str) -> usize {
    s.len()
}
/// Append ", world" to the borrowed string in place.
fn change(some_string: &mut String) {
    // `String: AddAssign<&str>` — equivalent to push_str.
    *some_string += ", world";
}
/// Hand the borrowed `String` straight back to the caller; demonstrates
/// that a reference can flow through a function unchanged.
fn ref_pass(s: &String) -> &String {
    &*s
}
/// Build a `String` locally and move ownership out to the caller.
fn own_pass() -> String {
    "hello".to_string()
}
// Below wont compile
// fn dangle() -> &String {
// let s = String::from("hello");
//
// &s
// }
|
//! This module implements the `remote` CLI command
use influxdb_iox_client::connection::Connection;
use thiserror::Error;
mod partition;
mod store;
/// Errors surfaced by the `remote` subcommands; each variant wraps the
/// underlying module's error so `?` converts automatically via `#[from]`.
#[allow(clippy::enum_variant_names)]
#[derive(Debug, Error)]
pub enum Error {
    #[error("{0}")]
    Partition(#[from] partition::Error),
    #[error("{0}")]
    Store(#[from] store::Error),
    #[error("Catalog error: {0}")]
    Catalog(#[from] iox_catalog::interface::Error),
    #[error("Catalog DSN error: {0}")]
    CatalogDsn(#[from] clap_blocks::catalog_dsn::Error),
}
/// Various commands against a remote IOx API
#[derive(Debug, clap::Parser)]
pub struct Config {
    // The selected subcommand; `command()` below dispatches on it.
    #[clap(subcommand)]
    command: Command,
}
/// All possible subcommands for remote
// NOTE(review): derived as `clap::Parser`; recent clap versions expect
// subcommand enums to derive `clap::Subcommand` — confirm against the clap
// version pinned in Cargo.toml.
#[derive(Debug, clap::Parser)]
enum Command {
    /// Get partition data
    Partition(partition::Config),
    /// Get Parquet files from the object store
    Store(store::Config),
}
/// Dispatch the parsed `remote` subcommand against the given connection.
///
/// Errors from either subcommand are converted into this module's `Error`
/// by the `?` operator (via the `#[from]` impls).
pub async fn command(connection: Connection, config: Config) -> Result<(), Error> {
    match config.command {
        Command::Partition(cfg) => partition::command(connection, cfg).await?,
        Command::Store(cfg) => store::command(connection, cfg).await?,
    }
    Ok(())
}
|
// This file was generated by gir (https://github.com/gtk-rs/gir)
// from ../gir-files
// DO NOT EDIT
use crate::Address;
use glib::object::Cast;
use glib::object::IsA;
use glib::signal::connect_raw;
use glib::signal::SignalHandlerId;
use glib::translate::*;
use glib::StaticType;
use glib::ToValue;
use std::boxed::Box as Box_;
use std::fmt;
use std::mem::transmute;
// Thin GObject wrapper around the C `SoupSocket` type; the `wrapper!` macro
// generates the refcounting, casting, and type-registration boilerplate.
glib::wrapper! {
    #[doc(alias = "SoupSocket")]
    pub struct Socket(Object<ffi::SoupSocket, ffi::SoupSocketClass>);
    match fn {
        type_ => || ffi::soup_socket_get_type(),
    }
}
impl Socket {
    // `soup_socket_new` is variadic, which gir cannot bind automatically;
    // the generator left the constructor stubbed out below.
    //#[doc(alias = "soup_socket_new")]
    //pub fn new(optname1: &str, : /*Unknown conversion*//*Unimplemented*/Fundamental: VarArgs) -> Socket {
    // unsafe { TODO: call ffi:soup_socket_new() }
    //}
}
/// Convenience `None` of the right type for APIs taking `Option<&Socket>`.
pub const NONE_SOCKET: Option<&Socket> = None;
/// Auto-generated (gir) trait exposing the `SoupSocket` C API, its GObject
/// properties, and its signals on anything that `IsA<Socket>`.
pub trait SocketExt: 'static {
    // --- wrapped C functions ---
    #[doc(alias = "soup_socket_connect_async")]
    fn connect_async<P: IsA<gio::Cancellable>, Q: FnOnce(&Socket, u32) + 'static>(&self, cancellable: Option<&P>, callback: Q);
    #[doc(alias = "soup_socket_connect_sync")]
    fn connect_sync<P: IsA<gio::Cancellable>>(&self, cancellable: Option<&P>) -> u32;
    #[doc(alias = "soup_socket_disconnect")]
    fn disconnect(&self);
    #[doc(alias = "soup_socket_get_fd")]
    #[doc(alias = "get_fd")]
    fn fd(&self) -> i32;
    #[doc(alias = "soup_socket_get_local_address")]
    #[doc(alias = "get_local_address")]
    fn local_address(&self) -> Option<Address>;
    #[doc(alias = "soup_socket_get_remote_address")]
    #[doc(alias = "get_remote_address")]
    fn remote_address(&self) -> Option<Address>;
    #[doc(alias = "soup_socket_is_connected")]
    fn is_connected(&self) -> bool;
    #[doc(alias = "soup_socket_is_ssl")]
    fn is_ssl(&self) -> bool;
    #[doc(alias = "soup_socket_listen")]
    fn listen(&self) -> bool;
    // `soup_socket_read_until` takes a raw pointer gir cannot express; left
    // unimplemented by the generator.
    //#[doc(alias = "soup_socket_read_until")]
    //fn read_until<P: IsA<gio::Cancellable>>(&self, buffer: &[u8], boundary: /*Unimplemented*/Option<Fundamental: Pointer>, boundary_len: usize, got_boundary: bool, cancellable: Option<&P>) -> Result<(SocketIOStatus, usize), glib::Error>;
    #[doc(alias = "soup_socket_start_proxy_ssl")]
    fn start_proxy_ssl<P: IsA<gio::Cancellable>>(&self, ssl_host: &str, cancellable: Option<&P>) -> bool;
    #[doc(alias = "soup_socket_start_ssl")]
    fn start_ssl<P: IsA<gio::Cancellable>>(&self, cancellable: Option<&P>) -> bool;
    // --- GObject property accessors ---
    //#[doc(alias = "async-context")]
    //fn async_context(&self) -> /*Unimplemented*/Fundamental: Pointer;
    #[doc(alias = "ipv6-only")]
    fn is_ipv6_only(&self) -> bool;
    #[doc(alias = "ipv6-only")]
    fn set_ipv6_only(&self, ipv6_only: bool);
    #[doc(alias = "is-server")]
    fn is_server(&self) -> bool;
    #[doc(alias = "non-blocking")]
    fn is_non_blocking(&self) -> bool;
    #[doc(alias = "non-blocking")]
    fn set_non_blocking(&self, non_blocking: bool);
    //#[doc(alias = "ssl-creds")]
    //fn ssl_creds(&self) -> /*Unimplemented*/Fundamental: Pointer;
    //#[doc(alias = "ssl-creds")]
    //fn set_ssl_creds(&self, ssl_creds: /*Unimplemented*/Fundamental: Pointer);
    #[doc(alias = "ssl-fallback")]
    fn is_ssl_fallback(&self) -> bool;
    #[doc(alias = "ssl-strict")]
    fn is_ssl_strict(&self) -> bool;
    fn timeout(&self) -> u32;
    fn set_timeout(&self, timeout: u32);
    #[doc(alias = "tls-certificate")]
    fn tls_certificate(&self) -> Option<gio::TlsCertificate>;
    #[doc(alias = "tls-errors")]
    fn tls_errors(&self) -> gio::TlsCertificateFlags;
    #[doc(alias = "trusted-certificate")]
    fn is_trusted_certificate(&self) -> bool;
    #[cfg(any(feature = "v2_38", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v2_38")))]
    #[doc(alias = "use-thread-context")]
    fn uses_thread_context(&self) -> bool;
    // --- signal connectors ---
    #[doc(alias = "disconnected")]
    fn connect_disconnected<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
    #[cfg(any(feature = "v2_38", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v2_38")))]
    #[doc(alias = "event")]
    fn connect_event<F: Fn(&Self, gio::SocketClientEvent, &gio::IOStream) + 'static>(&self, f: F) -> SignalHandlerId;
    #[doc(alias = "new-connection")]
    fn connect_new_connection<F: Fn(&Self, &Socket) + 'static>(&self, f: F) -> SignalHandlerId;
    #[doc(alias = "readable")]
    fn connect_readable<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
    #[doc(alias = "writable")]
    fn connect_writable<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
    // --- property-change (notify::) connectors ---
    #[doc(alias = "ipv6-only")]
    fn connect_ipv6_only_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
    #[doc(alias = "is-server")]
    fn connect_is_server_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
    #[doc(alias = "non-blocking")]
    fn connect_non_blocking_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
    #[doc(alias = "ssl-creds")]
    fn connect_ssl_creds_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
    #[doc(alias = "timeout")]
    fn connect_timeout_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
    #[doc(alias = "tls-certificate")]
    fn connect_tls_certificate_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
    #[doc(alias = "tls-errors")]
    fn connect_tls_errors_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
    #[doc(alias = "trusted-certificate")]
    fn connect_trusted_certificate_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
}
// Blanket implementation of `SocketExt`, generated by gir. Wrapped C calls
// marshal through glib's translation traits; property accessors go through
// `g_object_get/set_property`; signal connectors register an
// `unsafe extern "C"` trampoline that recovers the boxed Rust closure from
// the user-data pointer.
impl<O: IsA<Socket>> SocketExt for O {
    fn connect_async<P: IsA<gio::Cancellable>, Q: FnOnce(&Socket, u32) + 'static>(&self, cancellable: Option<&P>, callback: Q) {
        // The closure is boxed and leaked into the C callback's user_data;
        // the trampoline reclaims (and drops) it on invocation.
        let callback_data: Box_<Q> = Box_::new(callback);
        unsafe extern "C" fn callback_func<P: IsA<gio::Cancellable>, Q: FnOnce(&Socket, u32) + 'static>(sock: *mut ffi::SoupSocket, status: libc::c_uint, user_data: glib::ffi::gpointer) {
            let sock = from_glib_borrow(sock);
            let callback: Box_<Q> = Box_::from_raw(user_data as *mut _);
            (*callback)(&sock, status);
        }
        let callback = Some(callback_func::<P, Q> as _);
        let super_callback0: Box_<Q> = callback_data;
        unsafe {
            ffi::soup_socket_connect_async(self.as_ref().to_glib_none().0, cancellable.map(|p| p.as_ref()).to_glib_none().0, callback, Box_::into_raw(super_callback0) as *mut _);
        }
    }
    fn connect_sync<P: IsA<gio::Cancellable>>(&self, cancellable: Option<&P>) -> u32 {
        unsafe {
            ffi::soup_socket_connect_sync(self.as_ref().to_glib_none().0, cancellable.map(|p| p.as_ref()).to_glib_none().0)
        }
    }
    fn disconnect(&self) {
        unsafe {
            ffi::soup_socket_disconnect(self.as_ref().to_glib_none().0);
        }
    }
    fn fd(&self) -> i32 {
        unsafe {
            ffi::soup_socket_get_fd(self.as_ref().to_glib_none().0)
        }
    }
    fn local_address(&self) -> Option<Address> {
        unsafe {
            from_glib_none(ffi::soup_socket_get_local_address(self.as_ref().to_glib_none().0))
        }
    }
    fn remote_address(&self) -> Option<Address> {
        unsafe {
            from_glib_none(ffi::soup_socket_get_remote_address(self.as_ref().to_glib_none().0))
        }
    }
    fn is_connected(&self) -> bool {
        unsafe {
            from_glib(ffi::soup_socket_is_connected(self.as_ref().to_glib_none().0))
        }
    }
    fn is_ssl(&self) -> bool {
        unsafe {
            from_glib(ffi::soup_socket_is_ssl(self.as_ref().to_glib_none().0))
        }
    }
    fn listen(&self) -> bool {
        unsafe {
            from_glib(ffi::soup_socket_listen(self.as_ref().to_glib_none().0))
        }
    }
    //fn read_until<P: IsA<gio::Cancellable>>(&self, buffer: &[u8], boundary: /*Unimplemented*/Option<Fundamental: Pointer>, boundary_len: usize, got_boundary: bool, cancellable: Option<&P>) -> Result<(SocketIOStatus, usize), glib::Error> {
    // unsafe { TODO: call ffi:soup_socket_read_until() }
    //}
    fn start_proxy_ssl<P: IsA<gio::Cancellable>>(&self, ssl_host: &str, cancellable: Option<&P>) -> bool {
        unsafe {
            from_glib(ffi::soup_socket_start_proxy_ssl(self.as_ref().to_glib_none().0, ssl_host.to_glib_none().0, cancellable.map(|p| p.as_ref()).to_glib_none().0))
        }
    }
    fn start_ssl<P: IsA<gio::Cancellable>>(&self, cancellable: Option<&P>) -> bool {
        unsafe {
            from_glib(ffi::soup_socket_start_ssl(self.as_ref().to_glib_none().0, cancellable.map(|p| p.as_ref()).to_glib_none().0))
        }
    }
    //fn async_context(&self) -> /*Unimplemented*/Fundamental: Pointer {
    // unsafe {
    // let mut value = glib::Value::from_type(</*Unknown type*/ as StaticType>::static_type());
    // glib::gobject_ffi::g_object_get_property(self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, b"async-context\0".as_ptr() as *const _, value.to_glib_none_mut().0);
    // value.get().expect("Return Value for property `async-context` getter")
    // }
    //}
    fn is_ipv6_only(&self) -> bool {
        unsafe {
            let mut value = glib::Value::from_type(<bool as StaticType>::static_type());
            glib::gobject_ffi::g_object_get_property(self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, b"ipv6-only\0".as_ptr() as *const _, value.to_glib_none_mut().0);
            value.get().expect("Return Value for property `ipv6-only` getter")
        }
    }
    fn set_ipv6_only(&self, ipv6_only: bool) {
        unsafe {
            glib::gobject_ffi::g_object_set_property(self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, b"ipv6-only\0".as_ptr() as *const _, ipv6_only.to_value().to_glib_none().0);
        }
    }
    fn is_server(&self) -> bool {
        unsafe {
            let mut value = glib::Value::from_type(<bool as StaticType>::static_type());
            glib::gobject_ffi::g_object_get_property(self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, b"is-server\0".as_ptr() as *const _, value.to_glib_none_mut().0);
            value.get().expect("Return Value for property `is-server` getter")
        }
    }
    fn is_non_blocking(&self) -> bool {
        unsafe {
            let mut value = glib::Value::from_type(<bool as StaticType>::static_type());
            glib::gobject_ffi::g_object_get_property(self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, b"non-blocking\0".as_ptr() as *const _, value.to_glib_none_mut().0);
            value.get().expect("Return Value for property `non-blocking` getter")
        }
    }
    fn set_non_blocking(&self, non_blocking: bool) {
        unsafe {
            glib::gobject_ffi::g_object_set_property(self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, b"non-blocking\0".as_ptr() as *const _, non_blocking.to_value().to_glib_none().0);
        }
    }
    //fn ssl_creds(&self) -> /*Unimplemented*/Fundamental: Pointer {
    // unsafe {
    // let mut value = glib::Value::from_type(</*Unknown type*/ as StaticType>::static_type());
    // glib::gobject_ffi::g_object_get_property(self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, b"ssl-creds\0".as_ptr() as *const _, value.to_glib_none_mut().0);
    // value.get().expect("Return Value for property `ssl-creds` getter")
    // }
    //}
    //fn set_ssl_creds(&self, ssl_creds: /*Unimplemented*/Fundamental: Pointer) {
    // unsafe {
    // glib::gobject_ffi::g_object_set_property(self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, b"ssl-creds\0".as_ptr() as *const _, ssl_creds.to_value().to_glib_none().0);
    // }
    //}
    fn is_ssl_fallback(&self) -> bool {
        unsafe {
            let mut value = glib::Value::from_type(<bool as StaticType>::static_type());
            glib::gobject_ffi::g_object_get_property(self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, b"ssl-fallback\0".as_ptr() as *const _, value.to_glib_none_mut().0);
            value.get().expect("Return Value for property `ssl-fallback` getter")
        }
    }
    fn is_ssl_strict(&self) -> bool {
        unsafe {
            let mut value = glib::Value::from_type(<bool as StaticType>::static_type());
            glib::gobject_ffi::g_object_get_property(self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, b"ssl-strict\0".as_ptr() as *const _, value.to_glib_none_mut().0);
            value.get().expect("Return Value for property `ssl-strict` getter")
        }
    }
    fn timeout(&self) -> u32 {
        unsafe {
            let mut value = glib::Value::from_type(<u32 as StaticType>::static_type());
            glib::gobject_ffi::g_object_get_property(self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, b"timeout\0".as_ptr() as *const _, value.to_glib_none_mut().0);
            value.get().expect("Return Value for property `timeout` getter")
        }
    }
    fn set_timeout(&self, timeout: u32) {
        unsafe {
            glib::gobject_ffi::g_object_set_property(self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, b"timeout\0".as_ptr() as *const _, timeout.to_value().to_glib_none().0);
        }
    }
    fn tls_certificate(&self) -> Option<gio::TlsCertificate> {
        unsafe {
            let mut value = glib::Value::from_type(<gio::TlsCertificate as StaticType>::static_type());
            glib::gobject_ffi::g_object_get_property(self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, b"tls-certificate\0".as_ptr() as *const _, value.to_glib_none_mut().0);
            value.get().expect("Return Value for property `tls-certificate` getter")
        }
    }
    fn tls_errors(&self) -> gio::TlsCertificateFlags {
        unsafe {
            let mut value = glib::Value::from_type(<gio::TlsCertificateFlags as StaticType>::static_type());
            glib::gobject_ffi::g_object_get_property(self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, b"tls-errors\0".as_ptr() as *const _, value.to_glib_none_mut().0);
            value.get().expect("Return Value for property `tls-errors` getter")
        }
    }
    fn is_trusted_certificate(&self) -> bool {
        unsafe {
            let mut value = glib::Value::from_type(<bool as StaticType>::static_type());
            glib::gobject_ffi::g_object_get_property(self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, b"trusted-certificate\0".as_ptr() as *const _, value.to_glib_none_mut().0);
            value.get().expect("Return Value for property `trusted-certificate` getter")
        }
    }
    #[cfg(any(feature = "v2_38", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v2_38")))]
    fn uses_thread_context(&self) -> bool {
        unsafe {
            let mut value = glib::Value::from_type(<bool as StaticType>::static_type());
            glib::gobject_ffi::g_object_get_property(self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, b"use-thread-context\0".as_ptr() as *const _, value.to_glib_none_mut().0);
            value.get().expect("Return Value for property `use-thread-context` getter")
        }
    }
    fn connect_disconnected<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe extern "C" fn disconnected_trampoline<P: IsA<Socket>, F: Fn(&P) + 'static>(this: *mut ffi::SoupSocket, f: glib::ffi::gpointer) {
            let f: &F = &*(f as *const F);
            f(Socket::from_glib_borrow(this).unsafe_cast_ref())
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(self.as_ptr() as *mut _, b"disconnected\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(disconnected_trampoline::<Self, F> as *const ())), Box_::into_raw(f))
        }
    }
    #[cfg(any(feature = "v2_38", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v2_38")))]
    fn connect_event<F: Fn(&Self, gio::SocketClientEvent, &gio::IOStream) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe extern "C" fn event_trampoline<P: IsA<Socket>, F: Fn(&P, gio::SocketClientEvent, &gio::IOStream) + 'static>(this: *mut ffi::SoupSocket, event: gio::ffi::GSocketClientEvent, connection: *mut gio::ffi::GIOStream, f: glib::ffi::gpointer) {
            let f: &F = &*(f as *const F);
            f(Socket::from_glib_borrow(this).unsafe_cast_ref(), from_glib(event), &from_glib_borrow(connection))
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(self.as_ptr() as *mut _, b"event\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(event_trampoline::<Self, F> as *const ())), Box_::into_raw(f))
        }
    }
    fn connect_new_connection<F: Fn(&Self, &Socket) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe extern "C" fn new_connection_trampoline<P: IsA<Socket>, F: Fn(&P, &Socket) + 'static>(this: *mut ffi::SoupSocket, new: *mut ffi::SoupSocket, f: glib::ffi::gpointer) {
            let f: &F = &*(f as *const F);
            f(Socket::from_glib_borrow(this).unsafe_cast_ref(), &from_glib_borrow(new))
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(self.as_ptr() as *mut _, b"new-connection\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(new_connection_trampoline::<Self, F> as *const ())), Box_::into_raw(f))
        }
    }
    fn connect_readable<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe extern "C" fn readable_trampoline<P: IsA<Socket>, F: Fn(&P) + 'static>(this: *mut ffi::SoupSocket, f: glib::ffi::gpointer) {
            let f: &F = &*(f as *const F);
            f(Socket::from_glib_borrow(this).unsafe_cast_ref())
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(self.as_ptr() as *mut _, b"readable\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(readable_trampoline::<Self, F> as *const ())), Box_::into_raw(f))
        }
    }
    fn connect_writable<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe extern "C" fn writable_trampoline<P: IsA<Socket>, F: Fn(&P) + 'static>(this: *mut ffi::SoupSocket, f: glib::ffi::gpointer) {
            let f: &F = &*(f as *const F);
            f(Socket::from_glib_borrow(this).unsafe_cast_ref())
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(self.as_ptr() as *mut _, b"writable\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(writable_trampoline::<Self, F> as *const ())), Box_::into_raw(f))
        }
    }
    fn connect_ipv6_only_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe extern "C" fn notify_ipv6_only_trampoline<P: IsA<Socket>, F: Fn(&P) + 'static>(this: *mut ffi::SoupSocket, _param_spec: glib::ffi::gpointer, f: glib::ffi::gpointer) {
            let f: &F = &*(f as *const F);
            f(Socket::from_glib_borrow(this).unsafe_cast_ref())
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(self.as_ptr() as *mut _, b"notify::ipv6-only\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(notify_ipv6_only_trampoline::<Self, F> as *const ())), Box_::into_raw(f))
        }
    }
    fn connect_is_server_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe extern "C" fn notify_is_server_trampoline<P: IsA<Socket>, F: Fn(&P) + 'static>(this: *mut ffi::SoupSocket, _param_spec: glib::ffi::gpointer, f: glib::ffi::gpointer) {
            let f: &F = &*(f as *const F);
            f(Socket::from_glib_borrow(this).unsafe_cast_ref())
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(self.as_ptr() as *mut _, b"notify::is-server\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(notify_is_server_trampoline::<Self, F> as *const ())), Box_::into_raw(f))
        }
    }
    fn connect_non_blocking_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe extern "C" fn notify_non_blocking_trampoline<P: IsA<Socket>, F: Fn(&P) + 'static>(this: *mut ffi::SoupSocket, _param_spec: glib::ffi::gpointer, f: glib::ffi::gpointer) {
            let f: &F = &*(f as *const F);
            f(Socket::from_glib_borrow(this).unsafe_cast_ref())
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(self.as_ptr() as *mut _, b"notify::non-blocking\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(notify_non_blocking_trampoline::<Self, F> as *const ())), Box_::into_raw(f))
        }
    }
    fn connect_ssl_creds_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe extern "C" fn notify_ssl_creds_trampoline<P: IsA<Socket>, F: Fn(&P) + 'static>(this: *mut ffi::SoupSocket, _param_spec: glib::ffi::gpointer, f: glib::ffi::gpointer) {
            let f: &F = &*(f as *const F);
            f(Socket::from_glib_borrow(this).unsafe_cast_ref())
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(self.as_ptr() as *mut _, b"notify::ssl-creds\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(notify_ssl_creds_trampoline::<Self, F> as *const ())), Box_::into_raw(f))
        }
    }
    fn connect_timeout_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe extern "C" fn notify_timeout_trampoline<P: IsA<Socket>, F: Fn(&P) + 'static>(this: *mut ffi::SoupSocket, _param_spec: glib::ffi::gpointer, f: glib::ffi::gpointer) {
            let f: &F = &*(f as *const F);
            f(Socket::from_glib_borrow(this).unsafe_cast_ref())
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(self.as_ptr() as *mut _, b"notify::timeout\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(notify_timeout_trampoline::<Self, F> as *const ())), Box_::into_raw(f))
        }
    }
    fn connect_tls_certificate_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe extern "C" fn notify_tls_certificate_trampoline<P: IsA<Socket>, F: Fn(&P) + 'static>(this: *mut ffi::SoupSocket, _param_spec: glib::ffi::gpointer, f: glib::ffi::gpointer) {
            let f: &F = &*(f as *const F);
            f(Socket::from_glib_borrow(this).unsafe_cast_ref())
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(self.as_ptr() as *mut _, b"notify::tls-certificate\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(notify_tls_certificate_trampoline::<Self, F> as *const ())), Box_::into_raw(f))
        }
    }
    fn connect_tls_errors_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe extern "C" fn notify_tls_errors_trampoline<P: IsA<Socket>, F: Fn(&P) + 'static>(this: *mut ffi::SoupSocket, _param_spec: glib::ffi::gpointer, f: glib::ffi::gpointer) {
            let f: &F = &*(f as *const F);
            f(Socket::from_glib_borrow(this).unsafe_cast_ref())
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(self.as_ptr() as *mut _, b"notify::tls-errors\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(notify_tls_errors_trampoline::<Self, F> as *const ())), Box_::into_raw(f))
        }
    }
    fn connect_trusted_certificate_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe extern "C" fn notify_trusted_certificate_trampoline<P: IsA<Socket>, F: Fn(&P) + 'static>(this: *mut ffi::SoupSocket, _param_spec: glib::ffi::gpointer, f: glib::ffi::gpointer) {
            let f: &F = &*(f as *const F);
            f(Socket::from_glib_borrow(this).unsafe_cast_ref())
        }
        unsafe {
            let f: Box_<F> = Box_::new(f);
            connect_raw(self.as_ptr() as *mut _, b"notify::trusted-certificate\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(notify_trusted_certificate_trampoline::<Self, F> as *const ())), Box_::into_raw(f))
        }
    }
}
impl fmt::Display for Socket {
    /// Fixed human-readable tag for this wrapper type.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Socket")
    }
}
|
// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
failure::{Error, ResultExt},
fidl_fuchsia_modular::{
ExecuteResult, ExecuteStatus, PuppetMasterRequest, PuppetMasterRequestStream, StoryCommand,
StoryPuppetMasterRequest, StoryPuppetMasterRequestStream,
},
fuchsia_async as fasync,
fuchsia_syslog::macros::*,
futures::prelude::*,
std::collections::HashMap,
};
/// Test fake for the `fuchsia.modular.PuppetMaster` FIDL protocol.
pub struct PuppetMasterFake<F> {
    // Maps a story name to the callback fired when that story's commands run.
    on_execute_callbacks: HashMap<String, F>,
}
impl<F: 'static> PuppetMasterFake<F>
where
    F: Fn(&Vec<StoryCommand>) + Sync + Send,
{
    /// Creates a fake with no registered callbacks.
    pub fn new() -> Self {
        PuppetMasterFake { on_execute_callbacks: HashMap::new() }
    }
    /// Registers `callback` to fire when commands for `story_name` execute.
    pub fn set_on_execute(&mut self, story_name: &str, callback: F) {
        self.on_execute_callbacks.insert(story_name.to_string(), callback);
    }
    /// Consumes the fake and serves `PuppetMaster` requests from `stream` on
    /// a background task. Each `ControlStory` request spawns a
    /// `StoryPuppetMasterFake` holding that story's callback; the callback is
    /// `remove`d, so a given story gets its callback only once.
    pub fn spawn(mut self, mut stream: PuppetMasterRequestStream) {
        fasync::spawn(
            async move {
                while let Some(request) =
                    stream.try_next().await.context("error running fake puppet master")?
                {
                    match request {
                        PuppetMasterRequest::ControlStory { story_name, request, .. } => {
                            let stream = request.into_stream()?;
                            let callback = self.on_execute_callbacks.remove(&story_name);
                            StoryPuppetMasterFake::new(story_name, callback).spawn(stream);
                        }
                        // Other PuppetMaster methods are ignored by this fake.
                        _ => continue,
                    }
                }
                Ok(())
            }
            .unwrap_or_else(|e: Error| {
                fx_log_err!("error serving fake puppet master: {:?}", e)
            }),
        );
    }
}
/// Test fake for the `fuchsia.modular.StoryPuppetMaster` FIDL protocol,
/// serving a single story.
pub struct StoryPuppetMasterFake<F> {
    // Story this instance controls; echoed back as `story_id` on Execute.
    story_name: String,
    // Optional hook invoked with the enqueued commands on Execute.
    on_execute_callback: Option<F>,
    // Commands accumulated by Enqueue, consumed by Execute.
    enqueued_commands: Vec<StoryCommand>,
}
impl<F: 'static> StoryPuppetMasterFake<F>
where
    F: Fn(&Vec<StoryCommand>) + Sync + Send,
{
    fn new(story_name: String, on_execute_callback: Option<F>) -> Self {
        StoryPuppetMasterFake { story_name, on_execute_callback, enqueued_commands: vec![] }
    }
    /// Serves `StoryPuppetMaster` on `stream`: `Enqueue` stores the pending
    /// command batch; `Execute` hands the batch to the callback (if any),
    /// clears it, and replies `ExecuteStatus::Ok` with this story's id.
    fn spawn(mut self, mut stream: StoryPuppetMasterRequestStream) {
        fasync::spawn(
            async move {
                while let Some(request) =
                    stream.try_next().await.context("error running fake story puppet master")?
                {
                    match request {
                        StoryPuppetMasterRequest::Enqueue { commands, .. } => {
                            // NOTE(review): Enqueue replaces rather than
                            // appends — presumably sufficient for these tests.
                            self.enqueued_commands = commands;
                        }
                        StoryPuppetMasterRequest::Execute { responder } => {
                            if let Some(ref callback) = self.on_execute_callback {
                                callback(&self.enqueued_commands);
                            }
                            self.enqueued_commands.clear();
                            let mut result = ExecuteResult {
                                status: ExecuteStatus::Ok,
                                story_id: Some(self.story_name.clone()),
                                error_message: None,
                            };
                            responder.send(&mut result)?;
                        }
                        // Other StoryPuppetMaster methods are ignored.
                        _ => continue,
                    }
                }
                Ok(())
            }
            .unwrap_or_else(|e: Error| {
                fx_log_err!("error serving fake story puppet master: {:?}", e)
            }),
        );
    }
}
|
// Copyright 2019. The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use std::path::PathBuf;
/// Create the default data directory (`~/.tari` on OSx and Linux, for example) if it doesn't already exist
pub fn create_data_directory(base_dir: Option<&PathBuf>) -> Result<(), std::io::Error> {
    let home = default_path("", base_dir);
    if !home.exists() {
        println!("Creating {:?}", home);
        // `create_dir_all` (rather than `create_dir`) also creates any missing
        // parent components of a custom `base_dir`, and succeeds if the
        // directory appears between the `exists` check and this call (TOCTOU).
        std::fs::create_dir_all(home)
    } else {
        Ok(())
    }
}
/// A convenience function for creating subfolder paths inside the `~/.tari`
/// default data directory.
///
/// # Panics
/// Panics if the home folder location cannot be found or if the path value is
/// not valid UTF-8 — a trade-off made in favour of convenience of use.
pub fn default_subdir(path: &str, base_dir: Option<&PathBuf>) -> String {
    default_path(path, base_dir)
        .to_str()
        .expect("Invalid path value")
        .to_owned()
}
/// Joins `filename` onto `base_path`, or onto `<home>/.tari` (falling back to
/// `./.tari` when no home directory is known) when no base is given.
pub fn default_path(filename: &str, base_path: Option<&PathBuf>) -> PathBuf {
    let mut root = match base_path {
        Some(base) => base.clone(),
        None => {
            let mut home = dirs::home_dir().unwrap_or_else(|| PathBuf::from("."));
            home.push(".tari");
            home
        }
    };
    root.push(filename);
    root
}
|
use na::{Isometry3, Point3, Unit, Vector3};
/// A ray: an origin point plus a unit-length direction vector.
#[derive(Debug, Copy, Clone)]
pub struct Ray {
    pub origin: Point3<f32>,
    pub direction: Unit<Vector3<f32>>,
}
impl Ray {
pub fn apply_isometry(&self, isometry: Isometry3<f32>) -> Self {
Self {
origin: isometry.transform_point(&self.origin),
direction: Unit::new_unchecked(isometry.transform_vector(&self.direction)),
}
}
}
|
use firefly_diagnostics::*;
use firefly_pass::Pass;
use firefly_syntax_base::*;
use crate::ast::*;
/// Registers auto-imported BIFs in the given module
///
/// This pass takes into account the compiler options of the module when deciding what to import
/// (`no_auto_import` disables the pass; `no_auto_imports` excludes specific names).
pub struct AddAutoImports;
impl Pass for AddAutoImports {
    type Input<'a> = &'a mut Module;
    type Output<'a> = &'a mut Module;
    /// Inserts every BIF signature into `module.imports`, honoring the
    /// module's compile options when present.
    fn run<'a>(&mut self, module: Self::Input<'a>) -> anyhow::Result<Self::Output<'a>> {
        if let Some(compile) = module.compile.as_ref() {
            // Auto-importing disabled wholesale.
            if compile.no_auto_import {
                return Ok(module);
            }
            let span = module.name.span;
            for sig in bifs::all().iter().map(|sig| Span::new(span, sig.clone())) {
                let local_name = sig.mfa().to_local();
                // Skip names the module explicitly opted out of.
                if !compile.no_auto_imports.contains(&local_name) {
                    module.imports.insert(local_name, sig);
                }
            }
        } else {
            // No compile options at all: import the full BIF set.
            let span = module.name.span;
            for sig in bifs::all().iter().map(|sig| Span::new(span, sig.clone())) {
                let local_name = sig.mfa().to_local();
                module.imports.insert(local_name, sig);
            }
        }
        Ok(module)
    }
}
/// Every module in Erlang has some functions implicitly defined for internal use:
///
/// * `module_info/0` (exported)
/// * `module_info/1` (exported)
/// * `record_info/2`
/// * `behaviour_info/1` (optional)
///
/// This pass synthesizes those functions and appends them to the module.
pub struct DefinePseudoLocals;
impl Pass for DefinePseudoLocals {
    type Input<'a> = &'a mut Module;
    type Output<'a> = &'a mut Module;
    /// Defines `module_info/0`, `module_info/1`, `record_info/2` (when the
    /// module has records) and `behaviour_info/1` (when it has callbacks).
    fn run<'a>(&mut self, module: Self::Input<'a>) -> anyhow::Result<Self::Output<'a>> {
        let span = module.span;
        // module_info/0 and /1 simply delegate to erlang:get_module_info.
        let mod_info_0 = fun!(module_info () -> apply!(span, erlang, get_module_info, (atom_from_ident!(module.name))));
        let mod_info_1 = fun!(module_info (Key) -> apply!(span, erlang, get_module_info, (atom_from_ident!(module.name), var!(Key))));
        if !module.records.is_empty() {
            // Two clauses per record: record_info(size, Name) and
            // record_info(fields, Name).
            let mut clauses = Vec::with_capacity(module.records.len() * 2);
            for record in module.records.values() {
                // +1 accounts for the record tag in the first tuple slot.
                let size = (record.fields.len() + 1).into();
                clauses.push((
                    Some(Name::Atom(ident!(record_info))),
                    Clause {
                        span: SourceSpan::UNKNOWN,
                        patterns: vec![atom!(size), atom_from_ident!(record.name)],
                        guards: vec![],
                        body: vec![int!(size)],
                        compiler_generated: true,
                    },
                ));
            }
            for record in module.records.values() {
                // Built by prepending, so the list ends up reversed relative
                // to declaration order.
                let field_names = record
                    .fields
                    .iter()
                    .fold(nil!(), |acc, f| cons!(atom_from_ident!(f.name), acc));
                clauses.push((
                    Some(Name::Atom(ident!(record_info))),
                    Clause {
                        span: SourceSpan::UNKNOWN,
                        patterns: vec![atom!(fields), atom_from_ident!(record.name)],
                        guards: vec![],
                        body: vec![field_names],
                        compiler_generated: true,
                    },
                ));
            }
            let record_info_2 = Function {
                span: SourceSpan::UNKNOWN,
                name: ident!(record_info),
                arity: 2,
                clauses,
                spec: None,
                is_nif: false,
                var_counter: 0,
                fun_counter: 0,
            };
            define_function(module, record_info_2);
        }
        define_function(module, mod_info_0);
        define_function(module, mod_info_1);
        if module.callbacks.len() > 0 {
            // Partition callbacks into required/optional lists of
            // {Function, Arity} tuples.
            let callbacks = module.callbacks.iter().fold(nil!(), |acc, (cbname, cb)| {
                if cb.optional {
                    acc
                } else {
                    cons!(
                        tuple!(
                            atom!(cb.span, cbname.function),
                            int!((cbname.arity as i64).into())
                        ),
                        acc
                    )
                }
            });
            let opt_callbacks = module.callbacks.iter().fold(nil!(), |acc, (cbname, cb)| {
                if cb.optional {
                    cons!(
                        tuple!(
                            atom!(cb.span, cbname.function),
                            int!((cbname.arity as i64).into())
                        ),
                        acc
                    )
                } else {
                    acc
                }
            });
            let behaviour_info_1 = fun!(behaviour_info
                (atom!(callbacks)) -> callbacks;
                (atom!(optional_callbacks)) -> opt_callbacks);
            define_function(module, behaviour_info_1);
        }
        Ok(module)
    }
}
/// Inserts `f` into `module.functions`, keyed by its local name/arity.
fn define_function(module: &mut Module, f: Function) {
    module
        .functions
        .insert(FunctionName::new_local(f.name.name, f.arity), f);
}
|
use anyhow::Context;
use std::{
io::{BufRead, BufReader},
ops::Range,
};
// Raw puzzle input embedded at compile time, so the binary is self-contained.
const INPUT: &[u8] = include_bytes!("input.txt");
/// Binary-space partitioning: each 'F'/'L' keeps the lower half of `range`,
/// each 'B'/'R' keeps the upper half. Returns the final lower bound once the
/// string is exhausted, or `None` on any unexpected character.
fn partition(seat: &str, range: Range<u32>) -> Option<u32> {
    let mut bounds = range;
    for step in seat.bytes() {
        let mid = (bounds.start + bounds.end + 1) / 2;
        bounds = match step {
            b'F' | b'L' => bounds.start..mid,
            b'B' | b'R' => mid..bounds.end,
            _ => return None,
        };
    }
    Some(bounds.start)
}
/// Decodes a boarding pass into its seat id (`row * 8 + col`).
/// Returns `None` for malformed passes instead of panicking.
fn get_seat_id(seat: &str) -> Option<u32> {
    // Checked slicing: the old `&seat[0..7]` panicked on inputs shorter than
    // 7 bytes (or ones where byte 7 split a UTF-8 character); `get` turns
    // those cases into `None`, which callers already handle.
    let row = partition(seat.get(0..7)?, 0..128)?;
    let col = partition(seat.get(7..)?, 0..8)?;
    Some(row * 8 + col)
}
fn part1(input: &Vec<String>) -> anyhow::Result<u32> {
let max_id = input
.iter()
.filter(|s| !s.is_empty())
.flat_map(|s| get_seat_id(s))
.max()
.context("no valid seats")?;
Ok(max_id)
}
fn part2(input: &Vec<String>) -> anyhow::Result<u32> {
let mut seat_ids: Vec<_> = input
.iter()
.filter(|s| !s.is_empty())
.flat_map(|s| get_seat_id(s))
.collect();
seat_ids.sort_unstable();
let first = *seat_ids.first().context("no valid seats")?;
let seat_before_gap =
seat_ids
.into_iter()
.fold(first, |prev, cur| if prev + 1 == cur { cur } else { prev });
Ok(seat_before_gap + 1)
}
/// Reads the embedded input as lines, then prints both puzzle answers.
fn main() -> anyhow::Result<()> {
    let lines = BufReader::new(INPUT)
        .lines()
        .collect::<Result<Vec<_>, _>>()
        .context("failure reading input file")?;
    let answer1 = part1(&lines)?;
    let answer2 = part2(&lines)?;
    println!("part 1: {}", answer1);
    println!("part 2: {}", answer2);
    Ok(())
}
|
/***********************************************************************************************************************
* Copyright (c) 2019 by the authors
*
* Author: André Borrmann
* License: Apache License 2.0
**********************************************************************************************************************/
//! Build script to pre-compile the assembly files containing the cache operations code
//!
extern crate cc;
use std::env;
/// Pre-compiles the architecture-specific cache-maintenance assembly.
/// Nothing is built for non-ARM targets (or when the cargo env var is unset).
fn main() {
    if let Some(target_arch) = env::var_os("CARGO_CFG_TARGET_ARCH") {
        if target_arch == "arm" {
            cc::Build::new()
                .file("src/asm/cache32.s")
                .flag("-march=armv8-a")
                .flag("-mfpu=neon-fp-armv8")
                .flag("-mfloat-abi=hard")
                .compile("cache");
        }
        if target_arch == "aarch64" {
            cc::Build::new()
                .file("src/asm/cache64.s")
                .flag("-march=armv8-a")
                .compile("cache");
        }
    }
}
|
//! This crate includes [`Canvas`] type, which provides basic drawing methods for [`Rectangle`] and
//! [`Line`], with label or not.
use std::cmp::{max, min};
use std::fmt;
use std::{mem, str};
/// Canvas coordinate as `(row, column)`, zero-based.
type Vertex = (usize, usize);
/// Defines a rectangle with four boundaries.
/// `left`/`right` are column indices, `top`/`bottom` row indices; all
/// inclusive.
#[derive(Debug)]
pub struct Rectangle {
    pub left: usize,
    pub right: usize,
    pub top: usize,
    pub bottom: usize,
}
/// Defines a line with start/end vertices.
/// The fields are only written, never read here (hence `allow(dead_code)`).
#[allow(dead_code)]
pub struct Line {
    start: Vertex,
    end: Vertex,
}
/// Defines a paint canvas.
pub struct Canvas {
    // Canvas width in columns.
    width: usize,
    // Canvas height in rows.
    height: usize,
    // Row-major character grid: `height` rows of `width` columns.
    buffer: Vec<Vec<char>>,
    /// boundary of the canvas with surrounding empty space cropped.
    boundary: Rectangle,
}
impl ToString for Canvas {
fn to_string(&self) -> String {
let mut ret = String::new();
for i in self.boundary.top..=self.boundary.bottom {
for j in self.boundary.left..=self.boundary.right {
ret.push(self.buffer[i][j]);
}
ret.push('\n');
}
ret
}
}
impl Canvas {
    /// Creates a `width` x `height` canvas filled with spaces. The crop
    /// boundary starts inverted (left > right, top > bottom) so that the
    /// first drawn pixel collapses it onto itself.
    pub fn new(width: usize, height: usize) -> Self {
        Canvas {
            width,
            height,
            buffer: vec![vec![' '; width]; height],
            boundary: Rectangle {
                left: width - 1,
                right: 0,
                top: height - 1,
                bottom: 0,
            },
        }
    }
    /// Writes `c` at `vertex` and grows the crop boundary to include it.
    fn change_pixel(&mut self, vertex: &Vertex, c: char) {
        self.buffer[vertex.0][vertex.1] = c;
        self.boundary.top = min(self.boundary.top, vertex.0);
        self.boundary.bottom = max(self.boundary.bottom, vertex.0);
        self.boundary.left = min(self.boundary.left, vertex.1);
        self.boundary.right = max(self.boundary.right, vertex.1);
    }
    /// Draws a single character at `vertex`.
    pub fn draw_point(&mut self, vertex: &Vertex, c: char) {
        self.change_pixel(vertex, c);
    }
    /// Draws an axis-aligned line between `a` and `b`, endpoints excluded.
    /// When `overwrite` is false only blank cells are painted.
    fn draw_line_overwrite_or_not(
        &mut self,
        a: &Vertex,
        b: &Vertex,
        overwrite: bool,
    ) -> Line {
        if a.1 != b.1 {
            // Horizontal segment on row a.0.
            for j in (min(a.1, b.1) + 1)..(max(a.1, b.1)) {
                if self.buffer[a.0][j] == ' ' || overwrite {
                    self.change_pixel(&(a.0, j), '─');
                }
            }
        } else if a.0 != b.0 {
            // Vertical segment on column a.1.
            for j in (min(a.0, b.0) + 1)..(max(a.0, b.0)) {
                if self.buffer[j][a.1] == ' ' || overwrite {
                    self.change_pixel(&(j, a.1), '│');
                }
            }
        }
        Line {
            start: a.to_owned(),
            end: b.to_owned(),
        }
    }
    /// Draws a line, overwriting anything beneath it.
    pub fn draw_line(&mut self, a: &Vertex, b: &Vertex) -> Line {
        self.draw_line_overwrite_or_not(a, b, true)
    }
    /// Draws a line that yields to already-painted cells.
    pub fn draw_line_under(&mut self, a: &Vertex, b: &Vertex) -> Line {
        self.draw_line_overwrite_or_not(a, b, false)
    }
    /// Draws a line from `start` to `end` with an arrow head at `end`
    /// pointing along the direction of travel ('?' when start == end).
    pub fn draw_arrowed_line(&mut self, start: &Vertex, end: &Vertex) -> Line {
        let line = self.draw_line(start, end);
        let mut c = '?';
        if start.0 != end.0 {
            if start.0 < end.0 {
                c = '▼';
            } else {
                c = '▲';
            }
        } else if start.1 != end.1 {
            if start.1 < end.1 {
                c = '►';
            } else {
                c = '◄';
            }
        }
        self.change_pixel(end, c);
        line
    }
    /// Flows `label` through the interior of `rec`, wrapping at the right
    /// edge; a single leading space is dropped right after each wrap. Stops
    /// when either the label or the interior is exhausted.
    fn write_label_within_rec(&mut self, rec: &Rectangle, label: &str) {
        // Collect the chars once. The previous code bounds-checked `k`
        // against the *byte* length but indexed with `chars().nth(k)`, which
        // is O(n^2) and panicked on multi-byte (non-ASCII) labels.
        let chars: Vec<char> = label.chars().collect();
        let mut k = 0;
        let mut new_line = false;
        for i in (rec.top + 1)..rec.bottom {
            let mut j = rec.left + 1;
            while j < rec.right {
                if k < chars.len() {
                    let c = chars[k];
                    if new_line && c == ' ' {
                        // Swallow one space carried over the wrap.
                    } else {
                        self.change_pixel(&(i, j), c);
                        j += 1;
                    }
                    k += 1;
                    new_line = false;
                } else {
                    return;
                }
            }
            new_line = true;
        }
    }
    /// Draws the four edges and corner glyphs of `rec`.
    pub fn draw_rectangle(&mut self, rec: &Rectangle) {
        self.draw_line(&(rec.top, rec.left), &(rec.top, rec.right));
        self.draw_line(&(rec.top, rec.right), &(rec.bottom, rec.right));
        self.draw_line(&(rec.bottom, rec.right), &(rec.bottom, rec.left));
        self.draw_line(&(rec.bottom, rec.left), &(rec.top, rec.left));
        self.draw_point(&(rec.top, rec.left), '┌');
        self.draw_point(&(rec.top, rec.right), '┐');
        self.draw_point(&(rec.bottom, rec.right), '┘');
        self.draw_point(&(rec.bottom, rec.left), '└');
    }
    /// Computes the bounding rectangle of `vertices`.
    fn rec_from_vertices(&self, vertices: &[Vertex]) -> Rectangle {
        // Seed each bound with its extreme opposite so min/max converge:
        // column bounds from the last column (width - 1), row bounds from
        // the last row (height - 1). The previous code had the two seeds
        // swapped, which mis-seeded the fold on non-square canvases.
        let mut left = self.width - 1;
        let mut right = 0;
        let mut top = self.height - 1;
        let mut bottom = 0;
        for vertex in vertices.iter() {
            left = min(left, vertex.1);
            right = max(right, vertex.1);
            top = min(top, vertex.0);
            bottom = max(bottom, vertex.0);
        }
        Rectangle {
            left,
            right,
            top,
            bottom,
        }
    }
    /// Writes `label` inside the bounding box of `vertices`.
    /// NOTE(review): despite the name this only writes the label — it does
    /// not draw the rectangle itself (behavior preserved from the original).
    pub fn draw_rectangle_with_vertices_label(
        &mut self,
        vertices: &[Vertex],
        label: &str,
    ) {
        let rec = self.rec_from_vertices(vertices);
        self.write_label_within_rec(&rec, label);
    }
    /// Draws `rec` and fills its interior with `label`.
    pub fn draw_rectangle_with_label(&mut self, rec: &Rectangle, label: &str) {
        self.draw_rectangle(rec);
        self.write_label_within_rec(rec, label);
    }
    /// Draws a line from `a` to `b` with a horizontal label. An arrow is added at the end vertex
    /// `b` if `arrowed == true`.
    pub fn draw_line_with_label(
        &mut self,
        mut a: Vertex,
        mut b: Vertex,
        label: &str,
        arrowed: bool,
    ) {
        if arrowed {
            self.draw_arrowed_line(&a, &b);
        } else {
            self.draw_line(&a, &b);
        }
        if a.1 != b.1 {
            // Horizontal line: stack the label in a box above it.
            if a.1 > b.1 {
                mem::swap(&mut a.1, &mut b.1);
            }
            // NOTE(review): these subtractions can underflow (panic) for
            // lines near the top edge or shorter than ~4 cells — confirm
            // callers keep labels inside the canvas (behavior unchanged).
            let label_rec = Rectangle {
                left: a.1 + 1,
                right: b.1 - 1,
                top: a.0
                    - 1
                    - (label.len() - 1) / (b.1 - 1 - (a.1 + 1) - 1)
                    - 1,
                bottom: a.0,
            };
            self.write_label_within_rec(&label_rec, label);
        } else if a.0 != b.0 {
            // Vertical line: center the label box across it.
            if a.0 > b.0 {
                mem::swap(&mut a.0, &mut b.0);
            }
            let width = (label.len() - 1) / (b.0 - a.0 - 3) + 1;
            let label_rec = Rectangle {
                left: a.1 - width / 2 - 1,
                right: b.1 + width / 2 + 1,
                top: a.0 + 1,
                bottom: b.0 - 1,
            };
            self.write_label_within_rec(&label_rec, label);
        }
    }
    /// Resets boundary to crop the surrounding empty space.
    pub fn reset_boundary(&mut self) {
        for i in 0..self.height {
            let mut empty_line = true;
            for j in 0..self.width {
                if self.buffer[i][j] != ' ' {
                    empty_line = false;
                    // Early exit (same result as the full scan, just faster).
                    break;
                }
            }
            if !empty_line {
                self.boundary.top = min(self.boundary.top, i);
                self.boundary.bottom = max(self.boundary.bottom, i);
            }
        }
        for j in 0..self.width {
            let mut empty_column = true;
            for i in 0..self.height {
                if self.buffer[i][j] != ' ' {
                    empty_column = false;
                    break;
                }
            }
            if !empty_column {
                self.boundary.left = min(self.boundary.left, j);
                self.boundary.right = max(self.boundary.right, j);
            }
        }
    }
    /// Prints the cropped canvas to stdout.
    pub fn print(&self) {
        for i in self.boundary.top..=self.boundary.bottom {
            for j in self.boundary.left..=self.boundary.right {
                print!("{}", self.buffer[i][j]);
            }
            println!();
        }
    }
    /// Blanks the buffer and re-inverts the crop boundary.
    pub fn clear(&mut self) {
        self.buffer = vec![vec![' '; self.width]; self.height];
        self.boundary = Rectangle {
            left: self.width - 1,
            right: 0,
            top: self.height - 1,
            bottom: 0,
        };
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use std::fs;
    // Golden-file tests: each case renders onto a 20x20 canvas, crops it,
    // and compares the output against a checked-in snapshot under ./test/.
    #[test]
    fn test_rec_with_label() {
        let mut canvas = Canvas::new(20, 20);
        canvas.draw_rectangle_with_label(
            &Rectangle {
                left: 1,
                right: 8,
                top: 1,
                bottom: 5,
            },
            "test",
        );
        canvas.draw_rectangle_with_label(
            &Rectangle {
                left: 1,
                right: 8,
                top: 6,
                bottom: 10,
            },
            "test a super long label",
        );
        canvas.reset_boundary();
        let res = fs::read_to_string("./test/rec_with_label.txt").unwrap();
        assert_eq!(canvas.to_string(), res);
    }
    #[test]
    fn test_arrowed_line() {
        let mut canvas = Canvas::new(20, 20);
        canvas.draw_arrowed_line(&(10, 10), &(10, 18));
        canvas.reset_boundary();
        let res = fs::read_to_string("./test/arrowed_line.txt").unwrap();
        assert_eq!(canvas.to_string(), res);
    }
    #[test]
    fn test_line_with_label() {
        let mut canvas = Canvas::new(20, 20);
        canvas.draw_line_with_label(
            (10, 10),
            (10, 18),
            "func_call_name",
            true,
        );
        canvas.draw_line_with_label(
            (15, 10),
            (15, 18),
            "func_call_name",
            false,
        );
        canvas.reset_boundary();
        let res = fs::read_to_string("./test/line_with_label.txt").unwrap();
        assert_eq!(canvas.to_string(), res);
    }
    #[test]
    fn test_line_with_long_label() {
        let mut canvas = Canvas::new(20, 20);
        canvas.draw_line_with_label(
            (15, 10),
            (15, 18),
            "func_call_name_really_long",
            false,
        );
        canvas.reset_boundary();
        let res =
            fs::read_to_string("./test/line_with_long_label.txt").unwrap();
        assert_eq!(canvas.to_string(), res);
    }
}
|
extern crate geo;
use geo::{Coordinate, Point};
/// Demonstrates wrapping a `Coordinate` in a `Point` tuple struct and
/// unwrapping it again via pattern destructuring.
fn main() {
    let coordinate = Coordinate {
        x: 40.02f64,
        y: 116.34,
    };
    let Point(inner) = Point(coordinate);
    println!("Point at ({}, {})", inner.x, inner.y);
}
|
// TODO: no_std
extern crate cpuid;
#[macro_use]
extern crate quick_error;
extern crate semver;
// Platform backends: exactly one of `unix`/`windows` is compiled in, each
// providing `PlatformError`, `total_memory`, and the re-exported `os` module.
#[cfg(unix)]
mod unix;
#[cfg(unix)]
use unix::{PlatformError, total_memory};
#[cfg(unix)]
pub use unix::os;
#[cfg(windows)]
mod windows;
#[cfg(windows)]
use windows::{PlatformError, total_memory};
#[cfg(windows)]
pub use windows::os;
use semver::{SemVerError, Version};
use std::{io, result};
use std::error::Error as ErrorTrait;
/// Information about the current operating system
///
/// `distribution` exists only on unix builds; `service_pack_version` only on
/// windows builds.
pub struct OSVersion {
    /// Full name of the operating system
    pub name: String,
    // TODO: os_type
    /// OS version info
    pub version: Version,
    #[cfg(unix)]
    /// Distribution name
    pub distribution: String,
    #[cfg(windows)]
    /// Service pack version
    pub service_pack_version: Version,
}
/// Basic hardware information
pub struct Platform {
    // TODO: NUMA?
    /// CPU name
    pub cpu: String,
    /// CPU vendor
    pub cpu_vendor: String,
    /// Total installed memory
    pub memory: u64,
}
// Unified crate error; each arm's `from()` derives a `From` impl so `?`
// works directly on io/semver/platform/String errors.
quick_error! {
    #[derive(Debug)]
    pub enum Error {
        PlatformError(err: PlatformError) {
            from()
            description(err.description())
            display("{}", err)
        }
        IO(err: io::Error) {
            from()
            description(err.description())
            display(s) -> ("{}: {}", s.description(), err)
        }
        SemVer(err: SemVerError) {
            from()
            description(err.description())
            display(s) -> ("{}: {}", s.description(), err)
        }
        String(err: String) {
            from()
            description(err)
            display("{}", err)
        }
    }
}
/// Convenience alias: `Result` specialized to this crate's [`Error`].
pub type Result<T> = result::Result<T, Error>;
/// Get basic information about the hardware we're running on.
///
/// # Requirements
///
/// For *nix: a functional `/proc` filesystem with system information. For
/// Windows: XP or Server 2003.
// TODO: don't show requirements for another system in the doc
pub fn platform() -> Result<Platform> {
    let info = cpuid::identify()?;
    let platform = Platform {
        cpu: info.brand,
        cpu_vendor: info.vendor,
        memory: total_memory(),
    };
    Ok(platform)
}
|
use gary_zmq::cluster_communication::ZmqNode;
// use std::collections::HashMap;
use chrono::{DateTime, Utc};
use std::collections::HashMap;
use std::sync::mpsc::{Receiver, Sender};
use std::sync::{Arc, Mutex};
/// Boots a cluster node: constructs a `ZmqNode` bound to `host_addr`, seeded
/// with `init_neighbors`, and runs its event loop (this call blocks).
///
/// `_receiver` is currently unused but kept so the channel pair stays part of
/// the public interface. (Removed the large block of commented-out receive
/// loop and the commented-out `node_listener_port` parameter — dead code.)
pub fn start_node(
    sender: Sender<&'static str>,
    _receiver: Receiver<&str>,
    host_addr: &str,
    init_neighbors: Arc<Mutex<HashMap<String, DateTime<Utc>>>>,
) {
    println!("Initial representation of a running Node");
    let mut myself = ZmqNode::new(sender, host_addr, init_neighbors);
    myself.run();
}
|
// Copyright (c) 2020 Sam Blenny
// SPDX-License-Identifier: Apache-2.0 OR MIT
//
#![forbid(unsafe_code)]
/// Frame buffer bounds
/// Number of 32-bit words holding one display line (336 px / 32, rounded up).
pub const WORDS_PER_LINE: usize = 11;
/// Display width in pixels.
pub const WIDTH: usize = 336;
/// Number of display lines.
pub const LINES: usize = 536;
/// Total words in one full frame.
pub const FRAME_BUF_SIZE: usize = WORDS_PER_LINE * LINES;
/// Frame buffer of 1-bit pixels
pub type FrBuf = [u32; FRAME_BUF_SIZE];
/// Initialize a frame buffer with stripes (each word has its upper 16 bits
/// set and lower 16 bits clear).
pub const fn new_fr_buf() -> FrBuf {
    [0xffff_0000; FRAME_BUF_SIZE]
}
|
use generic_array::{ArrayLength, GenericArray};
use spade::{PointN, SpadeNum, TwoDimensional};
use std::cmp::Ordering;
use std::collections::HashMap;
use std::fmt::Debug;
use std::hash::{Hash, Hasher};
/// Index identifying a vertex within a graph.
pub type VertexId = usize;
/// Edge weight.
pub type Weight = f64;
/// Coordinate scalar.
pub type Real = f64;
/// A weighted edge from `tail` to `head`.
#[derive(Debug)]
pub struct Edge {
    pub tail: VertexId,
    pub head: VertexId,
    pub weight: Weight,
}
/// Canonical graph representation, as a set of edges.
pub type Graph = Vec<Edge>;
/// Tabular graph representation, that for each pair of
/// vertices it returns the weight of the edge between them.
pub type GraphTab = HashMap<(VertexId, VertexId), Weight>;
/// Represents a point in the space `T^N`.
pub type Point<T, N> = GenericArray<T, N>;
#[derive(Clone, Debug)]
/// Represents a labeled vertex located at point in the space `T^N`.
///
/// Equality, ordering, and hashing are all keyed on `id` alone; the
/// coordinates play no part in identity (see the trait impls).
pub struct PointVertex<T, N>
where
    T: SpadeNum,
    N: ArrayLength<T>,
{
    pub point: Point<T, N>,
    pub id: VertexId,
}
impl<T, N> PartialEq for PointVertex<T, N>
where
    T: SpadeNum,
    N: ArrayLength<T>,
{
    // Identity-based: two vertices are equal iff their ids match,
    // regardless of coordinates.
    fn eq(&self, other: &Self) -> bool {
        self.id == other.id
    }
}
// `eq` compares `usize` ids, which is a total equivalence, so the `Eq`
// marker is sound.
impl<T, N> Eq for PointVertex<T, N>
where
    T: SpadeNum,
    N: ArrayLength<T>,
{
}
impl<T, N> PartialOrd for PointVertex<T, N>
where
    T: SpadeNum,
    N: ArrayLength<T>,
{
    /// Delegates to the total order defined by `Ord` (id-based). Writing
    /// `Some(self.cmp(other))` is the canonical form when `Ord` is also
    /// implemented (clippy `non_canonical_partial_ord_impl`): the two
    /// orderings can then never disagree. Behavior is unchanged, since
    /// `usize::partial_cmp` is always `Some` of `usize::cmp`.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl<T, N> Ord for PointVertex<T, N>
where
    T: SpadeNum,
    N: ArrayLength<T>,
{
    // Total order by id, consistent with the `PartialEq`/`Hash` impls.
    fn cmp(&self, other: &Self) -> Ordering {
        self.id.cmp(&other.id)
    }
}
impl<T, N> Hash for PointVertex<T, N>
where
    T: SpadeNum,
    N: ArrayLength<T>,
{
    // Hashes only the id, matching `PartialEq` as required for use as a
    // hash-map/set key.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.id.hash(state)
    }
}
impl<T, N> PointN for PointVertex<T, N>
where
    T: SpadeNum,
    N: ArrayLength<T> + Copy + Debug + PartialEq + PartialOrd,
{
    type Scalar = T;
    /// Dimensionality is the type-level array length `N`.
    fn dimensions() -> usize {
        N::USIZE
    }
    /// Builds a vertex at the uniform point `(value, ..., value)` with id 0.
    fn from_value(value: Self::Scalar) -> Self {
        let id = 0;
        let point = (0..N::USIZE).map(|_| value.clone()).collect();
        PointVertex { id, point }
    }
    /// Coordinate accessor. `GenericArray` supports O(1) indexing, so index
    /// directly instead of the previous O(n) `iter().nth(index).unwrap()`;
    /// out-of-range indices still panic, as before.
    fn nth(&self, index: usize) -> &Self::Scalar {
        &self.point[index]
    }
    fn nth_mut(&mut self, index: usize) -> &mut Self::Scalar {
        &mut self.point[index]
    }
}
// Marker enabling use with spade's 2D structures.
// NOTE(review): implemented for every `N`, not just N = 2 — presumably only
// instantiated with two-dimensional points; confirm at call sites.
impl<T, N> TwoDimensional for PointVertex<T, N>
where
    T: SpadeNum,
    N: ArrayLength<T> + Copy + Debug + PartialEq + PartialOrd,
{
}
/// Data type to represent the expected output of this week's assignment.
/// A plain signed 64-bit integer.
pub type TspResult = i64;
|
// svd2rust-generated read-only accessors for the ITLINE14 interrupt-line
// status register; do not edit by hand.
#[doc = "Reader of register ITLINE14"]
pub type R = crate::R<u32, super::ITLINE14>;
#[doc = "Reader of field `TIM1_CC`"]
pub type TIM1_CC_R = crate::R<bool, bool>;
impl R {
    #[doc = "Bit 0 - TIM1_CC"]
    #[inline(always)]
    pub fn tim1_cc(&self) -> TIM1_CC_R {
        TIM1_CC_R::new((self.bits & 0x01) != 0)
    }
}
|
// svd2rust-generated readers for the ADC common status register (CSR).
// Each flag gets a two-variant enum, a bool conversion, and a typed reader
// with variant/predicate helpers; do not edit by hand.
#[doc = "Reader of register CSR"]
pub type R = crate::R<u32, super::CSR>;
#[doc = "Reader of field `ADDRDY_MST`"]
pub type ADDRDY_MST_R = crate::R<bool, bool>;
#[doc = "EOSMP_MST\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EOSMP_MST_A {
    #[doc = "0: End of sampling phase no yet reached"]
    NOTENDED = 0,
    #[doc = "1: End of sampling phase reached"]
    ENDED = 1,
}
impl From<EOSMP_MST_A> for bool {
    #[inline(always)]
    fn from(variant: EOSMP_MST_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Reader of field `EOSMP_MST`"]
pub type EOSMP_MST_R = crate::R<bool, EOSMP_MST_A>;
impl EOSMP_MST_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> EOSMP_MST_A {
        match self.bits {
            false => EOSMP_MST_A::NOTENDED,
            true => EOSMP_MST_A::ENDED,
        }
    }
    #[doc = "Checks if the value of the field is `NOTENDED`"]
    #[inline(always)]
    pub fn is_not_ended(&self) -> bool {
        *self == EOSMP_MST_A::NOTENDED
    }
    #[doc = "Checks if the value of the field is `ENDED`"]
    #[inline(always)]
    pub fn is_ended(&self) -> bool {
        *self == EOSMP_MST_A::ENDED
    }
}
// svd2rust-generated: master-ADC end-of-conversion (EOC_MST) and
// end-of-sequence (EOS_MST) flag readers; do not edit by hand.
#[doc = "EOC_MST\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EOC_MST_A {
    #[doc = "0: Regular conversion is not complete"]
    NOTCOMPLETE = 0,
    #[doc = "1: Regular conversion complete"]
    COMPLETE = 1,
}
impl From<EOC_MST_A> for bool {
    #[inline(always)]
    fn from(variant: EOC_MST_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Reader of field `EOC_MST`"]
pub type EOC_MST_R = crate::R<bool, EOC_MST_A>;
impl EOC_MST_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> EOC_MST_A {
        match self.bits {
            false => EOC_MST_A::NOTCOMPLETE,
            true => EOC_MST_A::COMPLETE,
        }
    }
    #[doc = "Checks if the value of the field is `NOTCOMPLETE`"]
    #[inline(always)]
    pub fn is_not_complete(&self) -> bool {
        *self == EOC_MST_A::NOTCOMPLETE
    }
    #[doc = "Checks if the value of the field is `COMPLETE`"]
    #[inline(always)]
    pub fn is_complete(&self) -> bool {
        *self == EOC_MST_A::COMPLETE
    }
}
#[doc = "EOS_MST\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EOS_MST_A {
    #[doc = "0: Regular sequence is not complete"]
    NOTCOMPLETE = 0,
    #[doc = "1: Regular sequence complete"]
    COMPLETE = 1,
}
impl From<EOS_MST_A> for bool {
    #[inline(always)]
    fn from(variant: EOS_MST_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Reader of field `EOS_MST`"]
pub type EOS_MST_R = crate::R<bool, EOS_MST_A>;
impl EOS_MST_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> EOS_MST_A {
        match self.bits {
            false => EOS_MST_A::NOTCOMPLETE,
            true => EOS_MST_A::COMPLETE,
        }
    }
    #[doc = "Checks if the value of the field is `NOTCOMPLETE`"]
    #[inline(always)]
    pub fn is_not_complete(&self) -> bool {
        *self == EOS_MST_A::NOTCOMPLETE
    }
    #[doc = "Checks if the value of the field is `COMPLETE`"]
    #[inline(always)]
    pub fn is_complete(&self) -> bool {
        *self == EOS_MST_A::COMPLETE
    }
}
// svd2rust-generated: master-ADC overrun (OVR_MST) and injected
// end-of-conversion (JEOC_MST) flag readers; do not edit by hand.
#[doc = "OVR_MST\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum OVR_MST_A {
    #[doc = "0: No overrun occurred"]
    NOOVERRUN = 0,
    #[doc = "1: Overrun occurred"]
    OVERRUN = 1,
}
impl From<OVR_MST_A> for bool {
    #[inline(always)]
    fn from(variant: OVR_MST_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Reader of field `OVR_MST`"]
pub type OVR_MST_R = crate::R<bool, OVR_MST_A>;
impl OVR_MST_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> OVR_MST_A {
        match self.bits {
            false => OVR_MST_A::NOOVERRUN,
            true => OVR_MST_A::OVERRUN,
        }
    }
    #[doc = "Checks if the value of the field is `NOOVERRUN`"]
    #[inline(always)]
    pub fn is_no_overrun(&self) -> bool {
        *self == OVR_MST_A::NOOVERRUN
    }
    #[doc = "Checks if the value of the field is `OVERRUN`"]
    #[inline(always)]
    pub fn is_overrun(&self) -> bool {
        *self == OVR_MST_A::OVERRUN
    }
}
#[doc = "JEOC_MST\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum JEOC_MST_A {
    #[doc = "0: Injected conversion is not complete"]
    NOTCOMPLETE = 0,
    #[doc = "1: Injected conversion complete"]
    COMPLETE = 1,
}
impl From<JEOC_MST_A> for bool {
    #[inline(always)]
    fn from(variant: JEOC_MST_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Reader of field `JEOC_MST`"]
pub type JEOC_MST_R = crate::R<bool, JEOC_MST_A>;
impl JEOC_MST_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> JEOC_MST_A {
        match self.bits {
            false => JEOC_MST_A::NOTCOMPLETE,
            true => JEOC_MST_A::COMPLETE,
        }
    }
    #[doc = "Checks if the value of the field is `NOTCOMPLETE`"]
    #[inline(always)]
    pub fn is_not_complete(&self) -> bool {
        *self == JEOC_MST_A::NOTCOMPLETE
    }
    #[doc = "Checks if the value of the field is `COMPLETE`"]
    #[inline(always)]
    pub fn is_complete(&self) -> bool {
        *self == JEOC_MST_A::COMPLETE
    }
}
// svd2rust-generated: master-ADC injected end-of-sequence (JEOS_MST) and
// analog-watchdog-1 (AWD1_MST) flag readers; do not edit by hand.
#[doc = "JEOS_MST\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum JEOS_MST_A {
    #[doc = "0: Injected sequence is not complete"]
    NOTCOMPLETE = 0,
    #[doc = "1: Injected sequence complete"]
    COMPLETE = 1,
}
impl From<JEOS_MST_A> for bool {
    #[inline(always)]
    fn from(variant: JEOS_MST_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Reader of field `JEOS_MST`"]
pub type JEOS_MST_R = crate::R<bool, JEOS_MST_A>;
impl JEOS_MST_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> JEOS_MST_A {
        match self.bits {
            false => JEOS_MST_A::NOTCOMPLETE,
            true => JEOS_MST_A::COMPLETE,
        }
    }
    #[doc = "Checks if the value of the field is `NOTCOMPLETE`"]
    #[inline(always)]
    pub fn is_not_complete(&self) -> bool {
        *self == JEOS_MST_A::NOTCOMPLETE
    }
    #[doc = "Checks if the value of the field is `COMPLETE`"]
    #[inline(always)]
    pub fn is_complete(&self) -> bool {
        *self == JEOS_MST_A::COMPLETE
    }
}
#[doc = "AWD1_MST\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum AWD1_MST_A {
    #[doc = "0: No analog watchdog event occurred"]
    NOEVENT = 0,
    #[doc = "1: Analog watchdog event occurred"]
    EVENT = 1,
}
impl From<AWD1_MST_A> for bool {
    #[inline(always)]
    fn from(variant: AWD1_MST_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Reader of field `AWD1_MST`"]
pub type AWD1_MST_R = crate::R<bool, AWD1_MST_A>;
impl AWD1_MST_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> AWD1_MST_A {
        match self.bits {
            false => AWD1_MST_A::NOEVENT,
            true => AWD1_MST_A::EVENT,
        }
    }
    #[doc = "Checks if the value of the field is `NOEVENT`"]
    #[inline(always)]
    pub fn is_no_event(&self) -> bool {
        *self == AWD1_MST_A::NOEVENT
    }
    #[doc = "Checks if the value of the field is `EVENT`"]
    #[inline(always)]
    pub fn is_event(&self) -> bool {
        *self == AWD1_MST_A::EVENT
    }
}
#[doc = "AWD2_MST"]
pub type AWD2_MST_A = AWD1_MST_A;
#[doc = "Reader of field `AWD2_MST`"]
pub type AWD2_MST_R = crate::R<bool, AWD1_MST_A>;
#[doc = "AWD3_MST"]
pub type AWD3_MST_A = AWD1_MST_A;
#[doc = "Reader of field `AWD3_MST`"]
pub type AWD3_MST_R = crate::R<bool, AWD1_MST_A>;
/// JQOVF_MST
///
/// Value on reset: 0
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum JQOVF_MST_A {
    /// 0: No injected context queue overflow has occurred
    NOOVERFLOW = 0,
    /// 1: Injected context queue overflow has occurred
    OVERFLOW = 1,
}
impl From<JQOVF_MST_A> for bool {
    /// Convert the variant into the raw bit it encodes.
    #[inline(always)]
    fn from(variant: JQOVF_MST_A) -> Self {
        match variant {
            JQOVF_MST_A::NOOVERFLOW => false,
            JQOVF_MST_A::OVERFLOW => true,
        }
    }
}
#[doc = "Reader of field `JQOVF_MST`"]
pub type JQOVF_MST_R = crate::R<bool, JQOVF_MST_A>;
impl JQOVF_MST_R {
    /// Map the raw bit onto its enumerated variant
    /// (`false` => `NOOVERFLOW`, `true` => `OVERFLOW`).
    #[inline(always)]
    pub fn variant(&self) -> JQOVF_MST_A {
        if self.bits {
            JQOVF_MST_A::OVERFLOW
        } else {
            JQOVF_MST_A::NOOVERFLOW
        }
    }
    /// `true` when the field reads as `NOOVERFLOW`.
    #[inline(always)]
    pub fn is_no_overflow(&self) -> bool {
        matches!(self.variant(), JQOVF_MST_A::NOOVERFLOW)
    }
    /// `true` when the field reads as `OVERFLOW`.
    #[inline(always)]
    pub fn is_overflow(&self) -> bool {
        matches!(self.variant(), JQOVF_MST_A::OVERFLOW)
    }
}
/// ADRDY_SLV
///
/// Value on reset: 0
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ADRDY_SLV_A {
    /// 0: ADC is not ready to start conversion
    NOTREADY = 0,
    /// 1: ADC is ready to start conversion
    READY = 1,
}
impl From<ADRDY_SLV_A> for bool {
    /// Convert the variant into the raw bit it encodes.
    #[inline(always)]
    fn from(variant: ADRDY_SLV_A) -> Self {
        match variant {
            ADRDY_SLV_A::NOTREADY => false,
            ADRDY_SLV_A::READY => true,
        }
    }
}
#[doc = "Reader of field `ADRDY_SLV`"]
pub type ADRDY_SLV_R = crate::R<bool, ADRDY_SLV_A>;
impl ADRDY_SLV_R {
    /// Map the raw bit onto its enumerated variant
    /// (`false` => `NOTREADY`, `true` => `READY`).
    #[inline(always)]
    pub fn variant(&self) -> ADRDY_SLV_A {
        if self.bits {
            ADRDY_SLV_A::READY
        } else {
            ADRDY_SLV_A::NOTREADY
        }
    }
    /// `true` when the field reads as `NOTREADY`.
    #[inline(always)]
    pub fn is_not_ready(&self) -> bool {
        matches!(self.variant(), ADRDY_SLV_A::NOTREADY)
    }
    /// `true` when the field reads as `READY`.
    #[inline(always)]
    pub fn is_ready(&self) -> bool {
        matches!(self.variant(), ADRDY_SLV_A::READY)
    }
}
// The slave-ADC (SLV) status flags mirror the master-ADC (MST) flags bit for
// bit, so each slave variant/reader type is simply an alias of its master
// counterpart defined earlier in this file.
#[doc = "EOSMP_SLV"]
pub type EOSMP_SLV_A = EOSMP_MST_A;
#[doc = "Reader of field `EOSMP_SLV`"]
pub type EOSMP_SLV_R = crate::R<bool, EOSMP_MST_A>;
#[doc = "End of regular conversion of the slave ADC"]
pub type EOC_SLV_A = EOC_MST_A;
#[doc = "Reader of field `EOC_SLV`"]
pub type EOC_SLV_R = crate::R<bool, EOC_MST_A>;
#[doc = "End of regular sequence flag of the slave ADC"]
pub type EOS_SLV_A = EOS_MST_A;
#[doc = "Reader of field `EOS_SLV`"]
pub type EOS_SLV_R = crate::R<bool, EOS_MST_A>;
#[doc = "Overrun flag of the slave ADC"]
pub type OVR_SLV_A = OVR_MST_A;
#[doc = "Reader of field `OVR_SLV`"]
pub type OVR_SLV_R = crate::R<bool, OVR_MST_A>;
#[doc = "End of injected conversion flag of the slave ADC"]
pub type JEOC_SLV_A = JEOC_MST_A;
#[doc = "Reader of field `JEOC_SLV`"]
pub type JEOC_SLV_R = crate::R<bool, JEOC_MST_A>;
#[doc = "End of injected sequence flag of the slave ADC"]
pub type JEOS_SLV_A = JEOS_MST_A;
#[doc = "Reader of field `JEOS_SLV`"]
pub type JEOS_SLV_R = crate::R<bool, JEOS_MST_A>;
#[doc = "Analog watchdog 1 flag of the slave ADC"]
pub type AWD1_SLV_A = AWD1_MST_A;
#[doc = "Reader of field `AWD1_SLV`"]
pub type AWD1_SLV_R = crate::R<bool, AWD1_MST_A>;
#[doc = "Analog watchdog 2 flag of the slave ADC"]
pub type AWD2_SLV_A = AWD1_MST_A;
#[doc = "Reader of field `AWD2_SLV`"]
pub type AWD2_SLV_R = crate::R<bool, AWD1_MST_A>;
#[doc = "Analog watchdog 3 flag of the slave ADC"]
pub type AWD3_SLV_A = AWD1_MST_A;
#[doc = "Reader of field `AWD3_SLV`"]
pub type AWD3_SLV_R = crate::R<bool, AWD1_MST_A>;
#[doc = "Injected Context Queue Overflow flag of the slave ADC"]
pub type JQOVF_SLV_A = JQOVF_MST_A;
#[doc = "Reader of field `JQOVF_SLV`"]
pub type JQOVF_SLV_R = crate::R<bool, JQOVF_MST_A>;
impl R {
    /// Extract the single bit at `offset` from the raw register value.
    #[inline(always)]
    fn bit_at(&self, offset: u32) -> bool {
        ((self.bits >> offset) & 0x01) != 0
    }
    /// Bit 0 - ADDRDY_MST
    #[inline(always)]
    pub fn addrdy_mst(&self) -> ADDRDY_MST_R {
        ADDRDY_MST_R::new(self.bit_at(0))
    }
    /// Bit 1 - EOSMP_MST
    #[inline(always)]
    pub fn eosmp_mst(&self) -> EOSMP_MST_R {
        EOSMP_MST_R::new(self.bit_at(1))
    }
    /// Bit 2 - EOC_MST
    #[inline(always)]
    pub fn eoc_mst(&self) -> EOC_MST_R {
        EOC_MST_R::new(self.bit_at(2))
    }
    /// Bit 3 - EOS_MST
    #[inline(always)]
    pub fn eos_mst(&self) -> EOS_MST_R {
        EOS_MST_R::new(self.bit_at(3))
    }
    /// Bit 4 - OVR_MST
    #[inline(always)]
    pub fn ovr_mst(&self) -> OVR_MST_R {
        OVR_MST_R::new(self.bit_at(4))
    }
    /// Bit 5 - JEOC_MST
    #[inline(always)]
    pub fn jeoc_mst(&self) -> JEOC_MST_R {
        JEOC_MST_R::new(self.bit_at(5))
    }
    /// Bit 6 - JEOS_MST
    #[inline(always)]
    pub fn jeos_mst(&self) -> JEOS_MST_R {
        JEOS_MST_R::new(self.bit_at(6))
    }
    /// Bit 7 - AWD1_MST
    #[inline(always)]
    pub fn awd1_mst(&self) -> AWD1_MST_R {
        AWD1_MST_R::new(self.bit_at(7))
    }
    /// Bit 8 - AWD2_MST
    #[inline(always)]
    pub fn awd2_mst(&self) -> AWD2_MST_R {
        AWD2_MST_R::new(self.bit_at(8))
    }
    /// Bit 9 - AWD3_MST
    #[inline(always)]
    pub fn awd3_mst(&self) -> AWD3_MST_R {
        AWD3_MST_R::new(self.bit_at(9))
    }
    /// Bit 10 - JQOVF_MST
    #[inline(always)]
    pub fn jqovf_mst(&self) -> JQOVF_MST_R {
        JQOVF_MST_R::new(self.bit_at(10))
    }
    /// Bit 16 - ADRDY_SLV
    #[inline(always)]
    pub fn adrdy_slv(&self) -> ADRDY_SLV_R {
        ADRDY_SLV_R::new(self.bit_at(16))
    }
    /// Bit 17 - EOSMP_SLV
    #[inline(always)]
    pub fn eosmp_slv(&self) -> EOSMP_SLV_R {
        EOSMP_SLV_R::new(self.bit_at(17))
    }
    /// Bit 18 - End of regular conversion of the slave ADC
    #[inline(always)]
    pub fn eoc_slv(&self) -> EOC_SLV_R {
        EOC_SLV_R::new(self.bit_at(18))
    }
    /// Bit 19 - End of regular sequence flag of the slave ADC
    #[inline(always)]
    pub fn eos_slv(&self) -> EOS_SLV_R {
        EOS_SLV_R::new(self.bit_at(19))
    }
    /// Bit 20 - Overrun flag of the slave ADC
    #[inline(always)]
    pub fn ovr_slv(&self) -> OVR_SLV_R {
        OVR_SLV_R::new(self.bit_at(20))
    }
    /// Bit 21 - End of injected conversion flag of the slave ADC
    #[inline(always)]
    pub fn jeoc_slv(&self) -> JEOC_SLV_R {
        JEOC_SLV_R::new(self.bit_at(21))
    }
    /// Bit 22 - End of injected sequence flag of the slave ADC
    #[inline(always)]
    pub fn jeos_slv(&self) -> JEOS_SLV_R {
        JEOS_SLV_R::new(self.bit_at(22))
    }
    /// Bit 23 - Analog watchdog 1 flag of the slave ADC
    #[inline(always)]
    pub fn awd1_slv(&self) -> AWD1_SLV_R {
        AWD1_SLV_R::new(self.bit_at(23))
    }
    /// Bit 24 - Analog watchdog 2 flag of the slave ADC
    #[inline(always)]
    pub fn awd2_slv(&self) -> AWD2_SLV_R {
        AWD2_SLV_R::new(self.bit_at(24))
    }
    /// Bit 25 - Analog watchdog 3 flag of the slave ADC
    #[inline(always)]
    pub fn awd3_slv(&self) -> AWD3_SLV_R {
        AWD3_SLV_R::new(self.bit_at(25))
    }
    /// Bit 26 - Injected Context Queue Overflow flag of the slave ADC
    #[inline(always)]
    pub fn jqovf_slv(&self) -> JQOVF_SLV_R {
        JQOVF_SLV_R::new(self.bit_at(26))
    }
}