text stringlengths 8 4.13M |
|---|
use crate::v0::support::{with_ipfs, InvalidPeerId, StringError};
use ipfs::{BitswapStats, Ipfs, IpfsTypes};
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use warp::{query, reply, Filter, Rejection, Reply};
/// Query-string parameters accepted by the bitswap wantlist endpoint.
#[derive(Debug, Deserialize)]
pub struct WantlistQuery {
    // Optional peer whose wantlist should be returned; when absent,
    // `wantlist_query` passes `None` and the local wantlist is used.
    peer: Option<String>,
}
/// JSON body returned by the wantlist endpoint:
/// `{ "Keys": [ { "/": "<cid>" }, ... ] }` (PascalCase via serde rename).
#[derive(Debug, Serialize)]
#[serde(rename_all = "PascalCase")]
pub struct WantlistResponse {
    // Wanted blocks rendered as `{"/": "<cid>"}` link objects.
    keys: Vec<Value>,
}
async fn wantlist_query<T: IpfsTypes>(
ipfs: Ipfs<T>,
query: WantlistQuery,
) -> Result<impl Reply, Rejection> {
let peer_id = if let Some(peer_id) = query.peer {
let peer_id = peer_id.parse().map_err(|_| InvalidPeerId)?;
Some(peer_id)
} else {
None
};
let cids = ipfs
.bitswap_wantlist(peer_id)
.await
.map_err(StringError::from)?;
let keys = cids
.into_iter()
.map(|(cid, _)| json!({"/": cid.to_string()}))
.collect();
let response = WantlistResponse { keys };
Ok(reply::json(&response))
}
/// Warp filter for the bitswap wantlist endpoint: injects the `Ipfs` handle,
/// deserializes the query string into `WantlistQuery` and delegates to
/// `wantlist_query`.
pub fn wantlist<T: IpfsTypes>(
    ipfs: &Ipfs<T>,
) -> impl Filter<Extract = (impl Reply,), Error = Rejection> + Clone {
    with_ipfs(ipfs)
        .and(query::<WantlistQuery>())
        .and_then(wantlist_query)
}
/// JSON body of the bitswap stat endpoint, serialized with PascalCase keys
/// (presumably to mirror the go-ipfs HTTP API shape — confirm against spec).
#[derive(Debug, Serialize)]
#[serde(rename_all = "PascalCase")]
pub struct StatResponse {
    blocks_received: u64,
    blocks_sent: u64,
    data_received: u64,
    data_sent: u64,
    dup_blks_received: u64,
    dup_data_received: u64,
    // Not tracked by `BitswapStats`; always reported as 0 (see `From` impl).
    messages_received: u64,
    // Peer IDs rendered as strings.
    peers: Vec<String>,
    // Not tracked by `BitswapStats`; always reported as 0 (see `From` impl).
    provide_buf_len: i32,
    // Wantlist entries as `{"/": "<cid>"}` link objects.
    wantlist: Vec<Value>,
}
impl From<BitswapStats> for StatResponse {
fn from(stats: BitswapStats) -> Self {
let wantlist = stats
.wantlist
.into_iter()
.map(|(cid, _)| json!({"/": cid.to_string()}))
.collect();
let peers = stats
.peers
.into_iter()
.map(|peer_id| peer_id.to_string())
.collect();
Self {
blocks_received: stats.blocks_received,
blocks_sent: stats.blocks_sent,
data_received: stats.data_received,
data_sent: stats.data_sent,
dup_blks_received: stats.dup_blks_received,
dup_data_received: stats.dup_data_received,
peers,
wantlist,
messages_received: 0,
provide_buf_len: 0,
}
}
}
async fn stat_query<T: IpfsTypes>(ipfs: Ipfs<T>) -> Result<impl Reply, Rejection> {
let stats: StatResponse = ipfs
.bitswap_stats()
.await
.map_err(StringError::from)?
.into();
Ok(reply::json(&stats))
}
/// Warp filter for the bitswap stat endpoint: injects the `Ipfs` handle and
/// delegates to `stat_query`.
pub fn stat<T: IpfsTypes>(
    ipfs: &Ipfs<T>,
) -> impl Filter<Extract = (impl Reply,), Error = Rejection> + Clone {
    with_ipfs(ipfs).and_then(stat_query)
}
|
/// Small offset added to denominators to keep inversion finite at zero.
// `const` rather than `static`: a plain primitive constant needs no fixed
// memory location, and const is the idiomatic choice for such values.
const EPSILON: f64 = f64::EPSILON;

/// Inverts `v` while avoiding an infinite result at `v == 0.0` by offsetting
/// the denominator with `EPSILON`.
///
/// `safe_invert(0.0)` returns the large-but-finite value `1.0 / EPSILON`.
/// Note that inputs extremely close to `-EPSILON` can still produce huge or
/// infinite results, since the shifted denominator may approach zero.
pub fn safe_invert(v: f64) -> f64 {
    1. / (v + EPSILON)
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_safe_invert() {
        // 10.0 + EPSILON rounds back to exactly 10.0 (EPSILON is below 10.0's
        // ulp), so the result is exactly 0.1.
        assert_eq!(safe_invert(10.0), 0.1);
        // Zero input yields the large-but-finite 1/EPSILON instead of infinity.
        assert_eq!(safe_invert(0.0), 1. / EPSILON);
    }
}
|
// Copyright 2019, The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use crate::{
crypt,
envelope::{DhtMessageFlags, DhtMessageHeader},
inbound::message::{DecryptedDhtMessage, DhtInboundMessage},
proto::envelope::OriginMac,
};
use derive_error::Error;
use futures::{task::Context, Future};
use log::*;
use prost::Message;
use std::{sync::Arc, task::Poll};
use tari_comms::{
message::EnvelopeBody,
peer_manager::NodeIdentity,
pipeline::PipelineError,
types::CommsPublicKey,
utils::signature,
};
use tari_utilities::ByteArray;
use tower::{layer::Layer, Service, ServiceExt};
/// Target name used for this module's `debug!`/`warn!` log output.
const LOG_TARGET: &str = "comms::middleware::decryption";
// Errors produced while decrypting/authenticating an inbound DHT message.
// NOTE(review): `derive_error` appears to use the `///` doc lines below as the
// `Display` text for each variant — treat them as user-facing strings, not
// mere documentation; confirm before editing.
#[derive(Error, Debug)]
enum DecryptionError {
    /// Failed to validate origin MAC signature
    OriginMacInvalidSignature,
    /// Origin MAC contained an invalid public key
    OriginMacInvalidPublicKey,
    /// Origin MAC not provided for encrypted message
    OriginMacNotProvided,
    /// Failed to decrypt origin MAC
    OriginMacDecryptedFailed,
    /// Failed to decode clear-text origin MAC
    OriginMacClearTextDecodeFailed,
    /// Failed to decrypt message body
    MessageBodyDecryptionFailed,
}
/// Tower layer responsible for attempting to decrypt inbound messages; wraps a
/// service in a [`DecryptionService`] bound to this node's identity keys.
pub struct DecryptionLayer {
    node_identity: Arc<NodeIdentity>,
}

impl DecryptionLayer {
    /// Create a layer that decrypts with the given node identity.
    pub fn new(node_identity: Arc<NodeIdentity>) -> Self {
        DecryptionLayer { node_identity }
    }
}

impl<S> Layer<S> for DecryptionLayer {
    type Service = DecryptionService<S>;

    fn layer(&self, service: S) -> Self::Service {
        // `.clone()` on an `Arc` field is a cheap refcount bump.
        DecryptionService::new(service, self.node_identity.clone())
    }
}
/// Responsible for decrypting InboundMessages and passing a
/// DecryptedInboundMessage to the wrapped service.
#[derive(Clone)]
pub struct DecryptionService<S> {
    node_identity: Arc<NodeIdentity>,
    inner: S,
}

impl<S> DecryptionService<S> {
    /// Wrap `service`; inbound messages are decrypted with `node_identity`'s
    /// keys before being forwarded to it.
    pub fn new(service: S, node_identity: Arc<NodeIdentity>) -> Self {
        DecryptionService {
            node_identity,
            inner: service,
        }
    }
}
impl<S> Service<DhtInboundMessage> for DecryptionService<S>
where S: Service<DecryptedDhtMessage, Response = (), Error = PipelineError> + Clone
{
    type Error = PipelineError;
    type Response = ();
    // NOTE(review): `impl Trait` in an associated type position relies on the
    // unstable `type_alias_impl_trait` feature — confirm the crate root
    // enables it.
    type Future = impl Future<Output = Result<Self::Response, Self::Error>>;

    // Always ready: this service keeps no backpressure state of its own.
    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, msg: DhtInboundMessage) -> Self::Future {
        // Clone the inner service and identity so the returned future owns its
        // data instead of borrowing `self`.
        Self::handle_message(self.inner.clone(), Arc::clone(&self.node_identity), msg)
    }
}
impl<S> DecryptionService<S>
where S: Service<DecryptedDhtMessage, Response = (), Error = PipelineError>
{
    /// Entry point for one inbound message: clear-text messages are forwarded
    /// via `success_not_encrypted`; encrypted messages have their origin MAC
    /// decrypted and authenticated before the body is decrypted and passed on.
    async fn handle_message(
        next_service: S,
        node_identity: Arc<NodeIdentity>,
        message: DhtInboundMessage,
    ) -> Result<(), PipelineError>
    {
        let dht_header = &message.dht_header;
        if !dht_header.flags.contains(DhtMessageFlags::ENCRYPTED) {
            return Self::success_not_encrypted(next_service, message).await;
        }
        // An encrypted message without an ephemeral public key cannot be
        // decrypted at all — reject it outright.
        let e_pk = dht_header
            .ephemeral_public_key
            .as_ref()
            // TODO: #banheuristic - encrypted message sent without ephemeral public key
            .ok_or("Ephemeral public key not provided for encrypted message")?;
        // ECDH between our secret key and the sender's ephemeral public key.
        let shared_secret = crypt::generate_ecdh_secret(node_identity.secret_key(), e_pk);
        // Decrypt and verify the origin
        let authenticated_origin = match Self::attempt_decrypt_origin_mac(&shared_secret, dht_header) {
            Ok((public_key, signature)) => {
                // If this fails, discard the message because we decrypted and deserialized the message with our shared
                // ECDH secret but the message could not be authenticated
                Self::authenticate_origin_mac(&public_key, &signature, &message.body)
                    .map_err(PipelineError::from_debug)?;
                public_key
            },
            Err(err) => {
                // MAC decryption failure is not fatal here: the message may
                // simply not be addressed to us — see `decryption_failed`.
                debug!(target: LOG_TARGET, "Unable to decrypt message origin: {}", err);
                return Self::decryption_failed(next_service, &node_identity, message).await;
            },
        };
        debug!(
            target: LOG_TARGET,
            "Attempting to decrypt message body from origin public key '{}'", authenticated_origin
        );
        match Self::attempt_decrypt_message_body(&shared_secret, &message.body) {
            Ok(message_body) => {
                debug!(target: LOG_TARGET, "Message successfully decrypted");
                let msg = DecryptedDhtMessage::succeeded(message_body, Some(authenticated_origin), message);
                next_service.oneshot(msg).await
            },
            Err(err) => {
                debug!(target: LOG_TARGET, "Unable to decrypt message: {}", err);
                Self::decryption_failed(next_service, &node_identity, message).await
            },
        }
    }

    /// Decrypts the header's origin MAC with the shared ECDH secret and
    /// returns the claimed origin public key together with its signature
    /// bytes (not yet verified — see `authenticate_origin_mac`).
    fn attempt_decrypt_origin_mac(
        shared_secret: &CommsPublicKey,
        dht_header: &DhtMessageHeader,
    ) -> Result<(CommsPublicKey, Vec<u8>), DecryptionError>
    {
        let encrypted_origin_mac = Some(&dht_header.origin_mac)
            .filter(|b| !b.is_empty())
            // TODO: #banheuristic - this should not have been sent/propagated
            .ok_or_else(|| DecryptionError::OriginMacNotProvided)?;
        let decrypted_bytes = crypt::decrypt(shared_secret, encrypted_origin_mac)
            .map_err(|_| DecryptionError::OriginMacDecryptedFailed)?;
        let origin_mac =
            OriginMac::decode(decrypted_bytes.as_slice()).map_err(|_| DecryptionError::OriginMacDecryptedFailed)?;
        // Check the public key here, because it is possible (rare but possible) for an failed decrypted message to pass
        // protobuf decoding of the relatively simple OriginMac struct but with invalid data
        let public_key = CommsPublicKey::from_bytes(&origin_mac.public_key)
            .map_err(|_| DecryptionError::OriginMacInvalidPublicKey)?;
        Ok((public_key, origin_mac.signature))
    }

    /// Verifies that `signature` is a valid signature over `body` by
    /// `public_key`. Any verification error is folded into "invalid
    /// signature" (`unwrap_or(false)`).
    fn authenticate_origin_mac(
        public_key: &CommsPublicKey,
        signature: &[u8],
        body: &[u8],
    ) -> Result<(), DecryptionError>
    {
        if signature::verify(public_key, signature, body).unwrap_or(false) {
            Ok(())
        } else {
            Err(DecryptionError::OriginMacInvalidSignature)
        }
    }

    /// Decrypts the message body with the shared secret and decodes it as an
    /// `EnvelopeBody`, rejecting empty bodies (see comment below for why).
    fn attempt_decrypt_message_body(
        shared_secret: &CommsPublicKey,
        message_body: &[u8],
    ) -> Result<EnvelopeBody, DecryptionError>
    {
        let decrypted =
            crypt::decrypt(shared_secret, message_body).map_err(|_| DecryptionError::MessageBodyDecryptionFailed)?;
        // Deserialization into an EnvelopeBody is done here to determine if the
        // decryption produced valid bytes or not.
        EnvelopeBody::decode(decrypted.as_slice())
            .and_then(|body| {
                // Check if we received a body length of zero
                //
                // In addition to a peer sending a zero-length EnvelopeBody, decoding can erroneously succeed
                // if the decrypted bytes happen to be valid protobuf encoding. This is very possible and
                // the decrypt_inbound_fail test below _will_ sporadically fail without the following check.
                // This is because proto3 will set fields to their default value if they don't exist in a valid
                // encoding.
                //
                // For the parts of EnvelopeBody to be erroneously populated with bytes, all of these
                // conditions would have to be true:
                // 1. field type == 2 (length-delimited)
                // 2. field number == 1
                // 3. the subsequent byte(s) would have to be varint-encoded length which does not overflow
                // 4. the rest of the bytes would have to be valid protobuf encoding
                //
                // The chance of this happening is extremely negligible.
                if body.is_empty() {
                    return Err(prost::DecodeError::new("EnvelopeBody has no parts"));
                }
                Ok(body)
            })
            .map_err(|_| DecryptionError::MessageBodyDecryptionFailed)
    }

    /// Handles a clear-text message: if an origin MAC is present it is decoded
    /// and verified (failures discard the message); a decodable body is then
    /// forwarded, while an undecodable one is silently dropped.
    async fn success_not_encrypted(next_service: S, message: DhtInboundMessage) -> Result<(), PipelineError> {
        let authenticated_pk = if message.dht_header.origin_mac.is_empty() {
            None
        } else {
            // Clear-text messages carry the origin MAC unencrypted.
            let origin_mac = OriginMac::decode(message.dht_header.origin_mac.as_slice())
                .map_err(|_| PipelineError::from_debug(DecryptionError::OriginMacClearTextDecodeFailed))?;
            let public_key = CommsPublicKey::from_bytes(&origin_mac.public_key)
                .map_err(|_| PipelineError::from_debug(DecryptionError::OriginMacInvalidPublicKey))?;
            Self::authenticate_origin_mac(&public_key, &origin_mac.signature, &message.body)
                .map_err(PipelineError::from_debug)?;
            Some(public_key)
        };
        match EnvelopeBody::decode(message.body.as_slice()) {
            Ok(deserialized) => {
                debug!(
                    target: LOG_TARGET,
                    "Message is not encrypted. Passing onto next service"
                );
                let msg = DecryptedDhtMessage::succeeded(deserialized, authenticated_pk, message);
                next_service.oneshot(msg).await
            },
            Err(err) => {
                // Message was not encrypted but failed to deserialize - immediately discard
                // TODO: Bad node behaviour?
                debug!(
                    target: LOG_TARGET,
                    "Unable to deserialize message: {}. Message will be discarded.", err
                );
                Ok(())
            },
        }
    }

    /// Handles an undecryptable message: if it was addressed to this node it
    /// is rejected with an error; otherwise it is forwarded marked as failed
    /// so later stages can decide what to do with it.
    async fn decryption_failed(
        next_service: S,
        node_identity: &NodeIdentity,
        message: DhtInboundMessage,
    ) -> Result<(), PipelineError>
    {
        if message.dht_header.destination == node_identity.node_id() ||
            message.dht_header.destination == node_identity.public_key()
        {
            // TODO: #banheuristic - the origin of this message sent this node a message we could not decrypt
            warn!(
                target: LOG_TARGET,
                "Received message from peer '{}' that is destined for this node that could not be decrypted. \
                 Discarding message",
                message.source_peer.node_id
            );
            return Err(
                "Message rejected because this node could not decrypt a message that was addressed to it".into(),
            );
        }
        let msg = DecryptedDhtMessage::failed(message);
        next_service.oneshot(msg).await
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::{
        envelope::DhtMessageFlags,
        test_utils::{make_dht_inbound_message, make_node_identity, service_fn},
    };
    use futures::{executor::block_on, future};
    use std::sync::Mutex;
    use tari_comms::{message::MessageExt, wrap_in_envelope_body};
    use tari_test_utils::counter_context;

    #[test]
    fn poll_ready() {
        let inner = service_fn(|_: DecryptedDhtMessage| future::ready(Result::<(), PipelineError>::Ok(())));
        let node_identity = make_node_identity();
        let mut service = DecryptionService::new(inner, node_identity);
        counter_context!(cx, counter);
        // The service is unconditionally ready and must not register a waker.
        assert!(service.poll_ready(&mut cx).is_ready());
        assert_eq!(counter.get(), 0);
    }

    #[test]
    fn decrypt_inbound_success() {
        // Captures the message that reaches the downstream (mock) service.
        let result = Mutex::new(None);
        let inner = service_fn(|msg: DecryptedDhtMessage| {
            *result.lock().unwrap() = Some(msg);
            future::ready(Result::<(), PipelineError>::Ok(()))
        });
        let node_identity = make_node_identity();
        let mut service = DecryptionService::new(inner, Arc::clone(&node_identity));
        let plain_text_msg = wrap_in_envelope_body!(b"Secret plans".to_vec());
        // Message encrypted for this node's identity: decryption must succeed.
        let inbound_msg = make_dht_inbound_message(
            &node_identity,
            plain_text_msg.to_encoded_bytes(),
            DhtMessageFlags::ENCRYPTED,
            true,
        );
        block_on(service.call(inbound_msg)).unwrap();
        let decrypted = result.lock().unwrap().take().unwrap();
        assert_eq!(decrypted.decryption_succeeded(), true);
        assert_eq!(decrypted.decryption_result.unwrap(), plain_text_msg);
    }

    #[test]
    fn decrypt_inbound_fail() {
        let result = Mutex::new(None);
        let inner = service_fn(|msg: DecryptedDhtMessage| {
            *result.lock().unwrap() = Some(msg);
            future::ready(Result::<(), PipelineError>::Ok(()))
        });
        let node_identity = make_node_identity();
        let mut service = DecryptionService::new(inner, Arc::clone(&node_identity));
        let some_secret = "Super secret message".as_bytes().to_vec();
        // Encrypted for a different identity: decryption fails, but since the
        // message is not destined for us it is still forwarded, marked failed.
        let some_other_node_identity = make_node_identity();
        let inbound_msg =
            make_dht_inbound_message(&some_other_node_identity, some_secret, DhtMessageFlags::ENCRYPTED, true);
        block_on(service.call(inbound_msg.clone())).unwrap();
        let decrypted = result.lock().unwrap().take().unwrap();
        assert_eq!(decrypted.decryption_succeeded(), false);
        assert_eq!(decrypted.decryption_result.unwrap_err(), inbound_msg.body);
    }

    #[test]
    fn decrypt_inbound_fail_destination() {
        let result = Mutex::new(None);
        let inner = service_fn(|msg: DecryptedDhtMessage| {
            *result.lock().unwrap() = Some(msg);
            future::ready(Result::<(), PipelineError>::Ok(()))
        });
        let node_identity = make_node_identity();
        let mut service = DecryptionService::new(inner, Arc::clone(&node_identity));
        let nonsense = "Cannot Decrypt this".as_bytes().to_vec();
        // Undecryptable message explicitly addressed to this node: the call
        // must error out and the downstream service must never be invoked.
        let mut inbound_msg =
            make_dht_inbound_message(&node_identity, nonsense.clone(), DhtMessageFlags::ENCRYPTED, true);
        inbound_msg.dht_header.destination = node_identity.public_key().clone().into();
        let err = block_on(service.call(inbound_msg)).unwrap_err();
        assert!(err.to_string().starts_with("Message rejected"),);
        assert!(result.lock().unwrap().is_none());
    }
}
|
// svd2rust-style accessor plumbing for the CTL register: typed reader/writer
// aliases plus the register's hardware reset value.
#[doc = "Reader of register CTL"]
pub type R = crate::R<u32, super::CTL>;
#[doc = "Writer for register CTL"]
pub type W = crate::W<u32, super::CTL>;
#[doc = "Register CTL `reset()`'s with value 0"]
impl crate::ResetValue for super::CTL {
    type Type = u32;

    // CTL resets to all-zero bits.
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
#[doc = "Reader of field `TAEN`"]
pub type TAEN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TAEN`"]
pub struct TAEN_W<'a> {
    w: &'a mut W,
}
impl<'a> TAEN_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Single-bit field at bit 0: clear it, then OR in the masked value.
        const OFFSET: u32 = 0;
        let cleared = self.w.bits & !(0x01 << OFFSET);
        self.w.bits = cleared | (((value as u32) & 0x01) << OFFSET);
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = "Reader of field `TASTALL`"]
pub type TASTALL_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TASTALL`"]
pub struct TASTALL_W<'a> {
    w: &'a mut W,
}
impl<'a> TASTALL_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Single-bit field at bit 1: clear it, then OR in the masked value.
        const OFFSET: u32 = 1;
        let cleared = self.w.bits & !(0x01 << OFFSET);
        self.w.bits = cleared | (((value as u32) & 0x01) << OFFSET);
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = "GPTM Timer A Event Mode\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum TAEVENT_A {
    #[doc = "0: Positive edge"]
    POS = 0,
    #[doc = "1: Negative edge"]
    NEG = 1,
    // Discriminant 2 is not modeled; `TAEVENT_R::variant` reports it as `Res`.
    #[doc = "3: Both edges"]
    BOTH = 3,
}
impl From<TAEVENT_A> for u8 {
    // repr(u8) makes this cast a plain discriminant read.
    #[inline(always)]
    fn from(variant: TAEVENT_A) -> Self {
        variant as _
    }
}
#[doc = "Reader of field `TAEVENT`"]
pub type TAEVENT_R = crate::R<u8, TAEVENT_A>;
impl TAEVENT_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> crate::Variant<u8, TAEVENT_A> {
        use crate::Variant::*;
        match self.bits {
            0 => Val(TAEVENT_A::POS),
            1 => Val(TAEVENT_A::NEG),
            3 => Val(TAEVENT_A::BOTH),
            // Unmodeled bit pattern (e.g. 2): surface the raw value.
            i => Res(i),
        }
    }
    #[doc = "Checks if the value of the field is `POS`"]
    #[inline(always)]
    pub fn is_pos(&self) -> bool {
        *self == TAEVENT_A::POS
    }
    #[doc = "Checks if the value of the field is `NEG`"]
    #[inline(always)]
    pub fn is_neg(&self) -> bool {
        *self == TAEVENT_A::NEG
    }
    #[doc = "Checks if the value of the field is `BOTH`"]
    #[inline(always)]
    pub fn is_both(&self) -> bool {
        *self == TAEVENT_A::BOTH
    }
}
#[doc = "Write proxy for field `TAEVENT`"]
pub struct TAEVENT_W<'a> {
    w: &'a mut W,
}
impl<'a> TAEVENT_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Two-bit field at bits 2:3: clear it, then OR in the masked value.
        const OFFSET: u32 = 2;
        const MASK: u32 = 0x03;
        let cleared = self.w.bits & !(MASK << OFFSET);
        self.w.bits = cleared | (((value as u32) & MASK) << OFFSET);
        self.w
    }
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: TAEVENT_A) -> &'a mut W {
        // SAFETY: every enum discriminant fits within the 2-bit mask.
        unsafe { self.bits(variant.into()) }
    }
    #[doc = "Positive edge"]
    #[inline(always)]
    pub fn pos(self) -> &'a mut W {
        self.variant(TAEVENT_A::POS)
    }
    #[doc = "Negative edge"]
    #[inline(always)]
    pub fn neg(self) -> &'a mut W {
        self.variant(TAEVENT_A::NEG)
    }
    #[doc = "Both edges"]
    #[inline(always)]
    pub fn both(self) -> &'a mut W {
        self.variant(TAEVENT_A::BOTH)
    }
}
#[doc = "Reader of field `RTCEN`"]
pub type RTCEN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `RTCEN`"]
pub struct RTCEN_W<'a> {
    w: &'a mut W,
}
impl<'a> RTCEN_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Single-bit field at bit 4: clear it, then OR in the masked value.
        const OFFSET: u32 = 4;
        let cleared = self.w.bits & !(0x01 << OFFSET);
        self.w.bits = cleared | (((value as u32) & 0x01) << OFFSET);
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = "Reader of field `TAOTE`"]
pub type TAOTE_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TAOTE`"]
pub struct TAOTE_W<'a> {
    w: &'a mut W,
}
impl<'a> TAOTE_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Single-bit field at bit 5: clear it, then OR in the masked value.
        const OFFSET: u32 = 5;
        let cleared = self.w.bits & !(0x01 << OFFSET);
        self.w.bits = cleared | (((value as u32) & 0x01) << OFFSET);
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = "Reader of field `TAPWML`"]
pub type TAPWML_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TAPWML`"]
pub struct TAPWML_W<'a> {
    w: &'a mut W,
}
impl<'a> TAPWML_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Single-bit field at bit 6: clear it, then OR in the masked value.
        const OFFSET: u32 = 6;
        let cleared = self.w.bits & !(0x01 << OFFSET);
        self.w.bits = cleared | (((value as u32) & 0x01) << OFFSET);
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = "Reader of field `TBEN`"]
pub type TBEN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TBEN`"]
pub struct TBEN_W<'a> {
    w: &'a mut W,
}
impl<'a> TBEN_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Single-bit field at bit 8: clear it, then OR in the masked value.
        const OFFSET: u32 = 8;
        let cleared = self.w.bits & !(0x01 << OFFSET);
        self.w.bits = cleared | (((value as u32) & 0x01) << OFFSET);
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = "Reader of field `TBSTALL`"]
pub type TBSTALL_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TBSTALL`"]
pub struct TBSTALL_W<'a> {
    w: &'a mut W,
}
impl<'a> TBSTALL_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Single-bit field at bit 9: clear it, then OR in the masked value.
        const OFFSET: u32 = 9;
        let cleared = self.w.bits & !(0x01 << OFFSET);
        self.w.bits = cleared | (((value as u32) & 0x01) << OFFSET);
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = "GPTM Timer B Event Mode\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum TBEVENT_A {
    #[doc = "0: Positive edge"]
    POS = 0,
    #[doc = "1: Negative edge"]
    NEG = 1,
    // Discriminant 2 is not modeled; `TBEVENT_R::variant` reports it as `Res`.
    #[doc = "3: Both edges"]
    BOTH = 3,
}
impl From<TBEVENT_A> for u8 {
    // repr(u8) makes this cast a plain discriminant read.
    #[inline(always)]
    fn from(variant: TBEVENT_A) -> Self {
        variant as _
    }
}
pub type TBEVENT_R = crate::R<u8, TBEVENT_A>;
impl TBEVENT_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> crate::Variant<u8, TBEVENT_A> {
use crate::Variant::*;
match self.bits {
0 => Val(TBEVENT_A::POS),
1 => Val(TBEVENT_A::NEG),
3 => Val(TBEVENT_A::BOTH),
i => Res(i),
}
}
#[doc = "Checks if the value of the field is `POS`"]
#[inline(always)]
pub fn is_pos(&self) -> bool {
*self == TBEVENT_A::POS
}
#[doc = "Checks if the value of the field is `NEG`"]
#[inline(always)]
pub fn is_neg(&self) -> bool {
*self == TBEVENT_A::NEG
}
#[doc = "Checks if the value of the field is `BOTH`"]
#[inline(always)]
pub fn is_both(&self) -> bool {
*self == TBEVENT_A::BOTH
}
}
#[doc = "Write proxy for field `TBEVENT`"]
pub struct TBEVENT_W<'a> {
    w: &'a mut W,
}
impl<'a> TBEVENT_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Two-bit field at bits 10:11: clear it, then OR in the masked value.
        const OFFSET: u32 = 10;
        const MASK: u32 = 0x03;
        let cleared = self.w.bits & !(MASK << OFFSET);
        self.w.bits = cleared | (((value as u32) & MASK) << OFFSET);
        self.w
    }
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: TBEVENT_A) -> &'a mut W {
        // SAFETY: every enum discriminant fits within the 2-bit mask.
        unsafe { self.bits(variant.into()) }
    }
    #[doc = "Positive edge"]
    #[inline(always)]
    pub fn pos(self) -> &'a mut W {
        self.variant(TBEVENT_A::POS)
    }
    #[doc = "Negative edge"]
    #[inline(always)]
    pub fn neg(self) -> &'a mut W {
        self.variant(TBEVENT_A::NEG)
    }
    #[doc = "Both edges"]
    #[inline(always)]
    pub fn both(self) -> &'a mut W {
        self.variant(TBEVENT_A::BOTH)
    }
}
#[doc = "Reader of field `TBOTE`"]
pub type TBOTE_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TBOTE`"]
pub struct TBOTE_W<'a> {
    w: &'a mut W,
}
impl<'a> TBOTE_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Single-bit field at bit 13: clear it, then OR in the masked value.
        const OFFSET: u32 = 13;
        let cleared = self.w.bits & !(0x01 << OFFSET);
        self.w.bits = cleared | (((value as u32) & 0x01) << OFFSET);
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = "Reader of field `TBPWML`"]
pub type TBPWML_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TBPWML`"]
pub struct TBPWML_W<'a> {
    w: &'a mut W,
}
impl<'a> TBPWML_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Single-bit field at bit 14: clear it, then OR in the masked value.
        const OFFSET: u32 = 14;
        let cleared = self.w.bits & !(0x01 << OFFSET);
        self.w.bits = cleared | (((value as u32) & 0x01) << OFFSET);
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
impl R {
    // Field readers: each method extracts one CTL bit-field from the captured
    // register value (bits 7, 12 and 15+ are not modeled here).
    #[doc = "Bit 0 - GPTM Timer A Enable"]
    #[inline(always)]
    pub fn taen(&self) -> TAEN_R {
        TAEN_R::new((self.bits & 0x01) != 0)
    }
    #[doc = "Bit 1 - GPTM Timer A Stall Enable"]
    #[inline(always)]
    pub fn tastall(&self) -> TASTALL_R {
        TASTALL_R::new(((self.bits >> 1) & 0x01) != 0)
    }
    #[doc = "Bits 2:3 - GPTM Timer A Event Mode"]
    #[inline(always)]
    pub fn taevent(&self) -> TAEVENT_R {
        TAEVENT_R::new(((self.bits >> 2) & 0x03) as u8)
    }
    #[doc = "Bit 4 - GPTM RTC Stall Enable"]
    #[inline(always)]
    pub fn rtcen(&self) -> RTCEN_R {
        RTCEN_R::new(((self.bits >> 4) & 0x01) != 0)
    }
    #[doc = "Bit 5 - GPTM Timer A Output Trigger Enable"]
    #[inline(always)]
    pub fn taote(&self) -> TAOTE_R {
        TAOTE_R::new(((self.bits >> 5) & 0x01) != 0)
    }
    #[doc = "Bit 6 - GPTM Timer A PWM Output Level"]
    #[inline(always)]
    pub fn tapwml(&self) -> TAPWML_R {
        TAPWML_R::new(((self.bits >> 6) & 0x01) != 0)
    }
    #[doc = "Bit 8 - GPTM Timer B Enable"]
    #[inline(always)]
    pub fn tben(&self) -> TBEN_R {
        TBEN_R::new(((self.bits >> 8) & 0x01) != 0)
    }
    #[doc = "Bit 9 - GPTM Timer B Stall Enable"]
    #[inline(always)]
    pub fn tbstall(&self) -> TBSTALL_R {
        TBSTALL_R::new(((self.bits >> 9) & 0x01) != 0)
    }
    #[doc = "Bits 10:11 - GPTM Timer B Event Mode"]
    #[inline(always)]
    pub fn tbevent(&self) -> TBEVENT_R {
        TBEVENT_R::new(((self.bits >> 10) & 0x03) as u8)
    }
    #[doc = "Bit 13 - GPTM Timer B Output Trigger Enable"]
    #[inline(always)]
    pub fn tbote(&self) -> TBOTE_R {
        TBOTE_R::new(((self.bits >> 13) & 0x01) != 0)
    }
    #[doc = "Bit 14 - GPTM Timer B PWM Output Level"]
    #[inline(always)]
    pub fn tbpwml(&self) -> TBPWML_R {
        TBPWML_R::new(((self.bits >> 14) & 0x01) != 0)
    }
}
impl W {
    // Field writers: each method returns a write proxy that updates its
    // bit-field in the pending register value (see the `*_W::bit`/`bits`
    // implementations above for the masking).
    #[doc = "Bit 0 - GPTM Timer A Enable"]
    #[inline(always)]
    pub fn taen(&mut self) -> TAEN_W {
        TAEN_W { w: self }
    }
    #[doc = "Bit 1 - GPTM Timer A Stall Enable"]
    #[inline(always)]
    pub fn tastall(&mut self) -> TASTALL_W {
        TASTALL_W { w: self }
    }
    #[doc = "Bits 2:3 - GPTM Timer A Event Mode"]
    #[inline(always)]
    pub fn taevent(&mut self) -> TAEVENT_W {
        TAEVENT_W { w: self }
    }
    #[doc = "Bit 4 - GPTM RTC Stall Enable"]
    #[inline(always)]
    pub fn rtcen(&mut self) -> RTCEN_W {
        RTCEN_W { w: self }
    }
    #[doc = "Bit 5 - GPTM Timer A Output Trigger Enable"]
    #[inline(always)]
    pub fn taote(&mut self) -> TAOTE_W {
        TAOTE_W { w: self }
    }
    #[doc = "Bit 6 - GPTM Timer A PWM Output Level"]
    #[inline(always)]
    pub fn tapwml(&mut self) -> TAPWML_W {
        TAPWML_W { w: self }
    }
    #[doc = "Bit 8 - GPTM Timer B Enable"]
    #[inline(always)]
    pub fn tben(&mut self) -> TBEN_W {
        TBEN_W { w: self }
    }
    #[doc = "Bit 9 - GPTM Timer B Stall Enable"]
    #[inline(always)]
    pub fn tbstall(&mut self) -> TBSTALL_W {
        TBSTALL_W { w: self }
    }
    #[doc = "Bits 10:11 - GPTM Timer B Event Mode"]
    #[inline(always)]
    pub fn tbevent(&mut self) -> TBEVENT_W {
        TBEVENT_W { w: self }
    }
    #[doc = "Bit 13 - GPTM Timer B Output Trigger Enable"]
    #[inline(always)]
    pub fn tbote(&mut self) -> TBOTE_W {
        TBOTE_W { w: self }
    }
    #[doc = "Bit 14 - GPTM Timer B PWM Output Level"]
    #[inline(always)]
    pub fn tbpwml(&mut self) -> TBPWML_W {
        TBPWML_W { w: self }
    }
}
|
// Solar-system planet modules, declared in order of increasing distance from
// the Sun.
pub mod mercury;
pub mod venus;
pub mod earth;
pub mod mars;
pub mod jupiter;
pub mod saturn;
pub mod uranus;
pub mod neptune;
|
// Copyright 2022 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::alloc::Layout;
use std::borrow::BorrowMut;
use std::sync::Arc;
use std::vec;
use bumpalo::Bump;
use common_exception::ErrorCode;
use common_exception::Result;
use common_expression::types::DataType;
use common_expression::BlockEntry;
use common_expression::ColumnBuilder;
use common_expression::DataBlock;
use common_expression::Scalar;
use common_expression::Value;
use common_functions::aggregates::AggregateFunctionRef;
use common_functions::aggregates::StateAddr;
use common_pipeline_core::processors::port::InputPort;
use common_pipeline_core::processors::port::OutputPort;
use common_pipeline_core::processors::Processor;
use common_pipeline_transforms::processors::transforms::transform_accumulating::AccumulatingTransform;
use common_pipeline_transforms::processors::transforms::transform_accumulating::AccumulatingTransformer;
use crate::pipelines::processors::AggregatorParams;
/// SELECT COUNT | SUM FROM table;
/// SELECT COUNT | SUM FROM table;
///
/// Partial aggregator for aggregation queries with no GROUP BY keys: keeps a
/// single state per aggregate function and folds every input block into it.
pub struct PartialSingleStateAggregator {
    // Owns the memory behind the raw `places` addresses below; stored (though
    // never read) so the arena outlives the states.
    #[allow(dead_code)]
    arena: Bump,
    // One state address per aggregate function, pointing into `arena`.
    places: Vec<StateAddr>,
    // Per function: column offsets of its arguments within the input block.
    arg_indices: Vec<Vec<usize>>,
    // Aggregate function implementations, parallel to `places`.
    funcs: Vec<AggregateFunctionRef>,
}
impl PartialSingleStateAggregator {
    /// Allocate one aggregation state per function inside a fresh arena and
    /// wrap the aggregator in an `AccumulatingTransformer` processor.
    pub fn try_create(
        input: Arc<InputPort>,
        output: Arc<OutputPort>,
        params: &Arc<AggregatorParams>,
    ) -> Result<Box<dyn Processor>> {
        assert!(!params.offsets_aggregate_states.is_empty());
        let arena = Bump::new();
        let layout = params
            .layout
            .ok_or_else(|| ErrorCode::LayoutError("layout shouldn't be None"))?;
        // One contiguous allocation; each function's state lives at its
        // precomputed offset within it.
        let base: StateAddr = arena.alloc_layout(layout).into();
        let places: Vec<StateAddr> = params
            .aggregate_functions
            .iter()
            .enumerate()
            .map(|(idx, func)| {
                let state_place = base.next(params.offsets_aggregate_states[idx]);
                func.init_state(state_place);
                state_place
            })
            .collect();
        Ok(AccumulatingTransformer::create(
            input,
            output,
            PartialSingleStateAggregator {
                arena,
                places,
                funcs: params.aggregate_functions.clone(),
                arg_indices: params.aggregate_functions_arguments.clone(),
            },
        ))
    }
}
impl AccumulatingTransform for PartialSingleStateAggregator {
    const NAME: &'static str = "AggregatorPartialTransform";

    /// Fold one input block into every aggregate state; partial aggregation
    /// emits no output blocks of its own.
    fn transform(&mut self, block: DataBlock) -> Result<Vec<DataBlock>> {
        let block = block.convert_to_full();
        let rows = block.num_rows();
        for (idx, func) in self.funcs.iter().enumerate() {
            // Gather the argument columns for this function by offset.
            let arg_columns: Vec<_> = self.arg_indices[idx]
                .iter()
                .map(|&index| {
                    block
                        .get_by_offset(index)
                        .value
                        .as_column()
                        .unwrap()
                        .clone()
                })
                .collect();
            func.accumulate(self.places[idx], &arg_columns, None, rows)?;
        }
        Ok(vec![])
    }

    /// On stream end, serialize each state into a one-row block of string
    /// columns (when requested), then drop any states that need it.
    fn on_finish(&mut self, generate_data: bool) -> Result<Vec<DataBlock>> {
        let mut generate_data_block = vec![];
        if generate_data {
            let mut columns = Vec::with_capacity(self.funcs.len());
            for (func, place) in self.funcs.iter().zip(self.places.iter()) {
                let mut state_bytes = Vec::with_capacity(4);
                func.serialize(*place, &mut state_bytes)?;
                columns.push(BlockEntry {
                    data_type: DataType::String,
                    value: Value::Scalar(Scalar::String(state_bytes)),
                });
            }
            generate_data_block = vec![DataBlock::new(columns, 1)];
        }
        // Release per-function state resources where required.
        for (place, func) in self.places.iter().zip(self.funcs.iter()) {
            if func.need_manual_drop_state() {
                unsafe { func.drop_state(*place) }
            }
        }
        Ok(generate_data_block)
    }
}
/// Final-stage aggregator for single-state (no `GROUP BY`) queries,
/// e.g. `SELECT COUNT | SUM FROM table;`.
pub struct FinalSingleStateAggregator {
    // Arena backing every state allocated by `new_places`.
    arena: Bump,
    // Memory layout of one combined state allocation (all functions).
    layout: Layout,
    // Per-function queue of deserialised partial states awaiting merge.
    to_merge_places: Vec<Vec<StateAddr>>,
    funcs: Vec<AggregateFunctionRef>,
    // Byte offset of each function's state within one allocation.
    offsets_aggregate_states: Vec<usize>,
}
impl FinalSingleStateAggregator {
    /// Builds the final aggregator as an `AccumulatingTransformer` processor.
    pub fn try_create(
        input: Arc<InputPort>,
        output: Arc<OutputPort>,
        params: &Arc<AggregatorParams>,
    ) -> Result<Box<dyn Processor>> {
        // One state offset per aggregate function is required below.
        assert!(!params.offsets_aggregate_states.is_empty());
        let arena = Bump::new();
        let layout = params
            .layout
            .ok_or_else(|| ErrorCode::LayoutError("layout shouldn't be None"))?;
        Ok(AccumulatingTransformer::create(
            input,
            output,
            FinalSingleStateAggregator {
                arena,
                layout,
                funcs: params.aggregate_functions.clone(),
                to_merge_places: vec![vec![]; params.aggregate_functions.len()],
                offsets_aggregate_states: params.offsets_aggregate_states.clone(),
            },
        ))
    }
    /// Allocates one fresh combined state from the arena and returns the
    /// initialised per-function state addresses inside it.
    fn new_places(&self) -> Vec<StateAddr> {
        let place: StateAddr = self.arena.alloc_layout(self.layout).into();
        self.funcs
            .iter()
            .enumerate()
            .map(|(idx, func)| {
                let arg_place = place.next(self.offsets_aggregate_states[idx]);
                func.init_state(arg_place);
                arg_place
            })
            .collect()
    }
}
impl AccumulatingTransform for FinalSingleStateAggregator {
    const NAME: &'static str = "AggregatorFinalTransform";
    /// Deserialises one partial state per function from the incoming block
    /// and queues it for merging in `on_finish`.
    ///
    /// NOTE(review): only row 0 of each column is read — assumes upstream
    /// partial aggregators emit exactly one row per block; confirm.
    fn transform(&mut self, block: DataBlock) -> Result<Vec<DataBlock>> {
        if !block.is_empty() {
            let block = block.convert_to_full();
            let places = self.new_places();
            for (index, func) in self.funcs.iter().enumerate() {
                let binary_array = block.get_by_offset(index).value.as_column().unwrap();
                let binary_array = binary_array.as_string().ok_or_else(|| {
                    ErrorCode::IllegalDataType("binary array should be string type")
                })?;
                // Index 0 is in bounds thanks to the non-empty check above.
                let mut data = unsafe { binary_array.index_unchecked(0) };
                func.deserialize(places[index], &mut data)?;
                self.to_merge_places[index].push(places[index]);
            }
        }
        Ok(vec![])
    }
    /// Merges all queued partial states into one fresh state per function,
    /// emits the single-row result block, then drops every state.
    fn on_finish(&mut self, generate_data: bool) -> Result<Vec<DataBlock>> {
        let mut generate_data_block = vec![];
        if generate_data {
            // One single-value column builder per aggregate function.
            let mut aggr_values = {
                let mut builders = vec![];
                for func in &self.funcs {
                    let data_type = func.return_type()?;
                    builders.push(ColumnBuilder::with_capacity(&data_type, 1));
                }
                builders
            };
            let main_places = self.new_places();
            for (index, func) in self.funcs.iter().enumerate() {
                let main_place = main_places[index];
                for place in self.to_merge_places[index].iter() {
                    func.merge(main_place, *place)?;
                }
                let array = aggr_values[index].borrow_mut();
                func.merge_result(main_place, array)?;
            }
            let mut columns = Vec::with_capacity(self.funcs.len());
            for builder in aggr_values {
                columns.push(builder.build());
            }
            // destroy states (the merged ones; queued partial states are
            // dropped unconditionally below)
            for (place, func) in main_places.iter().zip(self.funcs.iter()) {
                if func.need_manual_drop_state() {
                    unsafe { func.drop_state(*place) }
                }
            }
            generate_data_block = vec![DataBlock::new_from_columns(columns)];
        }
        for (places, func) in self.to_merge_places.iter().zip(self.funcs.iter()) {
            if func.need_manual_drop_state() {
                for place in places {
                    unsafe { func.drop_state(*place) }
                }
            }
        }
        Ok(generate_data_block)
    }
}
|
use crate::server::LSPServer;
use crate::sources::LSPSupport;
use log::info;
use ropey::Rope;
use std::process::{Command, Stdio};
use tower_lsp::lsp_types::*;
impl LSPServer {
    /// Handles `textDocument/formatting`: runs verible over the whole
    /// document and returns a single edit replacing the full text, or
    /// `None` when formatting is disabled or the formatter fails.
    pub fn formatting(&self, params: DocumentFormattingParams) -> Option<Vec<TextEdit>> {
        let uri = params.text_document.uri;
        info!("formatting {}", &uri);
        let file_id = self.srcs.get_id(&uri).to_owned();
        // Block until the file's parse data is up to date.
        self.srcs.wait_parse_ready(file_id, false);
        let file = self.srcs.get_file(file_id)?;
        let file = file.read().ok()?;
        let conf = self.conf.read().unwrap();
        if conf.verible.format.enabled {
            Some(vec![TextEdit::new(
                // Replace the entire document with the formatter output.
                Range::new(
                    file.text.char_to_pos(0),
                    file.text.char_to_pos(file.text.len_chars()),
                ),
                format_document(
                    &file.text,
                    None,
                    &conf.verible.format.path,
                    &conf.verible.format.args,
                )?,
            )])
        } else {
            None
        }
    }
    /// Handles `textDocument/rangeFormatting`: like `formatting`, but
    /// restricts verible to the requested line range. The returned edit
    /// still spans the whole document because verible re-emits the full
    /// text with only the selected lines reformatted.
    pub fn range_formatting(&self, params: DocumentRangeFormattingParams) -> Option<Vec<TextEdit>> {
        let uri = params.text_document.uri;
        info!("range formatting {}", &uri);
        let file_id = self.srcs.get_id(&uri).to_owned();
        self.srcs.wait_parse_ready(file_id, false);
        let file = self.srcs.get_file(file_id)?;
        let file = file.read().ok()?;
        let conf = self.conf.read().unwrap();
        if conf.verible.format.enabled {
            Some(vec![TextEdit::new(
                file.text.char_range_to_range(0..file.text.len_chars()),
                format_document(
                    &file.text,
                    Some(params.range),
                    &conf.verible.format.path,
                    &conf.verible.format.args,
                )?,
            )])
        } else {
            None
        }
    }
}
/// format the document using verible-verilog-format
///
/// Pipes `rope` into the formatter's stdin and returns the formatted text
/// from stdout, or `None` if the process could not be spawned, I/O failed,
/// the output was not valid UTF-8, or the formatter exited unsuccessfully.
///
/// `range`, when given, is forwarded as `--lines start-end`; LSP positions
/// are 0-based while verible lines are 1-based, hence the `+ 1`.
pub fn format_document(
    rope: &Rope,
    range: Option<Range>,
    verible_format_path: &str,
    verible_format_args: &[String],
) -> Option<String> {
    let mut child = Command::new(verible_format_path);
    child
        .stdin(Stdio::piped())
        .stderr(Stdio::piped())
        .stdout(Stdio::piped())
        .args(verible_format_args);
    // rangeFormatting
    if let Some(r) = range {
        child
            .arg("--lines")
            .arg(format!("{}-{}", r.start.line + 1, r.end.line + 1));
    }
    // "-" tells verible to read the source from stdin.
    let mut child = child.arg("-").spawn().ok()?;
    // write file to stdin, read output from stdout
    rope.write_to(child.stdin.as_mut()?).ok()?;
    // wait_with_output drops stdin first, so the child sees EOF and exits.
    let output = child.wait_with_output().ok()?;
    if output.status.success() {
        info!("formatting succeeded");
        let raw_output = String::from_utf8(output.stdout).ok()?;
        Some(raw_output)
    } else {
        None
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::server::ProjectConfig;
    use crate::support::test_init;
    use which::which;
    // These tests shell out to verible-verilog-format; they silently
    // pass (skip) when the binary is not on PATH.
    #[test]
    fn test_formatting() {
        test_init();
        let text = r#"
module test;
  logic a;
  logic b;
endmodule"#;
        // Identical to `text` except for the trailing newline the
        // formatter appends.
        let text_fixed = r#"
module test;
  logic a;
  logic b;
endmodule
"#;
        let doc = Rope::from_str(&text);
        if which("verible-verilog-format").is_ok() {
            assert_eq!(
                format_document(
                    &doc,
                    None,
                    &ProjectConfig::default().verible.format.path,
                    &[]
                )
                .unwrap(),
                text_fixed.to_string()
            );
        }
    }
    #[test]
    fn test_range_formatting() {
        test_init();
        let text = r#"module t1;
  logic a;
  logic b;
  logic c;
endmodule
module t2;
  logic a;
  logic b;
  logic c;
endmodule"#;
        let text_fixed = r#"module t1;
  logic a;
  logic b;
  logic c;
endmodule
module t2;
  logic a;
  logic b;
  logic c;
endmodule
"#;
        let doc = Rope::from_str(&text);
        if which("verible-verilog-format").is_ok() {
            assert_eq!(
                format_document(
                    &doc,
                    // Restrict formatting to module t1 (document lines 0-4).
                    Some(Range::new(Position::new(0, 0), Position::new(4, 9))),
                    &ProjectConfig::default().verible.format.path,
                    &[]
                )
                .unwrap(),
                text_fixed.to_string()
            );
        }
    }
}
|
/**
A somewhat reduced test case to expose some Valgrind issues.
This originally came from the word-count benchmark.
*/
use std;
import std::io;
import option = std::option::t;
import std::option::some;
import std::option::none;
import std::str;
import std::vec;
import std::map;
// Mapper used by this test: emits a single (filename, "1") pair.
// NOTE(review): this file is pre-1.0 Rust (`import`, `tag`, `alt`,
// `chan[T]`, `<|`/`|>` channel operators), kept as a historical test
// fixture; it does not compile with any modern rustc.
fn map(filename: str, emit: map_reduce::putter) { emit(filename, "1"); }
// Minimal map-reduce harness (pre-1.0 Rust): the master task spawns one
// mapper task per input and brokers reducer lookups over channels.
mod map_reduce {
    export putter;
    export mapper;
    export map_reduce;
    type putter = fn(str, str) ;
    type mapper = fn(str, putter) ;
    // Control messages sent from mapper tasks to the master task.
    tag ctrl_proto { find_reducer(str, chan[int]); mapper_done; }
    fn start_mappers(ctrl: chan[ctrl_proto], inputs: vec[str]) {
        for i: str in inputs { spawn map_task(ctrl, i); }
    }
    fn map_task(ctrl: chan[ctrl_proto], input: str) {
        let intermediates = map::new_str_hash();
        // Caches the reducer id per key; on a miss it asks the master
        // over `ctrl` and blocks until the answer arrives.
        fn emit(im: &map::hashmap[str, int], ctrl: chan[ctrl_proto], key: str,
                val: str) {
            let c;
            alt im.find(key) {
              some(_c) { c = _c }
              none. {
                let p = port();
                log_err "sending find_reducer";
                ctrl <| find_reducer(key, chan(p));
                log_err "receiving";
                p |> c;
                log_err c;
                im.insert(key, c);
              }
            }
        }
        map(input, bind emit(intermediates, ctrl, _, _));
        ctrl <| mapper_done;
    }
    fn map_reduce(inputs: vec[str]) {
        let ctrl = port[ctrl_proto]();
        // This task becomes the master control task. It spawns others
        // to do the rest.
        let reducers: map::hashmap[str, int];
        reducers = map::new_str_hash();
        start_mappers(chan(ctrl), inputs);
        let num_mappers = vec::len(inputs) as int;
        // Serve reducer lookups until every mapper has reported done.
        while num_mappers > 0 {
            let m;
            ctrl |> m;
            alt m {
              mapper_done. { num_mappers -= 1; }
              find_reducer(k, cc) {
                let c;
                alt reducers.find(k) { some(_c) { c = _c; } none. { c = 0; } }
                cc <| c;
              }
            }
        }
    }
}
// Entry point: runs the map-reduce over this test's own source file.
fn main() {
    map_reduce::map_reduce(["../src/test/run-pass/hashmap-memory.rs"]);
}
pub mod get_message_timestamp_field;
pub mod get_message_type;
|
use serde::Deserialize;
/// Raw deserialisation target for a flight identification number record;
/// both fields are optional in the source data.
#[derive(Deserialize, Debug)]
pub struct FlightIdentificationNumberRaw {
    pub default: Option<String>,
    pub alternative: Option<String>, // Guess — field semantics unconfirmed; verify against real payloads
}
use std::collections::BinaryHeap;
#[test]
fn binary_peek_pop_test() {
    // BinaryHeap is a max-heap: the greatest value sits at the front.
    let mut heap: BinaryHeap<i32> = [2, 3, 8, 6, 9, 5, 4].iter().copied().collect();
    // peek() borrows the maximum as Option<&T> without removing it.
    assert_eq!(heap.peek(), Some(&9));
    // pop() removes and returns values in descending order as Option<T>.
    assert_eq!(heap.pop(), Some(9));
    assert_eq!(heap.pop(), Some(8));
    assert_eq!(heap.pop(), Some(6));
}
#[test]
fn binary_pop_with_let_test() {
    // Popping repeatedly with `while let` drains the heap largest-first.
    let mut heap = BinaryHeap::from(vec!["c", "a", "r"]);
    let mut results = Vec::new();
    while let Some(value) = heap.pop() {
        results.push(value);
    }
    assert_eq!(results, vec!["r", "c", "a"])
}
|
//! Expressions
//!
//! See: [6.4 Expressions](http://erlang.org/doc/apps/erts/absform.html#id87350)
use super::*;
// Aliases instantiating the generic AST node types from `common` with
// `Expression` (and `Pattern` for the binding side of `Match`).
pub type LocalCall = common::LocalCall<Expression>;
pub type RemoteCall = common::RemoteCall<Expression>;
pub type Match = common::Match<Pattern, Expression>;
pub type Tuple = common::Tuple<Expression>;
pub type Cons = common::Cons<Expression>;
pub type Binary = common::Binary<Expression>;
pub type UnaryOp = common::UnaryOp<Expression>;
pub type BinaryOp = common::BinaryOp<Expression>;
pub type Record = common::Record<Expression>;
pub type RecordIndex = common::RecordIndex<Expression>;
pub type Map = common::Map<Expression>;
/// Any Erlang abstract-format expression.
///
/// NOTE(review): all variants but `Binary` box their payload, presumably
/// to keep the enum small — confirm why `Binary` is stored inline.
#[derive(Debug, Clone)]
pub enum Expression {
    Integer(Box<Integer>),
    Float(Box<Float>),
    String(Box<Str>),
    Char(Box<Char>),
    Atom(Box<Atom>),
    Match(Box<Match>),
    Var(Box<Var>),
    Tuple(Box<Tuple>),
    Nil(Box<Nil>),
    Cons(Box<Cons>),
    Binary(Binary),
    UnaryOp(Box<UnaryOp>),
    BinaryOp(Box<BinaryOp>),
    Record(Box<Record>),
    RecordIndex(Box<RecordIndex>),
    Map(Box<Map>),
    Catch(Box<Catch>),
    LocalCall(Box<LocalCall>),
    RemoteCall(Box<RemoteCall>),
    Comprehension(Box<Comprehension>),
    Block(Box<Block>),
    If(Box<If>),
    Case(Box<Case>),
    Try(Box<Try>),
    Receive(Box<Receive>),
    InternalFun(Box<InternalFun>),
    ExternalFun(Box<ExternalFun>),
    AnonymousFun(Box<AnonymousFun>),
}
// `From<Payload> for Expression` conversions, one per variant.
impl_from!(Expression::Integer(Integer));
impl_from!(Expression::Float(Float));
impl_from!(Expression::String(Str));
impl_from!(Expression::Char(Char));
impl_from!(Expression::Atom(Atom));
impl_from!(Expression::Match(Match));
impl_from!(Expression::Var(Var));
impl_from!(Expression::Tuple(Tuple));
impl_from!(Expression::Nil(Nil));
impl_from!(Expression::Cons(Cons));
impl_from!(Expression::Binary(Binary));
impl_from!(Expression::UnaryOp(UnaryOp));
impl_from!(Expression::BinaryOp(BinaryOp));
impl_from!(Expression::Record(Record));
impl_from!(Expression::RecordIndex(RecordIndex));
impl_from!(Expression::Map(Map));
impl_from!(Expression::Catch(Catch));
impl_from!(Expression::LocalCall(LocalCall));
impl_from!(Expression::RemoteCall(RemoteCall));
impl_from!(Expression::Comprehension(Comprehension));
impl_from!(Expression::Block(Block));
impl_from!(Expression::If(If));
impl_from!(Expression::Case(Case));
impl_from!(Expression::Try(Try));
impl_from!(Expression::Receive(Receive));
impl_from!(Expression::InternalFun(InternalFun));
impl_from!(Expression::ExternalFun(ExternalFun));
impl_from!(Expression::AnonymousFun(AnonymousFun));
impl Node for Expression {
    /// Returns the source line by delegating to the wrapped variant.
    fn line(&self) -> LineNum {
        match *self {
            Self::Integer(ref x) => x.line(),
            Self::Float(ref x) => x.line(),
            Self::String(ref x) => x.line(),
            Self::Char(ref x) => x.line(),
            Self::Atom(ref x) => x.line(),
            Self::Match(ref x) => x.line(),
            Self::Var(ref x) => x.line(),
            Self::Tuple(ref x) => x.line(),
            Self::Nil(ref x) => x.line(),
            Self::Cons(ref x) => x.line(),
            Self::Binary(ref x) => x.line(),
            Self::UnaryOp(ref x) => x.line(),
            Self::BinaryOp(ref x) => x.line(),
            Self::Record(ref x) => x.line(),
            Self::RecordIndex(ref x) => x.line(),
            Self::Map(ref x) => x.line(),
            Self::Catch(ref x) => x.line(),
            Self::LocalCall(ref x) => x.line(),
            Self::RemoteCall(ref x) => x.line(),
            Self::Comprehension(ref x) => x.line(),
            Self::Block(ref x) => x.line(),
            Self::If(ref x) => x.line(),
            Self::Case(ref x) => x.line(),
            Self::Try(ref x) => x.line(),
            Self::Receive(ref x) => x.line(),
            Self::InternalFun(ref x) => x.line(),
            Self::ExternalFun(ref x) => x.line(),
            Self::AnonymousFun(ref x) => x.line(),
        }
    }
}
impl Expression {
    /// Convenience constructor for an atom literal expression.
    pub fn atom(line: LineNum, name: String) -> Self {
        let atom = Atom::new(line, name);
        Self::Atom(Box::new(atom))
    }
}
/// A `catch Expr` expression.
#[derive(Debug, Clone)]
pub struct Catch {
    pub line: LineNum,
    pub expr: Expression,
}
impl Catch {
    /// Wraps `expr` in a `catch` located at `line`.
    pub fn new(line: LineNum, expr: Expression) -> Self {
        Self { line, expr }
    }
}
impl Node for Catch {
    fn line(&self) -> LineNum {
        self.line
    }
}
/// An `if` expression: a sequence of guarded clauses.
#[derive(Debug, Clone)]
pub struct If {
    pub line: LineNum,
    pub clauses: Vec<Clause>,
}
impl If {
    /// Creates an `if` with the given clauses.
    pub fn new(line: LineNum, clauses: Vec<Clause>) -> Self {
        Self { line, clauses }
    }
}
impl Node for If {
    fn line(&self) -> LineNum {
        self.line
    }
}
/// A `case Expr of ... end` expression.
#[derive(Debug, Clone)]
pub struct Case {
    pub line: LineNum,
    pub expr: Expression,
    pub clauses: Vec<Clause>,
}
impl Case {
    /// Creates a `case` matching `expr` against `clauses`.
    pub fn new(line: LineNum, expr: Expression, clauses: Vec<Clause>) -> Self {
        Self {
            line,
            expr,
            clauses,
        }
    }
}
impl Node for Case {
    fn line(&self) -> LineNum {
        self.line
    }
}
/// A `try ... of ... catch ... after ... end` expression.
#[derive(Debug, Clone)]
pub struct Try {
    pub line: LineNum,
    pub body: Vec<Expression>,
    pub case_clauses: Vec<Clause>,
    pub catch_clauses: Vec<Clause>,
    pub after: Vec<Expression>,
}
impl Try {
    /// Creates a `try` from all four sections (any may be empty).
    pub fn new(
        line: LineNum,
        body: Vec<Expression>,
        case_clauses: Vec<Clause>,
        catch_clauses: Vec<Clause>,
        after: Vec<Expression>,
    ) -> Self {
        Self {
            line,
            body,
            case_clauses,
            catch_clauses,
            after,
        }
    }
}
impl Node for Try {
    fn line(&self) -> LineNum {
        self.line
    }
}
/// A `receive` expression with an optional `after Timeout -> Body` section.
#[derive(Debug, Clone)]
pub struct Receive {
    pub line: LineNum,
    pub clauses: Vec<Clause>,
    pub timeout: Option<Expression>,
    pub after: Vec<Expression>,
}
impl Receive {
    /// Creates a `receive` with no `after` section; attach one with the
    /// builder methods below.
    pub fn new(line: LineNum, clauses: Vec<Clause>) -> Self {
        Self {
            line,
            clauses,
            timeout: None,
            after: Vec::new(),
        }
    }
    /// Builder: sets the `after` timeout expression.
    pub fn timeout(mut self, timeout: Expression) -> Self {
        self.timeout = Some(timeout);
        self
    }
    /// Builder: sets the body evaluated when the timeout fires.
    pub fn after(mut self, after: Vec<Expression>) -> Self {
        self.after = after;
        self
    }
}
impl Node for Receive {
    fn line(&self) -> LineNum {
        self.line
    }
}
/// A `begin ... end` block expression.
#[derive(Debug, Clone)]
pub struct Block {
    pub line: LineNum,
    pub body: Vec<Expression>,
}
impl Block {
    /// Creates a block from its body expressions.
    pub fn new(line: LineNum, body: Vec<Expression>) -> Self {
        Self { line, body }
    }
}
impl Node for Block {
    fn line(&self) -> LineNum {
        self.line
    }
}
/// A list (`is_list == true`) or bitstring comprehension.
#[derive(Debug, Clone)]
pub struct Comprehension {
    pub line: LineNum,
    pub is_list: bool,
    pub expr: Expression,
    pub qualifiers: Vec<Qualifier>,
}
impl Comprehension {
    /// Creates a comprehension producing `expr` under `qualifiers`.
    pub fn new(line: LineNum, is_list: bool, expr: Expression, qualifiers: Vec<Qualifier>) -> Self {
        Self {
            line,
            is_list,
            expr,
            qualifiers,
        }
    }
}
impl Node for Comprehension {
    fn line(&self) -> LineNum {
        self.line
    }
}
/// A single comprehension qualifier: a (bitstring) generator or a
/// boolean filter expression.
#[derive(Debug, Clone)]
pub enum Qualifier {
    Generator(Generator),
    BitStringGenerator(Generator),
    Filter(Expression),
}
/// A comprehension generator: `Pattern <- Expr` (or `<=` for bitstrings).
#[derive(Debug, Clone)]
pub struct Generator {
    pub line: LineNum,
    pub pattern: Pattern,
    pub expr: Expression,
}
impl Generator {
    /// Creates a generator binding `pattern` over the elements of `expr`.
    pub fn new(line: LineNum, pattern: Pattern, expr: Expression) -> Self {
        Self {
            line,
            pattern,
            expr,
        }
    }
}
impl Node for Generator {
    fn line(&self) -> LineNum {
        self.line
    }
}
/// A `fun` expression, optionally named for self-recursion.
#[derive(Debug, Clone)]
pub struct AnonymousFun {
    pub line: LineNum,
    pub name: Option<String>,
    pub clauses: Vec<Clause>,
}
impl AnonymousFun {
    /// Creates an unnamed `fun` with the given clauses.
    pub fn new(line: LineNum, clauses: Vec<Clause>) -> Self {
        Self {
            line,
            name: None,
            clauses,
        }
    }
    /// Builder: attaches a name and returns `self`.
    pub fn name(mut self, name: String) -> Self {
        self.name = Some(name);
        self
    }
}
impl Node for AnonymousFun {
    fn line(&self) -> LineNum {
        self.line
    }
}
|
use rsa::{pkcs8::FromPublicKey, RsaPublicKey};
use collectxyz::nft::{Config, XyzTokenInfo};
use cosmwasm_std::{Addr, StdError, StdResult, Storage};
use cw_storage_plus::{Index, IndexList, IndexedMap, Item, MultiIndex, UniqueIndex};
pub const CONFIG: Item<Config> = Item::new("config");
const CAPTCHA_PUBLIC_KEY: Item<String> = Item::new("captcha_public_key");
/// Validates `public_key` as a PEM-encoded RSA public key, then persists
/// the PEM string as-is; parsing here is purely for validation.
pub fn save_captcha_public_key(storage: &mut dyn Storage, public_key: &str) -> StdResult<()> {
    RsaPublicKey::from_public_key_pem(public_key)
        .map_err(|_| StdError::generic_err("invalid public key"))?;
    CAPTCHA_PUBLIC_KEY.save(storage, &public_key.to_string())?;
    Ok(())
}
/// Loads and parses the stored captcha RSA public key; errors if no key
/// was saved or the stored PEM fails to parse.
pub fn load_captcha_public_key(storage: &dyn Storage) -> StdResult<RsaPublicKey> {
    let public_key = CAPTCHA_PUBLIC_KEY.load(storage)?;
    RsaPublicKey::from_public_key_pem(&public_key)
        .map_err(|_| StdError::generic_err("invalid public key"))
}
/// Secondary indexes over the token map: by owner (non-unique) and by
/// coordinates (unique — at most one token per coordinate).
pub struct TokenIndexes<'a> {
    pub owner: MultiIndex<'a, (Addr, Vec<u8>), XyzTokenInfo>,
    pub coordinates: UniqueIndex<'a, Vec<u8>, XyzTokenInfo>,
}
impl<'a> IndexList<XyzTokenInfo> for TokenIndexes<'a> {
    // Enumerates both indexes so IndexedMap keeps them in sync on writes.
    fn get_indexes(&'_ self) -> Box<dyn Iterator<Item = &'_ dyn Index<XyzTokenInfo>> + '_> {
        let v: Vec<&dyn Index<XyzTokenInfo>> = vec![&self.owner, &self.coordinates];
        Box::new(v.into_iter())
    }
}
/// Returns the indexed token map (primary namespace "tokens").
///
/// The owner index key composes (owner, primary key) so entries within
/// one owner stay distinct; the coordinates index enforces at most one
/// token per coordinate.
pub fn tokens<'a>() -> IndexedMap<'a, &'a str, XyzTokenInfo, TokenIndexes<'a>> {
    let indexes = TokenIndexes {
        owner: MultiIndex::new(
            |d: &XyzTokenInfo, k: Vec<u8>| (d.owner.clone(), k),
            "tokens",
            "tokens__owner",
        ),
        coordinates: UniqueIndex::new(
            |d: &XyzTokenInfo| d.extension.coordinates.to_bytes(),
            "tokens__coordinates",
        ),
    };
    IndexedMap::new("tokens", indexes)
}
pub const OWNER: Item<String> = Item::new("owner");
|
use std::{
pin::Pin,
task::{Context, Poll},
};
use futures::{ready, Stream, StreamExt};
use iox_query::QueryCompletedToken;
/// Wraps an inner query stream, calling `QueryCompletedToken::set_success`
/// once the stream completes without having yielded any `Err` item.
#[derive(Debug)]
pub struct QueryCompletedTokenStream<S, T, E>
where
    S: Stream<Item = Result<T, E>> + Unpin + Send,
{
    inner: S,
    token: QueryCompletedToken,
    // Set once any Err is yielded; suppresses set_success at end-of-stream.
    found_err: bool,
}
impl<S, T, E> QueryCompletedTokenStream<S, T, E>
where
    S: Stream<Item = Result<T, E>> + Unpin + Send,
{
    /// Wraps `inner`, taking ownership of `token`; the token is marked
    /// successful only when the stream finishes without an error.
    pub fn new(inner: S, token: QueryCompletedToken) -> Self {
        let found_err = false;
        Self {
            inner,
            token,
            found_err,
        }
    }
}
impl<S, T, E> Stream for QueryCompletedTokenStream<S, T, E>
where
    S: Stream<Item = Result<T, E>> + Unpin + Send,
{
    type Item = Result<T, E>;
    /// Forwards items from the inner stream, remembering whether an error
    /// was ever produced; at end-of-stream the token is marked successful
    /// only if no error occurred.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = &mut *self;
        match ready!(this.inner.poll_next_unpin(cx)) {
            None => {
                if !this.found_err {
                    this.token.set_success();
                }
                Poll::Ready(None)
            }
            Some(Ok(x)) => Poll::Ready(Some(Ok(x))),
            Some(Err(e)) => {
                this.found_err = true;
                Poll::Ready(Some(Err(e)))
            }
        }
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Pass-through: this adapter neither drops nor adds items.
        self.inner.size_hint()
    }
}
#[cfg(test)]
mod tests {
    use std::sync::Arc;
    use parking_lot::Mutex;
    use super::*;
    // An empty, error-free stream must mark the token successful.
    #[tokio::test]
    async fn test_empty() {
        let (res, token) = token();
        let stream =
            QueryCompletedTokenStream::new(futures::stream::empty::<Result<(), ()>>(), token);
        assert_eq!(stream.collect::<Vec<_>>().await, vec![],);
        assert_eq!(*res.lock(), Some(true));
    }
    // Dropping the wrapper without draining it must not report success.
    #[tokio::test]
    async fn test_not_finished() {
        let (res, token) = token();
        QueryCompletedTokenStream::new(futures::stream::empty::<Result<(), ()>>(), token);
        assert_eq!(*res.lock(), Some(false));
    }
    // Any Err item suppresses success, even when later items are Ok.
    #[tokio::test]
    async fn test_err() {
        let (res, token) = token();
        let stream =
            QueryCompletedTokenStream::new(futures::stream::iter([Ok(()), Err(()), Ok(())]), token);
        assert_eq!(
            stream.collect::<Vec<_>>().await,
            vec![Ok(()), Err(()), Ok(())],
        );
        assert_eq!(*res.lock(), Some(false));
    }
    // Builds a token whose completion callback records the success flag.
    fn token() -> (Arc<Mutex<Option<bool>>>, QueryCompletedToken) {
        let token = Arc::new(Mutex::new(None));
        let token_captured = Arc::clone(&token);
        let qct = QueryCompletedToken::new(move |success| {
            *token_captured.lock() = Some(success);
        });
        (token, qct)
    }
}
|
extern crate libcomchannel;
extern crate libconfig;
extern crate libdb;
extern crate libdbgserver;
extern crate libengine;
extern crate libmercury;
use libcomchannel::*;
use libdb::DataBase;
use libengine::*;
use libmercury::device::*;
use libmercury::iface::*;
use std::cell::RefCell;
use std::io::prelude::*;
use std::io::{BufReader};
use std::net::TcpListener;
use std::net::TcpStream;
use std::path::*;
use std::sync::mpsc;
use std::sync::Arc;
use std::sync::Mutex;
use std::thread;
use std::*;
// Messages sent from the pool to its workers over the shared channel.
enum Message {
    NewJob(Job),
    Terminate,
}
// A fixed-size pool of worker threads fed by a single mpsc channel.
pub struct ThreadPool {
    workers: Vec<Worker>,
    sender: mpsc::Sender<Message>,
}
// Pre-Rust-1.35 workaround: `Box<dyn FnOnce()>` could not be invoked
// directly, so jobs are called through this helper trait instead.
trait FnBox {
    fn call_box(self: Box<Self>);
}
impl<F: FnOnce()> FnBox for F {
    fn call_box(self: Box<F>) {
        (*self)()
    }
}
// A job is any one-shot closure shipped to a worker thread.
type Job = Box<dyn FnBox + Send + 'static>;
impl ThreadPool {
    /// Create a new ThreadPool.
    ///
    /// `size` is the number of worker threads to spawn; all workers pull
    /// jobs off one queue shared behind an `Arc<Mutex<..>>`.
    ///
    /// # Panics
    ///
    /// Panics if `size` is zero.
    pub fn new(size: usize) -> ThreadPool {
        assert!(size > 0);
        let (sender, receiver) = mpsc::channel();
        let shared_receiver = Arc::new(Mutex::new(receiver));
        let workers = (0..size)
            .map(|id| Worker::new(id, Arc::clone(&shared_receiver)))
            .collect();
        ThreadPool { workers, sender }
    }
    /// Queue a closure for execution on the next free worker.
    pub fn execute<F>(&self, f: F)
    where
        F: FnOnce() + Send + 'static,
    {
        self.sender.send(Message::NewJob(Box::new(f))).unwrap();
    }
}
impl Drop for ThreadPool {
    /// Ask every worker to stop, then join all of their threads.
    fn drop(&mut self) {
        println!("Sending terminate message to all workers.");
        // One Terminate per worker: each worker consumes exactly one.
        for _ in 0..self.workers.len() {
            self.sender.send(Message::Terminate).unwrap();
        }
        println!("Shutting down all workers.");
        for worker in self.workers.iter_mut() {
            println!("Shutting down worker {}", worker.id);
            if let Some(handle) = worker.thread.take() {
                handle.join().unwrap();
            }
        }
    }
}
// One pool worker: an id for log messages plus its join handle
// (taken out during shutdown so Drop can join it).
struct Worker {
    id: usize,
    thread: Option<thread::JoinHandle<()>>,
}
impl Worker {
    /// Spawn a worker thread that loops pulling messages off the shared
    /// queue until it receives `Message::Terminate`.
    fn new(id: usize, receiver: Arc<Mutex<mpsc::Receiver<Message>>>) -> Worker {
        let handle = thread::spawn(move || loop {
            // Hold the lock only long enough to take one message.
            let message = receiver.lock().unwrap().recv().unwrap();
            match message {
                Message::NewJob(job) => {
                    println!("Worker {} got a job; executing.", id);
                    job.call_box();
                }
                Message::Terminate => {
                    println!("Worker {} was told to terminate.", id);
                    break;
                }
            }
        });
        Worker {
            id,
            thread: Some(handle),
        }
    }
}
/// Restores channels, interfaces and counters from the debug database,
/// re-applies their persisted properties (the activity flag last, so every
/// other property is in place before objects activate), then serves debug
/// commands on 127.0.0.1:7878 with a 4-thread pool (exits after 4
/// connections).
fn main() {
    // Link-channel factories available for restoration, keyed by type name.
    let channels_registered: &[(&str, Box<RefCell<dyn ILinkChannelFactory>>)] = &mut [(
        SerialChannel::type_name(),
        Box::new(RefCell::new(LinkChannelFactory::default())),
    )];
    // Interface factories.
    let iface_registered: &[(&str, Box<RefCell<dyn IFaceFactory>>)] = &mut [(
        InterfaceMercury::type_name(),
        Box::new(RefCell::new(FaceFactory::default())),
    )];
    // Counter factories.
    let counter_registered: &[(&str, Box<RefCell<dyn ICounterFactory>>)] = &mut [(
        InterfaceMercury::type_name(),
        Box::new(RefCell::new(Mercury230Factory::default())),
    )];
    // Read persisted objects from the database.
    let mut db = DataBase::new();
    db.open(Path::new("debug.sqlite"));
    db.clear();
    let mut channels_list: Vec<Arc<Mutex<dyn ILinkChannel>>> = Vec::new();
    let mut ifaces_list: Vec<Arc<Mutex<IFace>>> = Vec::new();
    let mut counters_list: Vec<Arc<Mutex<ICounter>>> = Vec::new();
    // Restore channels and interfaces.
    let objects = db.load_objects();
    for obj in objects {
        let container = obj.unwrap();
        let guid = &container.guid;
        let class_name = &container.class;
        for channel_reg in channels_registered {
            let (channel_classname, channel_factory) = channel_reg;
            if *class_name == channel_classname.to_owned() {
                let channel = channel_factory
                    .borrow_mut()
                    .spawn_with_uuid(guid.to_owned());
                channels_list.push(channel);
            }
        }
        for iface_reg in iface_registered {
            let (iface_classname, iface_factory) = iface_reg;
            if *class_name == iface_classname.to_owned() {
                let iface = iface_factory.borrow_mut().spawn_with_uuid(guid.to_owned());
                ifaces_list.push(iface);
            }
        }
    }
    // Restore counters in a second pass so their parent channels exist.
    let objects = db.load_objects();
    for obj in objects {
        let container = obj.unwrap();
        let guid = &container.guid;
        let class_name = &container.class;
        let parent = &container.parent;
        for counter_reg in counter_registered {
            let (counter_classname, counter_factory) = counter_reg;
            if *class_name == counter_classname.to_owned() {
                // BUGFIX: this used to be `let _ = channels_list.iter_mut()
                // .map(..)` — a lazy iterator whose result was discarded, so
                // the closure never ran and no counter was ever restored.
                // An eager `for` loop executes the body.
                for channel in channels_list.iter() {
                    let arc_channel = channel.clone();
                    // Take the guid and release the lock before spawning, in
                    // case the factory locks the channel itself.
                    let channel_guid = arc_channel.lock().unwrap().guid();
                    if parent == channel_guid.as_str() {
                        let counter = counter_factory
                            .borrow_mut()
                            .spawn_with_uuid(guid.to_owned(), arc_channel.clone());
                        counters_list.push(counter);
                    }
                }
            }
        }
    }
    // Restore all properties except the activity flag.
    let rows = db.load_properties();
    for row in rows {
        let container = row.unwrap();
        if &container.name == "Активность" {
            continue;
        }
        for counter in &counters_list {
            if *counter.lock().unwrap().guid() == container.guid {
                let properties = counter.lock().unwrap().properties();
                let mut properties = properties.lock().unwrap();
                let item = PropertiesItem {
                    name: container.name.clone(),
                    value: container.value.clone(),
                    ptype: container.ptype.into(),
                    variants: vec![],
                    regexpr: String::new(),
                    min: 0,
                    max: 32_767,
                    err_msg: String::new(),
                    required: container.required,
                };
                properties.add(item);
            }
        }
    }
    // Apply the activity flag last, activating fully-configured objects.
    let rows = db.load_properties();
    for row in rows {
        let container = row.unwrap();
        if &container.name != "Активность" {
            continue;
        }
        for counter in &counters_list {
            if *counter.lock().unwrap().guid() == container.guid {
                let properties = counter.lock().unwrap().properties();
                let mut properties = properties.lock().unwrap();
                let item = PropertiesItem {
                    name: container.name.clone(),
                    value: container.value.clone(),
                    ptype: container.ptype.into(),
                    variants: vec![],
                    regexpr: String::new(),
                    min: 0,
                    max: 32_767,
                    err_msg: String::new(),
                    required: container.required,
                };
                properties.add(item);
            }
        }
    }
    // Serve debug commands; take(4) limits the demo to four connections.
    let listener = TcpListener::bind("127.0.0.1:7878").unwrap();
    let pool = ThreadPool::new(4);
    for stream in listener.incoming().take(4) {
        let stream = stream.unwrap();
        pool.execute(|| {
            handle_connection(stream);
        });
    }
    println!("Shutting down.");
}
/// Reads one request line from the client, runs it through the engine,
/// and writes back the response ("{error}" if processing failed).
fn handle_connection(mut stream: TcpStream) {
    let mut buffer = String::new();
    {
        // Scope the reader so its borrow of `stream` ends before writing.
        let mut reader = BufReader::new(&mut stream);
        reader.read_line(&mut buffer).unwrap();
        println!("{}", &buffer);
    }
    let response = engine::processing(&buffer.as_str()).unwrap_or("{error}".to_string());
    println!("{}", &response);
    stream.write_all(response.as_bytes()).unwrap();
    stream.flush().unwrap();
    println!("End");
}
|
use legion::{storage::Component, *};
use super::{
spatial_grid::DenseGrid,
};
use crate::{
simulation::{Config, RigidCircle},
timing::timer::time_func,
};
/// Physics stepper: integrates positions, bounces circles off the world
/// bounds, and resolves circle-circle collisions via a spatial grid.
pub struct PhysicsPipeline {
    // Spatial hash rebuilt every step for neighbour queries.
    grid: DenseGrid,
}
impl PhysicsPipeline {
    /// Creates the pipeline and its spatial grid.
    ///
    /// NOTE(review): the grid is sized from `bounds.1.x` alone — this
    /// assumes a square world anchored at the origin; confirm.
    pub fn new(_world: &mut World, config: &Config) -> Self {
        let grid = DenseGrid::new((config.cell_radius * 32.0) as u32, (config.bounds.1.x) as u32);
        Self { grid }
    }
    /// Advances one simulation step: integrate, then resolve collisions.
    pub fn step(&mut self, world: &mut World, resources: &mut Resources) {
        time_func!(physics, step);
        self.update_positions(world, resources);
        self.detect_collisions(world);
    }
    /// Commits pending (`to_*`) state, bounces circles off the world
    /// bounds, and re-inserts every entity into the (cleared) grid.
    fn update_positions(&mut self, world: &mut World, resources: &Resources) {
        time_func!(physics, pos_update);
        let bounds = resources.get::<Config>().unwrap().bounds;
        self.grid.clear();
        <(Entity, &mut RigidCircle)>::query().par_for_each_mut(world, |(entity, circ)| {
            circ.vel = circ.to_vel;
            circ.pos = circ.to_pos + circ.vel;
            // Reflect off the left/right walls, clamping back inside.
            if (circ.pos.x - circ.radius) <= bounds.0.x || (circ.pos.x + circ.radius) >= bounds.1.x {
                circ.pos.x = circ.pos.x.clamp(bounds.0.x + circ.radius, bounds.1.x - circ.radius);
                circ.vel.x = -circ.vel.x;
            }
            // Reflect off the bottom/top walls.
            if (circ.pos.y - circ.radius) <= bounds.0.y || (circ.pos.y + circ.radius) > bounds.1.y {
                circ.pos.y = circ.pos.y.clamp(bounds.0.y + circ.radius, bounds.1.y - circ.radius);
                circ.vel.y = -circ.vel.y;
            }
            circ.to_vel = circ.vel;
            circ.to_pos = circ.pos;
            self.grid.insert(circ.pos, *entity);
        });
    }
    /// Applies pairwise collision responses using grid neighbour queries.
    ///
    /// SAFETY: `par_for_each_unchecked` together with `unsafe_component`
    /// permits aliasing mutable access to components across threads.
    /// NOTE(review): soundness presumably relies on the grid query
    /// excluding the current entity (`*ent` is passed to `query`) and on
    /// races being tolerated by the simulation — verify before reuse.
    fn detect_collisions(&self, world: &mut World) {
        time_func!(physics, col_detect);
        let mut q = <(Entity, &mut RigidCircle)>::query().filter(component::<RigidCircle>());
        unsafe {
            q.par_for_each_unchecked(world, |(ent, c)| {
                let around = self.grid.query(c.pos, 2.0 * c.radius, *ent);
                around.iter().for_each(|e| {
                    elastic_collision(c, self.unsafe_component(world, *e));
                });
            });
        }
    }
    /// Fetches a component reference without borrow checking.
    ///
    /// SAFETY: the caller must guarantee no conflicting access to the same
    /// entity's component; unwraps panic if entity or component is missing.
    fn unsafe_component<'a, T: Component>(&self, world: &'a World, entity: Entity) -> &'a mut T {
        unsafe {
            world
                .entry_ref(entity)
                .unwrap()
                .into_component_unchecked::<T>()
                .unwrap()
        }
    }
}
/// Elastic collision between two circles.
/// Updates RigidCircles in place
///
/// Only `a` is mutated: its pending velocity (`to_vel`) gains the
/// equal-mass elastic impulse along the line of centres, and its pending
/// position (`to_pos`) is pushed back by half of the overlap.
/// NOTE(review): `b` is read-only — presumably the caller also processes
/// the pair from `b`'s side (see `detect_collisions`); confirm.
/// NOTE(review): exactly coincident circles give `dist == 0` and the
/// divisions below produce NaN — presumably prevented upstream; verify.
fn elastic_collision(a: &mut RigidCircle, b: &RigidCircle) {
    let del = b.pos - a.pos;
    let dist = del.length();
    // Squared distance; dividing the dot product by it normalises the impulse.
    let norm = dist.powi(2);
    let vdel = b.vel - a.vel;
    a.to_vel += ((vdel).dot(del) / norm) * del;
    a.to_pos -= del / dist * (a.radius * 2.0 - dist) * 0.5;
}
|
//! # kdtree
//!
//! K-dimensional tree for Rust (bucket point-region implementation)
//!
//! ## Usage
//!
//! ```
//! use kdtree::KdTree;
//! use kdtree::ErrorKind;
//! use kdtree::distance::squared_euclidean;
//!
//! let a: ([f64; 2], usize) = ([0f64, 0f64], 0);
//! let b: ([f64; 2], usize) = ([1f64, 1f64], 1);
//! let c: ([f64; 2], usize) = ([2f64, 2f64], 2);
//! let d: ([f64; 2], usize) = ([3f64, 3f64], 3);
//!
//! let dimensions = 2;
//! let mut kdtree = KdTree::new(dimensions);
//!
//! kdtree.add(&a.0, a.1).unwrap();
//! kdtree.add(&b.0, b.1).unwrap();
//! kdtree.add(&c.0, c.1).unwrap();
//! kdtree.add(&d.0, d.1).unwrap();
//!
//! assert_eq!(kdtree.size(), 4);
//! assert_eq!(
//! kdtree.nearest(&a.0, 0, &squared_euclidean).unwrap(),
//! vec![]
//! );
//! assert_eq!(
//! kdtree.nearest(&a.0, 1, &squared_euclidean).unwrap(),
//! vec![(0f64, &0)]
//! );
//! assert_eq!(
//! kdtree.nearest(&a.0, 2, &squared_euclidean).unwrap(),
//! vec![(0f64, &0), (2f64, &1)]
//! );
//! assert_eq!(
//! kdtree.nearest(&a.0, 3, &squared_euclidean).unwrap(),
//! vec![(0f64, &0), (2f64, &1), (8f64, &2)]
//! );
//! assert_eq!(
//! kdtree.nearest(&a.0, 4, &squared_euclidean).unwrap(),
//! vec![(0f64, &0), (2f64, &1), (8f64, &2), (18f64, &3)]
//! );
//! assert_eq!(
//! kdtree.nearest(&a.0, 5, &squared_euclidean).unwrap(),
//! vec![(0f64, &0), (2f64, &1), (8f64, &2), (18f64, &3)]
//! );
//! assert_eq!(
//! kdtree.nearest(&b.0, 4, &squared_euclidean).unwrap(),
//! vec![(0f64, &1), (2f64, &0), (2f64, &2), (8f64, &3)]
//! );
//! ```
extern crate num_traits;
#[cfg(feature = "serialize")]
#[cfg_attr(feature = "serialize", macro_use)]
extern crate serde_derive;
extern crate rsmalloc;
#[global_allocator]
static GLOBAL: rsmalloc::Allocator = rsmalloc::Allocator;
pub mod distance;
mod heap_element;
pub mod kdtree;
mod util;
pub use crate::kdtree::ErrorKind;
pub use crate::kdtree::KdTree;
use distance::squared_euclidean;
use std::convert::From;
// A struct that can be passed between C and Rust.
//
// Layout (`repr(C)`): an `f64` followed by a `usize`, matching the
// `(f64, usize)` pairs produced by `KdTree::nearest` — the squared
// distance first, then the associated point datum. The original field
// names (`npoints`/`dist`) were swapped relative to those meanings; the
// fields are private, so renaming them changes neither the C-side layout
// nor any Rust caller.
#[repr(C)]
pub struct ResTuple {
    dist: f64,
    data: usize,
}
// Conversion functions
impl From<(f64, usize)> for ResTuple {
    fn from(tup: (f64, usize)) -> ResTuple {
        ResTuple { dist: tup.0, data: tup.1 }
    }
}
impl From<ResTuple> for (f64, usize) {
    fn from(tup: ResTuple) -> (f64, usize) {
        (tup.dist, tup.data)
    }
}
/// C entry point: builds a demo 2-D kd-tree and returns an owning pointer.
///
/// NOTE(review): `size` is currently ignored — the tree always holds the
/// four fixed demo points below; confirm intended behavior.
#[no_mangle]
pub extern "C" fn kdtree_create(size: u64) -> *mut KdTree<f64, usize, [f64; 2]> {
    println!("Calling kdtree new");
    let a: ([f64; 2], usize) = ([0f64, 0f64], 0);
    let b: ([f64; 2], usize) = ([1f64, 1f64], 1);
    let c: ([f64; 2], usize) = ([2f64, 2f64], 2);
    let d: ([f64; 2], usize) = ([3f64, 3f64], 3);
    let dimensions = 2;
    let mut kdtree = KdTree::new(dimensions);
    kdtree.add(a.0, a.1).unwrap();
    kdtree.add(b.0, b.1).unwrap();
    kdtree.add(c.0, c.1).unwrap();
    kdtree.add(d.0, d.1).unwrap();
    // Ownership transfers to the C caller; it must be handed back to Rust
    // (e.g. Box::from_raw) to be freed.
    Box::into_raw(Box::new(kdtree))
}
/// C entry point: returns the second entry of a 2-nearest-neighbour query
/// for `idx`, as a (squared distance, datum) pair.
///
/// # Safety
/// `kdtree` must be a valid pointer previously returned by `kdtree_create`;
/// the unwraps panic (across the FFI boundary) if it is null or the lookup
/// fails.
#[no_mangle]
pub unsafe extern "C" fn kdtree_lookup(kdtree: *mut KdTree<f64, usize, [f64; 2]>, idx: &[f64; 2]) -> ResTuple {
    let kdtree = kdtree.as_ref().unwrap();
    println!("Idxs: {} {}", idx[0], idx[1]);
    // nearest(.., 2, ..) yields the two closest points; element [1] is the
    // second-nearest.
    let res = kdtree.nearest(idx, 2, &squared_euclidean).unwrap();
    (res[1].0, *res[1].1).into()
}
|
/// Build libspiro binding for Rust.
extern crate bindgen;
extern crate cc;
extern crate dos2unix;
use std::env;
use std::path::PathBuf;
/// Regenerates the libspiro bindings into `src/lib.rs` via bindgen.
fn do_bindgen() {
    // Based on `bindgen` manual.
    let bindings = bindgen::Builder::default()
        .header("wrapper.h")
        // Tell cargo to invalidate the built crate whenever any of the
        // included header files changed.
        .parse_callbacks(Box::new(bindgen::CargoCallbacks))
        .derive_partialeq(true)
        .derive_debug(true)
        .derive_copy(true)
        .derive_default(true)
        // This function doesn't exist! Struct must be created.
        .blacklist_function("new_bezctx")
        // Avoids silly warnings about case of types. They're from C...
        .raw_line("#![allow(non_camel_case_types, non_snake_case)]")
        .generate()
        .expect("Unable to generate bindings");
    // Bindings go to src/lib.rs (not OUT_DIR) so they are committed and
    // only regenerated on demand (see main's DO_BINDGEN gate).
    let out_path = PathBuf::from("src");
    let out_file = out_path.join("lib.rs");
    bindings
        .write_to_file(out_file.clone())
        .expect("Couldn't write bindings!");
    // Windows sanity - use \n even on Windows.
    dos2unix::Dos2Unix::convert(&out_file.into_os_string().into_string().unwrap(), false);
}
/// Build script: compiles the vendored libspiro C sources and, when the
/// `DO_BINDGEN` env var is set, regenerates the Rust bindings.
fn main() {
    let mut build = cc::Build::new();
    // We aren't using libspiro's autotools build, so I wrote a generic spiro-config.h which
    // will work on all the systems I care about.
    build.include("."); // for spiro-config.h
    for src in &[
        "libspiro/bezctx.c",
        "libspiro/spiro.c",
        "libspiro/spiroentrypoints.c",
    ] {
        build.file(src);
    }
    build
        .static_flag(true)
        .shared_flag(true)
        .static_crt(true)
        .compile("libspiro.a");
    println!("cargo:rerun-if-changed=wrapper.h");
    // Link built library. "lib" and ".a" are added back by Cargo.
    println!("cargo:rustc-link-lib=static=spiro");
    if env::var("DO_BINDGEN").is_ok() {
        do_bindgen();
    }
}
|
mod auth;
mod user;
mod channel;
mod login;
mod group;
mod invite;
pub use auth::*;
pub use user::*;
pub use channel::*;
pub use login::*;
pub use group::*;
pub use invite::*;
|
#[doc = "Reader of register MBCNT"]
pub type R = crate::R<u32, super::MBCNT>;
#[doc = "Reader of field `CNTL`"]
pub type CNTL_R = crate::R<u8, u8>;
impl R {
    #[doc = "Bits 0:7 - I2C Master Burst Count"]
    #[inline(always)]
    pub fn cntl(&self) -> CNTL_R {
        // The field occupies the low byte, so the truncating cast is
        // equivalent to masking with 0xff before casting.
        CNTL_R::new(self.bits as u8)
    }
}
|
/// Prints a two-word Italian greeting ("Ciao popolo!").
fn main() {
    let greeting = "Ciao";
    let audience = "popolo!";
    println!("{} {}", greeting, audience);
}
use crate::position::Position;
use std::collections::VecDeque;
/// A `char` iterator that tracks 1-based line/column positions and
/// supports arbitrary lookahead via the wrapped `MultiPeek`.
pub struct CharsWithPosition<I: Iterator<Item = char>> {
    // Underlying chars, wrapped so callers can peek several items ahead.
    iter: MultiPeek<I>,
    // Position of the char most recently returned by `next`.
    prev_pos: Position,
    // Position at which the next char will be reported.
    next_pos: Position,
}
impl<I: Iterator<Item = char>> CharsWithPosition<I> {
    /// Wraps `iter`; the first char will be reported at line 1, column 1.
    /// `prev_pos` starts at column 0 as a "before the first char" sentinel.
    pub fn new(iter: I) -> CharsWithPosition<I> {
        CharsWithPosition {
            iter: MultiPeek::new(iter),
            prev_pos: Position { line: 1, column: 0 },
            next_pos: Position { line: 1, column: 1 },
        }
    }
    /// Looks ahead `index` chars (0 = the char `next` would return)
    /// without consuming anything.
    pub fn peek(&mut self, index: usize) -> Option<&I::Item> {
        self.iter.peek(index)
    }
    /// Position of the char most recently returned by `next`.
    pub fn prev_pos(&self) -> Position {
        self.prev_pos
    }
    /// Position at which the next char will be reported.
    pub fn next_pos(&self) -> Position {
        self.next_pos
    }
}
impl<I: Iterator<Item = char>> Iterator for CharsWithPosition<I> {
    type Item = char;
    /// Yields the next char while keeping the tracked positions current.
    fn next(&mut self) -> Option<char> {
        // The char we are about to return sits where `next_pos` pointed.
        self.prev_pos = self.next_pos;
        let c = self.iter.next()?;
        if c == '\n' {
            // A newline moves the cursor to the start of the next line.
            self.next_pos.line += 1;
            self.next_pos.column = 1;
        } else {
            self.next_pos.column += 1;
        }
        Some(c)
    }
}
/// An iterator adaptor that allows peeking arbitrarily far ahead.
///
/// Items pulled from the underlying iterator by `peek` are buffered and
/// handed back, in order, by `next` before the underlying iterator is
/// consulted again.
#[derive(Clone, Debug)]
pub struct MultiPeek<I: Iterator> {
    // The wrapped iterator.
    iter: I,
    // Items peeked but not yet returned by `next` (front = oldest).
    buf: VecDeque<I::Item>,
}
// The two adjacent `impl<I: Iterator> MultiPeek<I>` blocks were merged
// into one for consistency.
impl<I: Iterator> MultiPeek<I> {
    /// Wraps `iter` with an initially empty peek buffer.
    pub fn new(iter: I) -> MultiPeek<I> {
        MultiPeek {
            iter,
            buf: VecDeque::new(),
        }
    }
    /// Returns a reference to the element `index` positions ahead of the
    /// cursor (0 = the element `next` would return), or `None` if the
    /// underlying iterator is exhausted before that point.
    pub fn peek(&mut self, index: usize) -> Option<&I::Item> {
        // Pull just enough extra elements to cover `index`.
        while self.buf.len() <= index {
            match self.iter.next() {
                Some(x) => self.buf.push_back(x),
                None => break,
            }
        }
        self.buf.get(index)
    }
    /// Returns a reference to the underlying iterator.
    pub fn inner(&self) -> &I {
        &self.iter
    }
}
impl<I: Iterator> Iterator for MultiPeek<I> {
    type Item = I::Item;
    fn next(&mut self) -> Option<I::Item> {
        // Serve previously-peeked items first, then the source iterator.
        self.buf.pop_front().or_else(|| self.iter.next())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn chars_with_position() {
        // Checks the returned char plus both tracked positions in one shot.
        macro_rules! assert_chars_with_pos {
            ($iter:expr, $val:expr, $prev_pos:expr, $next_pos:expr) => {
                assert_eq!($iter.next(), $val);
                assert_eq!($iter.prev_pos(), Position { line: $prev_pos.0, column: $prev_pos.1 });
                assert_eq!($iter.next_pos(), Position { line: $next_pos.0, column: $next_pos.1 });
            }
        }
        let mut iter = CharsWithPosition::new("abc\ndef".chars());
        assert_chars_with_pos!(iter, Some('a'), (1, 1), (1, 2));
        assert_chars_with_pos!(iter, Some('b'), (1, 2), (1, 3));
        assert_chars_with_pos!(iter, Some('c'), (1, 3), (1, 4));
        // The newline itself is reported at (1, 4); the following char
        // starts line 2.
        assert_chars_with_pos!(iter, Some('\n'), (1, 4), (2, 1));
        assert_chars_with_pos!(iter, Some('d'), (2, 1), (2, 2));
        assert_chars_with_pos!(iter, Some('e'), (2, 2), (2, 3));
        assert_chars_with_pos!(iter, Some('f'), (2, 3), (2, 4));
        // Positions stay frozen once the source is exhausted.
        assert_chars_with_pos!(iter, None, (2, 4), (2, 4));
    }
    #[test]
    fn multi_peek() {
        // Peeking is idempotent, out-of-order peeks work, and `next`
        // drains the peek buffer before the source iterator.
        let mut iter = MultiPeek::new(vec![0, 1, 2, 3, 4].into_iter());
        assert_eq!(iter.peek(0), Some(&0));
        assert_eq!(iter.peek(0), Some(&0));
        assert_eq!(iter.peek(2), Some(&2));
        assert_eq!(iter.peek(2), Some(&2));
        assert_eq!(iter.peek(1), Some(&1));
        assert_eq!(iter.next(), Some(0));
        assert_eq!(iter.next(), Some(1));
        assert_eq!(iter.peek(1), Some(&3));
        assert_eq!(iter.peek(0), Some(&2));
        assert_eq!(iter.next(), Some(2));
        assert_eq!(iter.peek(0), Some(&3));
        assert_eq!(iter.peek(1), Some(&4));
        assert_eq!(iter.peek(2), None);
        assert_eq!(iter.peek(3), None);
        assert_eq!(iter.peek(4), None);
        assert_eq!(iter.peek(1), Some(&4));
        assert_eq!(iter.peek(0), Some(&3));
        assert_eq!(iter.next(), Some(3));
        assert_eq!(iter.next(), Some(4));
        assert_eq!(iter.next(), None);
        assert_eq!(iter.next(), None);
        assert_eq!(iter.next(), None);
    }
}
|
#![allow(clippy::comparison_chain)]
#![allow(clippy::collapsible_if)]
use std::cmp::Reverse;
use std::cmp::{max, min};
use std::collections::{BTreeSet, HashMap, HashSet};
use std::fmt::Debug;
use itertools::Itertools;
use whiteread::parse_line;
// 1e9+7, the usual competitive-programming prime modulus.
// NOTE(review): both constants appear unused in this file.
const ten97: usize = 1000_000_007;
/// Multiplicative inverse of 2 mod `ten97`; use it when division by 2
/// is needed under the modulus.
const inv2ten97: u128 = 500_000_004;
/// Reads `n` days of (A, B) costs and decides, per day, whether to take
/// choice A or B so that the running total lands exactly on `s` after
/// day `n`; prints the choice string or "Impossible".
fn main() {
    let (n, s): (usize, usize) = parse_line().unwrap();
    // Per-day pair of candidate amounts: .0 for choice 'A', .1 for 'B'.
    let mut abab: Vec<(usize, usize)> = vec![];
    for _ in 0..n {
        abab.push(parse_line().unwrap());
    }
    // dp[day][en] == 'A'/'B' when total `en` is reachable after `day`
    // days via that last choice; 'a' is the "unreachable" sentinel.
    let mut dp: Vec<Vec<char>> = vec![vec!['a'; s + 1]; n + 1];
    // Initialization for day 1.
    if abab[0].0 <= s {
        dp[1][abab[0].0] = 'A';
    }
    if abab[0].1 <= s {
        dp[1][abab[0].1] = 'B';
    }
    // Forward DP: extend every reachable total by today's A or B amount.
    for day in 2..=n {
        for en in 1..=s {
            if en + abab[day - 1].0 <= s && (dp[day - 1][en] == 'A' || dp[day - 1][en] == 'B') {
                dp[day][en + abab[day - 1].0] = 'A';
            }
            if en + abab[day - 1].1 <= s && (dp[day - 1][en] == 'A' || dp[day - 1][en] == 'B') {
                dp[day][en + abab[day - 1].1] = 'B';
            }
        }
    }
    if dp[n][s] == 'a' {
        println!("Impossible");
        return;
    }
    // Walk the DP table backwards from (n, s) to reconstruct the choices.
    let mut now = s;
    let mut ans = "".to_owned();
    for day in (1..=n).rev() {
        if dp[day][now] == 'A' {
            now -= abab[day - 1].0;
            ans.push('A');
        } else if dp[day][now] == 'B' {
            now -= abab[day - 1].1;
            ans.push('B');
        } else {
            // dp[n][s] was reachable, so every step back must be marked.
            panic!("wtf");
        }
    }
    // Choices were collected last-day-first; reverse before printing.
    println!("{}", ans.chars().rev().collect::<String>());
}
|
extern crate advent_14;
use std::cmp;
use std::error::Error;
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
/// Returns a reference to the largest element of `lst`, or `None` when
/// the slice is empty.
///
/// On ties the *first* maximal element is returned, matching the
/// original strictly-greater comparison. The old implementation
/// (`map(..).last().unwrap()`) panicked on an empty list; this version
/// returns `None` instead. Accepting `&[T]` is backward-compatible:
/// `&Vec<T>` arguments deref-coerce to it.
fn find_max<T: Ord>(lst: &[T]) -> Option<&T> {
    lst.iter().fold(None, |best, item| match best {
        // Keep the current best on ties or when it is still larger.
        Some(b) if item <= b => Some(b),
        _ => Some(item),
    })
}
/// Advent of Code 2015 day 14: reads reindeer stats from `input`.
/// Part 1 prints the farthest distance travelled after 2503 seconds;
/// part 2 awards one point per second to every currently-leading
/// reindeer and prints the highest score.
fn main() {
    let path = Path::new("input");
    let display = path.display();
    let mut file = match File::open(&path) {
        // `Error::description` is deprecated (and returns a useless
        // placeholder on modern toolchains); format the error via
        // `Display` instead.
        Err(why) => panic!("couldn't open {}: {}", display, why),
        Ok(file) => file,
    };
    let mut s = String::new();
    if let Err(why) = file.read_to_string(&mut s) {
        panic!("couldn't read {}: {}", display, why);
    }
    // Part 1: maximum distance after 2503 seconds.
    let mut max: u32 = 0;
    for line in s.split("\n") {
        if line.is_empty() {
            continue;
        }
        let (speed, fly, rest) = advent_14::parse_string(&line.to_string());
        max = cmp::max(max, advent_14::total_distance(speed, fly, rest, 2503));
    }
    println!("Max: {}", max);
    // Part 2: one point per second for whoever is in the lead.
    // NOTE(review): 9 is the hard-coded reindeer count — confirm it
    // matches the number of non-empty lines in `input`.
    let mut points = vec![0; 9];
    let mut reindeer = vec![0; 9];
    for seconds in 1..2504 {
        for (i, line) in s.split("\n").enumerate() {
            if line.is_empty() {
                continue;
            }
            let (speed, fly, rest) = advent_14::parse_string(&line.to_string());
            reindeer[i] = advent_14::total_distance(speed, fly, rest, seconds);
        }
        let m = find_max(&reindeer).expect("Need a max");
        for (i, r) in reindeer.iter().enumerate() {
            if r == m {
                points[i] += 1;
            }
        }
    }
    println!("Highest points: {}", find_max(&points).expect("Need a max"));
}
|
pub use VkCommandPoolTrimFlags::*;
/// Flag values for command-pool trimming; only a zero/"null" bit is
/// defined here.
#[repr(u32)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum VkCommandPoolTrimFlags {
    VK_COMMAND_POOL_TRIM_NULL_BIT = 0,
}
use crate::SetupVkFlags;
/// FFI-safe newtype holding the raw `u32` flag bits.
#[repr(C)]
#[derive(Clone, Copy, Eq, PartialEq, Hash)]
pub struct VkCommandPoolTrimFlagBits(u32);
// Macro from the crate root that wires the enum and the bits newtype
// together (conversions/operators).
SetupVkFlags!(VkCommandPoolTrimFlags, VkCommandPoolTrimFlagBits);
|
// Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(clippy::uninlined_format_args)]
use std::env;
use std::ops::Deref;
use std::sync::Arc;
use std::time::Duration;
use anyerror::AnyError;
use common_base::base::tokio;
use common_base::base::StopHandle;
use common_base::base::Stoppable;
use common_base::mem_allocator::GlobalAllocator;
use common_grpc::RpcClientConf;
use common_meta_sled_store::init_sled_db;
use common_meta_store::MetaStoreProvider;
use common_meta_types::Cmd;
use common_meta_types::LogEntry;
use common_meta_types::MetaAPIError;
use common_meta_types::Node;
use common_metrics::init_default_metrics_recorder;
use common_tracing::init_logging;
use common_tracing::set_panic_hook;
use databend_meta::api::GrpcServer;
use databend_meta::api::HttpService;
use databend_meta::configs::Config;
use databend_meta::meta_service::MetaNode;
use databend_meta::version::METASRV_COMMIT_VERSION;
use databend_meta::version::METASRV_SEMVER;
use databend_meta::version::MIN_METACLI_SEMVER;
use tracing::info;
use tracing::warn;
mod kvapi;
pub use kvapi::KvApiCommand;
use crate::tokio::time::sleep;
// Route all heap allocations through the crate's global allocator.
#[global_allocator]
pub static GLOBAL_ALLOCATOR: GlobalAllocator = GlobalAllocator;
// `--cmd` prefix selecting kvapi sub-commands, e.g. `--cmd kvapi::get`.
const CMD_KVAPI_PREFIX: &str = "kvapi::";
/// Entry point: handles one-shot `--cmd` invocations, then boots the
/// meta service — optional sentry, logging, sled store, raft MetaNode,
/// HTTP admin API and gRPC API — joins the raft cluster, registers this
/// node's addresses, and blocks until asked to shut down.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let conf = Config::load()?;
    conf.validate()?;
    // One-shot commands (ver / show-config / kvapi::*) print and exit.
    if run_cmd(&conf).await {
        return Ok(());
    }
    // Sentry is enabled only when DATABEND_SENTRY_DSN is non-empty; the
    // guard must stay alive for the whole process, hence the outer binding.
    let mut _sentry_guard = None;
    let bend_sentry_env = env::var("DATABEND_SENTRY_DSN").unwrap_or_else(|_| "".to_string());
    if !bend_sentry_env.is_empty() {
        // NOTE: `traces_sample_rate` is 0.0 by default, which disable sentry tracing
        let traces_sample_rate = env::var("SENTRY_TRACES_SAMPLE_RATE").ok().map_or(0.0, |s| {
            s.parse()
                .unwrap_or_else(|_| panic!("`{}` was defined but could not be parsed", s))
        });
        _sentry_guard = Some(sentry::init((bend_sentry_env, sentry::ClientOptions {
            release: common_tracing::databend_semver!(),
            traces_sample_rate,
            ..Default::default()
        })));
    }
    set_panic_hook();
    let _guards = init_logging("databend-meta", &conf.log);
    info!("Databend Meta version: {}", METASRV_COMMIT_VERSION.as_str());
    info!(
        "Databend Meta start with config: {:?}",
        serde_json::to_string_pretty(&conf).unwrap()
    );
    conf.raft_config.check()?;
    // Leave cluster and quit if `--leave-via` and `--leave-id` is specified.
    let has_left = MetaNode::leave_cluster(&conf.raft_config).await?;
    if has_left {
        info!("node {:?} has left cluster", conf.raft_config.leave_id);
        return Ok(());
    }
    init_sled_db(conf.raft_config.raft_dir.clone());
    init_default_metrics_recorder();
    info!(
        "Starting MetaNode single: {} with config: {:?}",
        conf.raft_config.single, conf
    );
    let meta_node = MetaNode::start(&conf).await?;
    // Collects stoppable services; stop_tx fires on SIGTERM/ctrl-c.
    let mut stop_handler = StopHandle::<AnyError>::create();
    let stop_tx = StopHandle::<AnyError>::install_termination_handle();
    // HTTP API service.
    {
        let mut srv = HttpService::create(conf.clone(), meta_node.clone());
        info!("HTTP API server listening on {}", conf.admin_api_address);
        srv.start().await.expect("Failed to start http server");
        stop_handler.push(srv);
    }
    // gRPC API service.
    {
        let mut srv = GrpcServer::create(conf.clone(), meta_node.clone());
        info!(
            "Databend meta server listening on {}",
            conf.grpc_api_address.clone()
        );
        srv.start().await.expect("Databend meta service error");
        stop_handler.push(Box::new(srv));
    }
    // Join a raft cluster only after all service started.
    let join_res = meta_node
        .join_cluster(&conf.raft_config, conf.grpc_api_advertise_address())
        .await?;
    info!("Join result: {:?}", join_res);
    register_node(&meta_node, &conf).await?;
    // Print information to users.
    println!("Databend Metasrv");
    println!();
    println!("Version: {}", METASRV_COMMIT_VERSION.as_str());
    println!("Log:");
    println!("    File: {}", conf.log.file);
    println!("    Stderr: {}", conf.log.stderr);
    println!("Id: {}", conf.raft_config.config_id);
    println!("Raft Cluster Name: {}", conf.raft_config.cluster_name);
    println!("Raft Dir: {}", conf.raft_config.raft_dir);
    println!(
        "Raft Status: {}",
        if conf.raft_config.single {
            "single".to_string()
        } else {
            format!("join {:#?}", conf.raft_config.join)
        }
    );
    println!();
    println!("HTTP API");
    println!("   listened at {}", conf.admin_api_address);
    println!("gRPC API");
    println!("   listened at {}", conf.grpc_api_address);
    // Block here until a termination signal arrives, then stop services.
    stop_handler.wait_to_terminate(stop_tx).await;
    info!("Databend-meta is done shutting down");
    Ok(())
}
/// The meta service GRPC API address can be changed by administrator in the config file.
///
/// Thus every time a meta server starts up, re-register the node info to broadcast its latest grpc address
async fn register_node(meta_node: &Arc<MetaNode>, conf: &Config) -> Result<(), anyhow::Error> {
    info!(
        "Register node to update raft_api_advertise_host_endpoint and grpc_api_advertise_address"
    );
    // Give the cluster ten election periods to produce an active leader.
    let wait_leader_timeout = Duration::from_millis(conf.raft_config.election_timeout().1 * 10);
    info!(
        "Wait {:?} for active leader to register node, raft election timeouts: {:?}",
        wait_leader_timeout,
        conf.raft_config.election_timeout()
    );
    let wait = meta_node.raft.wait(Some(wait_leader_timeout));
    let metrics = wait
        .metrics(|x| x.current_leader.is_some(), "receive an active leader")
        .await?;
    info!("Current raft node metrics: {:?}", metrics);
    let leader_id = metrics.current_leader.unwrap();
    // Bounded retry: leadership may move while the request is forwarded.
    for _i in 0..20 {
        if meta_node.get_node(&leader_id).await?.is_none() {
            warn!("Leader node is not replicated to local store, wait and try again");
            // NOTE(review): there is no `continue` here, so registration
            // is still attempted below even when the leader is not yet in
            // the local store — confirm this is intentional.
            sleep(Duration::from_millis(500)).await
        }
        info!(
            "Leader node is replicated to local store. About to register node with grpc-advertise-addr"
        );
        let res = do_register(meta_node, conf).await;
        info!("Register-node result: {:?}", res);
        match res {
            Ok(_) => {
                return Ok(());
            }
            Err(e) => {
                match &e {
                    MetaAPIError::ForwardToLeader(f) => {
                        info!(
                            "Leader changed, sleep a while and retry forwarding to {:?}",
                            f
                        );
                        sleep(Duration::from_millis(500)).await;
                        continue;
                    }
                    MetaAPIError::CanNotForward(any_err) => {
                        info!(
                            "Leader changed, can not forward, sleep a while and retry: {:?}",
                            any_err
                        );
                        sleep(Duration::from_millis(500)).await;
                        continue;
                    }
                    _ => {
                        // un-handle-able error
                        return Err(e.into());
                    }
                }
            }
        }
    }
    // All retries exhausted. This path *is* reachable (every loop
    // iteration either returns or continues), so the former
    // `unreachable!()` here was a mislabelled panic; report a proper
    // error instead.
    Err(anyhow::anyhow!("Tried too many times registering node"))
}
/// Writes this node's id, raft endpoint and grpc advertise address into
/// the cluster via a raft `AddNode` log entry.
async fn do_register(meta_node: &Arc<MetaNode>, conf: &Config) -> Result<(), MetaAPIError> {
    let node_id = meta_node.sto.id;
    let raft_endpoint = conf.raft_config.raft_api_advertise_host_endpoint();
    let node = Node::new(node_id, raft_endpoint)
        .with_grpc_advertise_address(conf.grpc_api_advertise_address());
    // `overriding: true` replaces any stale info registered under this id.
    let ent = LogEntry {
        txid: None,
        time_ms: None,
        cmd: Cmd::AddNode {
            node_id,
            node,
            overriding: true,
        },
    };
    info!("Raft log entry for updating node: {:?}", ent);
    meta_node.write(ent).await?;
    info!("Done register");
    Ok(())
}
/// Runs a single `kvapi::<op>` command against the local meta service
/// over gRPC, printing the result to stdout or any error to stderr.
async fn run_kvapi_command(conf: &Config, op: &str) {
    // Parse the requested operation into a concrete kvapi command.
    let kv_cmd = match KvApiCommand::from_config(conf, op) {
        Ok(cmd) => cmd,
        Err(e) => {
            eprintln!("{}", e);
            return;
        }
    };
    // Connect to the configured gRPC endpoint with the configured credentials.
    let rpc_conf = RpcClientConf {
        endpoints: vec![conf.grpc_api_address.clone()],
        username: conf.username.clone(),
        password: conf.password.clone(),
        ..Default::default()
    };
    let client = match MetaStoreProvider::new(rpc_conf).create_meta_store().await {
        Ok(s) => Arc::new(s),
        Err(e) => {
            eprintln!("{}", e);
            return;
        }
    };
    match kv_cmd.execute(client).await {
        Ok(res) => println!("{}", res),
        Err(e) => eprintln!("{}", e),
    }
}
/// Handles a `--cmd` invocation. Returns `true` when a command was
/// present (and has been handled), `false` when the server should boot
/// normally.
async fn run_cmd(conf: &Config) -> bool {
    if conf.cmd.is_empty() {
        return false;
    }
    match conf.cmd.as_str() {
        "ver" => {
            println!("version: {}", METASRV_SEMVER.deref());
            println!("min-compatible-client-version: {}", MIN_METACLI_SEMVER);
        }
        "show-config" => {
            println!(
                "config:\n{}",
                pretty(&conf).unwrap_or_else(|e| format!("error format config: {}", e))
            );
        }
        cmd => {
            // A `Some` from `strip_prefix` doubles as the prefix test,
            // replacing the old starts_with + strip_prefix pair.
            if let Some(op) = cmd.strip_prefix(CMD_KVAPI_PREFIX) {
                run_kvapi_command(conf, op).await;
                return true;
            }
            eprintln!("Invalid cmd: {}", conf.cmd);
            eprintln!("Available cmds:");
            eprintln!(" --cmd ver");
            eprintln!(" Print version and min compatible meta-client version");
            eprintln!(" --cmd show-config");
            eprintln!(" Print effective config");
            eprintln!(" --cmd kvapi::<cmd>");
            eprintln!(" Run kvapi command (upsert, get, mget, list)");
        }
    }
    true
}
fn pretty<T>(v: &T) -> Result<String, serde_json::Error>
where T: serde::Serialize {
serde_json::to_string_pretty(v)
}
|
// Copyright (c) 2016-2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! In Butterfly, as in life, new rumors are "hot", but they get less
//! exciting the more you hear them. For a given rumor, we keep track
//! of how many times we've sent it to each member. Once we've sent
//! that member the rumor a maximum number of times, the rumor has
//! "cooled off". At that point we'll stop sending that rumor to the
//! member; by now they will have heard it!
//!
//! Note that the "heat" of a rumor is tracked *per member*, and is
//! not global.
// Standard Library
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
// Internal Modules
use rumor::RumorKey;
// TODO (CM): Can we key by member instead? What do we do more frequently?
// TODO (CM): Might want to type the member ID explicitly
// TODO (CM): what do we do with rumors that have officially
// "cooled off"? Can we just remove them?
/// The number of times a rumor will be shared before it goes cold for
/// that member.
// NOTE: This doesn't strictly need to be public, but making it so allows it
// to be present in generated documentation (the documentation strings
// of the functions in this module make reference to it).
// With a limit of 2, a member is sent any given rumor at most twice
// before `currently_hot_rumors` stops returning it for that member.
pub const RUMOR_COOL_DOWN_LIMIT: usize = 2;
/// Tracks the number of times a given rumor has been sent to each
/// member of the supervision ring. This models the "heat" of a
/// rumor; if a member has never heard it, it's "hot", but it "cools
/// off" with each successive hearing.
///
/// When a rumor changes, we can effectively reset things by starting
/// the rumor mill up again. This will zero out all counters for every
/// member, starting the sharing cycle over again.
#[derive(Debug, Clone)]
// Map: rumor key -> (member id -> times sent). Shared behind
// Arc<RwLock<..>> so clones of RumorHeat observe the same state.
pub struct RumorHeat(Arc<RwLock<HashMap<RumorKey, HashMap<String, usize>>>>);
impl RumorHeat {
/// Add a rumor to track; members will see it as "hot".
///
/// If the rumor was already being tracked, we reset all
/// previously-recorded "heat" information; the rumor is once
/// again "hot" for _all_ members.
pub fn start_hot_rumor<T: Into<RumorKey>>(&self, rumor: T) {
let rk: RumorKey = rumor.into();
let mut rumors = self.0.write().expect("RumorHeat lock poisoned");
rumors.insert(rk, HashMap::new());
}
/// Return a list of currently "hot" rumors for the specified
/// member. This will be the subset of all rumors being tracked
/// which have not already been sent to the member more than
/// `RUMOR_COOL_DOWN_LIMIT` times.
///
/// These rumors will be sorted by their "heat"; coldest rumors
/// first, hotter rumors later. That is, rumors that have been
/// shared `RUMOR_COOL_DOWN_LIMIT - 1` times will come first,
/// followed by those that have been shared `RUMOR_COOL_DOWN_LIMIT
/// -2` times, and so on, with those that have _never_ been
/// shared with the member coming last.
///
/// **NOTE**: The ordering of rumors within each of these "heat"
/// cohorts is currently undefined.
pub fn currently_hot_rumors(&self, id: &str) -> Vec<RumorKey> {
let mut rumor_heat: Vec<(RumorKey, usize)> = self
.0
.read()
.expect("RumorHeat lock poisoned")
.iter()
.map(|(k, heat_map)| (k.clone(), heat_map.get(id).unwrap_or(&0).clone()))
.filter(|&(_, heat)| heat < RUMOR_COOL_DOWN_LIMIT)
.collect();
// Reverse sorting by heat; 0s come last!
rumor_heat.sort_by(|&(_, ref h1), &(_, ref h2)| h2.cmp(h1));
// We don't need the heat anymore, just return the rumors.
rumor_heat.into_iter().map(|(k, _)| k).collect()
}
/// For each rumor given, "cool" the rumor for the given member by
/// incrementing the count for how many times it has been sent
/// out. As a rumor cools, it will eventually cross a threshold
/// past which it will no longer be gossipped to the member.
///
/// Call this after sending rumors out across the network.
///
/// **NOTE**: "cool" in the name of the function is a *verb*; you're
/// not going to get a list of cool rumors from this.
pub fn cool_rumors(&self, id: &str, rumors: &[RumorKey]) {
if rumors.len() > 0 {
let mut rumor_map = self.0.write().expect("RumorHeat lock poisoned");
for ref rk in rumors {
if rumor_map.contains_key(&rk) {
let heat_map = rumor_map.get_mut(&rk).unwrap();
if heat_map.contains_key(id) {
let heat = heat_map.get_mut(id).unwrap();
*heat += 1;
} else {
heat_map.insert(String::from(id), 1);
}
} else {
debug!(
"Rumor does not exist in map; was probably deleted between retrieval \
and sending"
);
}
}
}
}
}
impl Default for RumorHeat {
    /// Starts with an empty map: no rumors tracked, everything cold.
    fn default() -> RumorHeat {
        // `HashMap::default()` is the same empty map as `HashMap::new()`.
        RumorHeat(Arc::new(RwLock::new(HashMap::default())))
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use error::Result;
    use protocol::{self, newscast};
    use rumor::{Rumor, RumorKey, RumorType};
    use uuid::Uuid;
    // TODO (CM): This FakeRumor implementation is copied from
    // rumor.rs; factor this helper code better.
    /// Minimal stand-in rumor with a random id, letting these tests
    /// drive `RumorHeat` without constructing real rumor types.
    #[derive(Clone, Debug, Serialize)]
    struct FakeRumor {
        pub id: String,
        pub key: String,
    }
    impl Default for FakeRumor {
        fn default() -> FakeRumor {
            FakeRumor {
                // Random id so each test rumor gets a distinct RumorKey.
                id: format!("{}", Uuid::new_v4().to_simple_ref()),
                key: String::from("fakerton"),
            }
        }
    }
    impl Rumor for FakeRumor {
        fn kind(&self) -> RumorType {
            RumorType::Fake
        }
        fn key(&self) -> &str {
            &self.key
        }
        fn id(&self) -> &str {
            &self.id
        }
        fn merge(&mut self, mut _other: FakeRumor) -> bool {
            false
        }
    }
    impl protocol::FromProto<newscast::Rumor> for FakeRumor {
        fn from_proto(_other: newscast::Rumor) -> Result<Self> {
            Ok(FakeRumor::default())
        }
    }
    impl From<FakeRumor> for newscast::Rumor {
        fn from(_other: FakeRumor) -> newscast::Rumor {
            newscast::Rumor::default()
        }
    }
    impl protocol::Message<newscast::Rumor> for FakeRumor {
        fn from_bytes(_bytes: &[u8]) -> Result<Self> {
            Ok(FakeRumor::default())
        }
        fn write_to_bytes(&self) -> Result<Vec<u8>> {
            Ok(Vec::from(format!("{}-{}", self.id, self.key).as_bytes()))
        }
    }
    /// Helper function that tests that a given rumor is currently
    /// considered "hot" for the given member.
    fn assert_rumor_is_hot<T>(heat: &RumorHeat, member_id: &str, rumor: T)
    where
        T: Into<RumorKey>,
    {
        let key = rumor.into();
        let hot_rumors = heat.currently_hot_rumors(&member_id);
        assert!(hot_rumors.contains(&key));
    }
    /// Helper function that tests that a given rumor is currently
    /// NOT considered "hot" for the given member.
    fn assert_rumor_is_cold<T>(heat: &RumorHeat, member_id: &str, rumor: T)
    where
        T: Into<RumorKey>,
    {
        let key = rumor.into();
        let hot_rumors = heat.currently_hot_rumors(&member_id);
        assert!(!hot_rumors.contains(&key));
    }
    /// Helper function that takes a rumor that has already been
    /// introduced into the `RumorHeat` and cools it enough to no
    /// longer be considered "hot".
    fn cool_rumor_completely<T>(heat: &RumorHeat, member_id: &str, rumor: T)
    where
        T: Into<RumorKey>,
    {
        let rumor_keys = &[rumor.into()];
        // One cooling per send; RUMOR_COOL_DOWN_LIMIT sends crosses the
        // hot/cold threshold.
        for _ in 0..RUMOR_COOL_DOWN_LIMIT {
            heat.cool_rumors(&member_id, rumor_keys);
        }
    }
    #[test]
    fn there_are_no_hot_rumors_to_begin_with() {
        let heat = RumorHeat::default();
        let member_id = "test_member";
        let hot_rumors = heat.currently_hot_rumors(&member_id);
        assert!(hot_rumors.is_empty());
    }
    #[test]
    fn a_hot_rumor_is_returned_as_such() {
        let heat = RumorHeat::default();
        let member_id = "test_member";
        let rumor = FakeRumor::default();
        heat.start_hot_rumor(&rumor);
        let hot_rumors = heat.currently_hot_rumors(&member_id);
        assert_eq!(hot_rumors.len(), 1);
        assert_eq!(hot_rumors[0], RumorKey::from(&rumor));
    }
    #[test]
    fn a_hot_rumor_eventually_cools_off() {
        let heat = RumorHeat::default();
        let member_id = "test_member";
        let rumor = FakeRumor::default();
        let rumor_key = RumorKey::from(&rumor);
        let rumor_keys = &[rumor_key.clone()];
        heat.start_hot_rumor(&rumor);
        // Simulate going through the requisite number of gossip
        // cycles to cool the rumor down
        //
        // Not using the helper function here, as this function is
        // what this test is actually testing.
        for _ in 0..RUMOR_COOL_DOWN_LIMIT {
            assert_rumor_is_hot(&heat, &member_id, &rumor);
            heat.cool_rumors(&member_id, rumor_keys);
        }
        // At this point, our member should have heard this rumor
        // enough that it's no longer hot
        let hot_rumors = heat.currently_hot_rumors(&member_id);
        assert!(!hot_rumors.contains(&rumor_key));
    }
    #[test]
    fn rumors_can_become_hot_again_by_restarting_them() {
        let heat = RumorHeat::default();
        let member_id = "test_member";
        let rumor = FakeRumor::default();
        heat.start_hot_rumor(&rumor);
        // Simulate going through the requisite number of gossip
        // cycles to cool the rumor down
        cool_rumor_completely(&heat, &member_id, &rumor);
        // At this point, our member should have heard this rumor
        // enough that it's no longer hot
        assert_rumor_is_cold(&heat, &member_id, &rumor);
        // NOW we'll start the rumor again!
        heat.start_hot_rumor(&rumor);
        // Rumors... *so hot right now*
        assert_rumor_is_hot(&heat, &member_id, &rumor);
    }
    #[test]
    fn rumor_heat_is_tracked_per_member() {
        let heat = RumorHeat::default();
        let member_one = "test_member_1";
        let member_two = "test_member_2";
        let rumor = FakeRumor::default();
        heat.start_hot_rumor(&rumor);
        // Both members should see the rumor as hot.
        assert_rumor_is_hot(&heat, &member_one, &rumor);
        assert_rumor_is_hot(&heat, &member_two, &rumor);
        // Now, let's cool the rumor for only one of the members
        cool_rumor_completely(&heat, &member_one, &rumor);
        // Now it should be cold for the one member, but still hot
        // for the other.
        assert_rumor_is_cold(&heat, &member_one, &rumor);
        assert_rumor_is_hot(&heat, &member_two, &rumor);
    }
    #[test]
    fn hot_rumors_are_sorted_colder_to_warmer() {
        let heat = RumorHeat::default();
        let member = "test_member";
        // TODO (CM): for ease of test reading (esp. with failures), I'd like fake
        // rumors that I can control the IDs
        let hot_rumor = FakeRumor::default();
        let warm_rumor = FakeRumor::default();
        let cold_rumor = FakeRumor::default();
        // Start all rumors off as hot
        heat.start_hot_rumor(&hot_rumor);
        heat.start_hot_rumor(&warm_rumor);
        heat.start_hot_rumor(&cold_rumor);
        // Cool some rumors off, to varying degrees
        let hot_key = RumorKey::from(&hot_rumor);
        let warm_key = RumorKey::from(&warm_rumor);
        // Freeze this one right out
        cool_rumor_completely(&heat, &member, &cold_rumor);
        // Cool this one off just a little bit
        heat.cool_rumors(&member, &[warm_key.clone()]);
        // cold_rumor should be completely out, and the cooler
        // rumor sorts before the hotter one.
        let rumors = heat.currently_hot_rumors(&member);
        let expected_hot_rumors = &[warm_key.clone(), hot_key.clone()];
        assert_eq!(rumors, expected_hot_rumors);
    }
}
|
use async_dup::{Arc, Mutex};
use async_std::io::{Read, Result, Write};
use async_std::net::TcpStream;
use async_tls::server::TlsStream;
use std::pin::Pin;
use std::task::{Context, Poll};
/// A cloneable handle to a server-side TLS stream.
///
/// `TlsStream` itself cannot be duplicated, so it is shared behind
/// `async_dup::Arc<Mutex<_>>`; every clone of the wrapper talks to the
/// same underlying connection. The `Pin::new(&mut &*self.0)` delegation
/// below relies on async_dup implementing the async I/O traits for
/// `&Mutex<T>` — presumably locking per poll; confirm against async_dup
/// docs if contention matters.
#[derive(Clone)]
pub(crate) struct TlsStreamWrapper(Arc<Mutex<TlsStream<TcpStream>>>);
impl TlsStreamWrapper {
    /// Takes ownership of `stream` and wraps it for shared, cloneable use.
    pub(crate) fn new(stream: TlsStream<TcpStream>) -> Self {
        Self(Arc::new(Mutex::new(stream)))
    }
}
impl Read for TlsStreamWrapper {
    // Delegates to the shared stream through the Arc/Mutex.
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut [u8],
    ) -> Poll<Result<usize>> {
        Pin::new(&mut &*self.0).poll_read(cx, buf)
    }
}
impl Write for TlsStreamWrapper {
    fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll<Result<usize>> {
        Pin::new(&mut &*self.0).poll_write(cx, buf)
    }
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
        Pin::new(&mut &*self.0).poll_flush(cx)
    }
    fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
        Pin::new(&mut &*self.0).poll_close(cx)
    }
}
|
use super::*;
/// A prefix (unary) operator applied to a single operand expression.
#[derive(Debug, PartialEq)]
pub struct UnaryOp {
    /// Which operator this is.
    pub op: UnaryOpKind,
    /// The operand the operator applies to.
    pub rhs: Box<Node>,
}
/// The kind of a prefix operator.
#[derive(Debug, PartialEq)]
pub enum UnaryOpKind {
    /// ~
    BitNot,
    /// +
    Positive,
    /// -
    Negative,
    /// !
    LogicalNot,
}
impl From<char> for UnaryOpKind {
    /// Maps an operator character to its kind.
    ///
    /// # Panics
    /// Panics on any character other than `~`, `+`, `-` or `!`; the
    /// tokenizer is expected to have filtered these already.
    fn from(c: char) -> Self {
        match c {
            '!' => UnaryOpKind::LogicalNot,
            '-' => UnaryOpKind::Negative,
            '+' => UnaryOpKind::Positive,
            '~' => UnaryOpKind::BitNot,
            _ => unreachable!(),
        }
    }
}
|
mod app;
mod config;
mod subcommands;
use log::error;
use std::process;
/// Entry point: parses CLI args, loads the stored commands, dispatches
/// the chosen subcommand, then persists any changes back to disk.
fn main() {
    let matches = app::init();
    app::init_logger(&matches);
    // A config read failure is fatal: log it and bail with exit code 1.
    let mut commands = config::read().unwrap_or_else(|e| {
        error!("{}", e);
        process::exit(1);
    });
    app::handle_subcommands(&matches, &mut commands);
    // A write failure is only logged; the process still exits cleanly.
    if let Err(e) = config::write(&commands) {
        error!("{}", e);
    }
}
|
//! The application domain.
//!
//! The domain is separated in two main areas:
//! * catalog: contains the basic information for railway models;
//! * collecting: everything related to collecting models, collections and wishlists.
pub mod catalog;
pub mod collecting;
|
use yew::{
format::{
Json,
// Nothing
},
prelude::*,
services::fetch::{FetchService, FetchTask, Request, Response},
};
use yew_router::components::RouterAnchor;
use crate::app::AppRoute;
use yewtil::NeqAssign;
use crate::store::reducer_account::{
AppDispatch,
DataAccountAction,
// DataAccount,
};
use crate::types::{
ResponseLogin,
LocalStorage,
LOCALSTORAGE_KEY,
};
use yewdux::dispatch::Dispatcher;
// use crate::app::AppRoute;
use yew_router::service::RouteService;
// use yew_router
use yew::services::{
ConsoleService,
storage::{ StorageService, Area },
};
use serde::{Deserialize, Serialize};
/// JSON body posted to the login endpoint.
#[derive(Deserialize, Serialize, Debug, Clone)]
pub struct RequestLogin {
    email: String,
    password: String,
}
/// Login form page: posts credentials, stores the session on success,
/// and redirects to the APIs page.
pub struct LoginPage {
    // Keeps the in-flight login request alive; `None` when idle.
    fetch_task: Option<FetchTask>,
    // Current contents of the email/password inputs.
    form_data: RequestLogin,
    // Last login error, if any (displayed in the view).
    error: Option<String>,
    // user: Option<User>,
    link: ComponentLink<Self>,
    // Shared account-state dispatcher (yewdux reducer).
    dispatch: AppDispatch,
    route_service: RouteService,
}
/// Messages driving the login page's update loop.
pub enum Msg {
    // Submit the login form.
    Login,
    // Server reply (or network error) for the login request.
    LoginResponse(Result<ResponseLogin, anyhow::Error>),
    // Form-field edits.
    EditEmail(String),
    EditPassword(String),
}
impl Component for LoginPage {
type Message = Msg;
type Properties = AppDispatch;
    /// Builds the component with empty credentials and no in-flight request.
    fn create(dispatch: Self::Properties, link: ComponentLink<Self>) -> Self {
        let form_data = RequestLogin {
            email: String::from(""),
            password: String::from(""),
        };
        LoginPage {
            fetch_task: None,
            form_data,
            error: None,
            // user: None,
            link,
            dispatch,
            route_service: RouteService::new(),
        }
    }
fn update(&mut self, msg: Self::Message) -> ShouldRender {
use Msg::*;
match msg {
Login => {
// ConsoleService::info(&self.form_data.email);
let request = Request::post("http://localhost:3000/user")
.header("Content-Type", "application/json")
.body(Json(&self.form_data))
// .body(Nothing)
.expect("Could not build request.");
let callback =
self.link
.callback(|response: Response<Json<Result<ResponseLogin, anyhow::Error>>>| {
let Json(data) = response.into_body();
Msg::LoginResponse(data)
});
let task = FetchService::fetch(request, callback).expect("failed to start request");
self.fetch_task = Some(task);
true
}
LoginResponse(response) => {
match response {
Ok(data) => {
ConsoleService::info("response ok");
ConsoleService::info(&format!("{:?}", data));
// ConsoleService::info(&data.email.clone());
// self.user = Some(data.clone());
// UPDATE REDUCER
let newdata = ResponseLogin {
username: String::from(data.username.clone()),
email: String::from(data.email.clone()),
token: String::from(data.token.clone()),
};
self.dispatch.send(DataAccountAction::Update(newdata));
// SET LOCALSTORAGE
let mut storage = StorageService::new(Area::Local).expect("storage was disabled");
let user_data = LocalStorage{
username: Some(data.username),
email: Some(data.email),
token: Some(data.token),
};
let localstorage_data = Json(&user_data);
// // let localstorage_data: Result<String, anyhow::Error> = Ok(String::from("tokendata_telkomdomain"));
storage.store(LOCALSTORAGE_KEY, localstorage_data);
// REDIRECT ROUTE
// let router = RouteService::new();
self.route_service.set_route("/apis", ());
// router.set_route(AppRoute::ApisHome, );
// yew_router::push_route(AppRoute::ApisHome);
}
Err(error) => {
ConsoleService::info("response error");
ConsoleService::info(&error.to_string());
self.error = Some(error.to_string())
}
}
self.fetch_task = None;
true
}
EditEmail(email) => {
self.form_data.email = email;
true
}
EditPassword(password) => {
self.form_data.password = password;
true
}
}
}
fn change(&mut self, dispatch: Self::Properties) -> ShouldRender {
self.dispatch.neq_assign(dispatch)
}
fn view(&self) -> Html {
type Anchor = RouterAnchor<AppRoute>;
let is_fetching:bool = self.fetch_task.is_some();
html! {
<div style="background-color: #dee2e6; min-height: 100vh;">
<div class="login-page form-signin border">
<div class="text-center" style="background-color: white; margin-top: 7vh;">
<div class="logo-image">
<img src="https://www.telkom.co.id/data/image_upload/page/1594112895830_compress_PNG%20Icon%20Telkom.png"
alt="telAuth"
style="width: 80px; height: 80px;"/>
</div>
<form>
<h1 class="h3 mb-3 fw-normal">{"TelAuth"}</h1>
<h1 class="h5 mb-2 fw-normal fs-6">{"Login to TelAuth to continue"}</h1>
<div
class="form-floating m-auto w-75 d-flex justify-content-center mt-4"
>
<input
type="email"
class="d-flex form-control"
id={"floatingInput"}
placeholder="name@example.com"
value=self.form_data.email.clone()
oninput=self.link.callback(|data: InputData| Msg::EditEmail(data.value))
/>
<label for="floatingInput">{"Email address"}</label>
</div>
<div
class="form-floating m-auto w-75 d-flex justify-content-center mt-4"
>
<input
type="password"
class="d-flex form-control"
id={"floatingInput"}
placeholder="password"
value=self.form_data.password.clone()
oninput=self.link.callback(|data: InputData| Msg::EditPassword(data.value))
/>
<label for="floatingInput">{"Password"}</label>
</div>
<button
type="button"
onclick=self.link.callback(|_| Msg::Login)
class="w-75 btn btn-lg btn-primary mt-3 fs-6"
// class={
// if is_fetching {
// } else {
// "w-75 btn btn-lg btn-primary mt-3 fs-6"
// }
// }
disabled={ if is_fetching {true} else {false} }
>
{
if is_fetching {
html! {
<div class="d-flex justify-content-center">
<div class="spinner-border" role="status">
<span class="visually-hidden">{"Loading..."}</span>
</div>
</div>
}
} else {
html! {
"Continue"
}
}
}
</button>
<button
type="button"
class="w-75 btn btn-lg btn-secondary mt-3 fs-6"
>
<Anchor
route=AppRoute::RegisterPage
classes="text-decoration-none text-light px-2 link-primary pe-auto"
>
{"Register"}
</Anchor>
</button>
{
if let Some(ref error) = self.error {
html! {
<p class="mt-3 text-danger">
{ error.clone() }
</p>
}
} else {
html! { }
}
}
// { self.view_user() }
<h1 class="h3 mt-3 mb-1 fw-normal" style="font-family: fakt-web, Helvetica Neue, Helvetica, sans-serif;">{"or"}</h1>
<button class="w-75 btn btn-lg btn-outline-dark mt-3 fs-6" type="submit">
<div class="text-start">
<span>
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-linkedin" viewBox="0 0 16 16">
<path d="M0 1.146C0 .513.526 0 1.175 0h13.65C15.474 0 16 .513 16 1.146v13.708c0 .633-.526 1.146-1.175 1.146H1.175C.526 16 0 15.487 0 14.854V1.146zm4.943 12.248V6.169H2.542v7.225h2.401zm-1.2-8.212c.837 0 1.358-.554 1.358-1.248-.015-.709-.52-1.248-1.342-1.248-.822 0-1.359.54-1.359 1.248 0 .694.521 1.248 1.327 1.248h.016zm4.908 8.212V9.359c0-.216.016-.432.08-.586.173-.431.568-.878 1.232-.878.869 0 1.216.662 1.216 1.634v3.865h2.401V9.25c0-2.22-1.184-3.252-2.764-3.252-1.274 0-1.845.7-2.165 1.193v.025h-.016a5.54 5.54 0 0 1 .016-.025V6.169h-2.4c.03.678 0 7.225 0 7.225h2.4z"/>
</svg>
</span>
<span class="ms-4">{"Login with LinkedIn"}</span>
</div>
</button>
<button class="w-75 btn btn-lg btn-outline-dark mt-3 fs-6" type="submit">
<div class="text-start">
<span>
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-github" viewBox="0 0 16 16">
<path d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.012 8.012 0 0 0 16 8c0-4.42-3.58-8-8-8z"/>
</svg>
</span>
<span class="ms-4">{"Login with Github"}</span>
</div>
</button>
<button class="w-75 btn btn-lg btn-outline-dark mt-3 mb-3 fs-6" type="submit">
<div class="text-start">
<span>
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-google" viewBox="0 0 16 16">
<path d="M15.545 6.558a9.42 9.42 0 0 1 .139 1.626c0 2.434-.87 4.492-2.384 5.885h.002C11.978 15.292 10.158 16 8 16A8 8 0 1 1 8 0a7.689 7.689 0 0 1 5.352 2.082l-2.284 2.284A4.347 4.347 0 0 0 8 3.166c-2.087 0-3.86 1.408-4.492 3.304a4.792 4.792 0 0 0 0 3.063h.003c.635 1.893 2.405 3.301 4.492 3.301 1.078 0 2.004-.276 2.722-.764h-.003a3.702 3.702 0 0 0 1.599-2.431H8v-3.08h7.545z"/>
</svg>
</span>
<span class="ms-4">{"Login with Google"}</span>
</div>
</button>
</form>
</div>
</div>
</div>
}
}
}
// fn show_error () |
use std::collections::HashMap;
use std::pin::Pin;
use std::sync::Mutex;
use tokio_stream::Stream;
use tonic::{Request, Response, Status};
use common::t_service_server::TService;
use common::*;
use crate::client_cert::ClientName;
use crate::job::Job;
use crate::output_stream;
type BoxStream<T> = Pin<Box<dyn Stream<Item = Result<T, Status>> + Send + Sync + 'static>>;
/// Enforce authentication, return client CN from the certificate
fn authenticate<T>(request: &Request<T>) -> Result<ClientName, Status> {
    if let Some(name) = ClientName::from_request(request) {
        log::info!("Authenticated as {:?}", name);
        Ok(name)
    } else {
        log::warn!("Client certificate missing");
        Err(Status::unauthenticated("Client certificate missing"))
    }
}
/// Enforce authorization
fn verify_authorized(client_name: &ClientName, job: &Job) -> Result<(), Status> {
    // Only the owner of the job may operate on it.
    if &job.owner == client_name {
        return Ok(());
    }
    log::warn!(
        "Client {:?} tried to access a job without permission",
        client_name
    );
    Err(Status::permission_denied("Job is owned by another user"))
}
/// gRPC service state: all known jobs keyed by id, guarded by a mutex.
pub struct TServiceImpl {
    // Jobs live for the lifetime of the service instance.
    state: Mutex<HashMap<JobId, Job>>,
}
impl TServiceImpl {
    /// Create a service with an empty job table.
    pub fn new() -> Self {
        Self {
            state: Mutex::new(HashMap::new()),
        }
    }
    /// Access job by id.
    // The lock's `unwrap` panics only if another thread panicked while
    // holding the mutex (poisoning).
    fn with_job<F, R>(&self, jobid: JobId, mut f: F) -> Result<R, Status>
    where
        F: FnMut(&mut Job) -> Result<R, Status>,
    {
        let mut jobs = self.state.lock().unwrap();
        if let Some(job) = jobs.get_mut(&jobid) {
            f(job)
        } else {
            Err(Status::not_found("No such job"))
        }
    }
    /// Access job by target id.
    /// Handles job id parsing and verifies authorization automatically.
    fn target_job<F, R>(
        &self,
        target_jobid: TargetJobId,
        client_name: &ClientName,
        mut f: F,
    ) -> Result<R, Status>
    where
        F: FnMut(&mut Job) -> Result<R, Status>,
    {
        if let Ok(jobid) = JobId::from_bytes(&target_jobid.jobid) {
            self.with_job(jobid, |job| {
                // Authorization happens before the caller's closure runs.
                verify_authorized(&client_name, job)?;
                f(job)
            })
        } else {
            Err(Status::invalid_argument("JobId"))
        }
    }
}
// `Default` simply delegates to `new` (empty job table).
impl Default for TServiceImpl {
    fn default() -> Self {
        Self::new()
    }
}
#[tonic::async_trait]
impl TService for TServiceImpl {
    /// Spawn a new job owned by the authenticated client.
    async fn start(
        &self,
        request: Request<JobStartRequest>,
    ) -> Result<Response<TargetJobId>, Status> {
        let client_name = authenticate(&request)?;
        match Job::spawn(client_name, request.into_inner()) {
            Ok(job) => {
                // Register the job under a fresh id so the client can refer to it later.
                let jobid = JobId::new();
                let mut jobs = self.state.lock().unwrap();
                jobs.insert(jobid, job);
                Ok(Response::new(jobid.into()))
            }
            Err(msg) => Err(Status::failed_precondition(msg)),
        }
    }
    /// Starts killing the child process, but doesn't wait until it's actually stopped
    async fn stop(
        &self,
        request: Request<TargetJobId>,
    ) -> Result<Response<StopSignalSent>, Status> {
        let client_name = authenticate(&request)?;
        self.target_job(request.into_inner(), &client_name, |job| {
            job.start_kill();
            Ok(Response::new(StopSignalSent {}))
        })
    }
    /// Get status of a job
    async fn status(&self, request: Request<TargetJobId>) -> Result<Response<JobStatus>, Status> {
        let client_name = authenticate(&request)?;
        self.target_job(request.into_inner(), &client_name, |job| {
            Ok(Response::new(job.status()))
        })
    }
    type OutputStream = BoxStream<OutputEvent>;
    /// Stream output of a job
    async fn output(
        &self,
        request: Request<TargetJobId>,
    ) -> Result<Response<Self::OutputStream>, Status> {
        let client_name = authenticate(&request)?;
        let (tx, rx) = tokio::sync::mpsc::channel(2);
        self.target_job(request.into_inner(), &client_name, |job| {
            // Fix: `target_job` already ran `verify_authorized`; the second
            // check that used to live here was redundant and has been removed.
            output_stream::stream_to(job.stdout.clone(), tx.clone());
            output_stream::stream_to(job.stderr.clone(), tx.clone());
            Ok(())
        })?;
        let s = tokio_stream::wrappers::ReceiverStream::new(rx);
        Ok(Response::new(Box::pin(s)))
    }
}
|
pub mod error;
pub mod language;
pub mod parser;
/// Convenience helpers for tree-sitter nodes.
pub trait NodeExt {
    /// Reports whether this node's kind is `supertype_id` and its first named
    /// child's kind appears in `subtype_ids`.
    fn matches_subtypes(&self, supertype_id: u16, subtype_ids: &[u16]) -> bool;
}
impl<'tree> NodeExt for tree_sitter::Node<'tree> {
    /// True when this node has kind `supertype_id` and a first named child
    /// whose kind is one of `subtype_ids`; false when no named child exists.
    fn matches_subtypes(&self, supertype_id: u16, subtype_ids: &[u16]) -> bool {
        self.named_child(0).map_or(false, |child| {
            self.kind_id() == supertype_id && subtype_ids.contains(&child.kind_id())
        })
    }
}
|
mod manifest;
/// A pattern that can be matched against identifiers of type [`Specifier::Id`].
///
/// Fix: this was declared `pub type Specifier { ... }`, which is invalid Rust —
/// a definition with associated items must be a `trait`.
pub trait Specifier {
    /// The identifier type this specifier matches against.
    type Id;
    /// Returns `true` when `id` satisfies this specifier.
    fn matches(&self, id: &Self::Id) -> bool;
}
|
use criterion::{criterion_group, criterion_main, Criterion};
use day08::{part1, part2};
/// Benchmark part 1 against the real 2018 day 8 puzzle input.
fn part1_benchmark(c: &mut Criterion) {
    let puzzle = include_str!("../../input/2018/day8.txt");
    c.bench_function("part1", move |bencher| bencher.iter(|| part1(&puzzle)));
}
/// Benchmark part 2 against the real 2018 day 8 puzzle input.
fn part2_benchmark(c: &mut Criterion) {
    let puzzle = include_str!("../../input/2018/day8.txt");
    c.bench_function("part2", move |bencher| bencher.iter(|| part2(&puzzle)));
}
// Wire both benchmarks into a group and generate the benchmark `main`.
criterion_group!(benches, part1_benchmark, part2_benchmark);
criterion_main!(benches);
|
pub mod transform;
pub mod camera;
pub mod mesh;
pub mod light;
pub mod audio;
pub mod alarm;
pub mod singleton_component_manager;
pub mod struct_component_manager;
pub mod collider;
use ecs::*;
use engine::*;
use self::struct_component_manager::StructComponentManager;
use std::boxed::FnBox;
use std::fmt::Debug;
use std::ops::{Deref, DerefMut};
pub use self::singleton_component_manager::SingletonComponentManager;
pub use self::transform::{Transform, TransformManager};
pub use self::camera::{Camera, CameraManager};
pub use self::mesh::{Mesh, MeshManager};
pub use self::light::{Light, LightManager};
pub use self::audio::{AudioSource, AudioSourceManager, AudioSystem};
pub use self::alarm::{AlarmId, AlarmManager, alarm_update};
pub use self::collider::{Collider, ColliderManager, CollisionSystem, bounding_volume, grid_collision};
/// Generic component manager that simply wraps a `StructComponentManager`.
/// Used for component types that do not need a bespoke manager.
#[derive(Debug, Clone)]
pub struct DefaultManager<T>(StructComponentManager<T>)
    where T: Component + Clone + Debug,
          T::Message: Message<Target=T>;
impl<T> DefaultManager<T>
    where T: Component<Manager=DefaultManager<T>> + Clone + Debug,
          T::Message: Message<Target=T>,
{
    /// Create an empty manager with no registered components.
    pub fn new() -> DefaultManager<T> {
        DefaultManager(StructComponentManager::new())
    }
}
impl<T> ComponentManagerBase for DefaultManager<T>
    where T: Component<Manager=DefaultManager<T>> + Clone + Debug,
          T::Message: Message<Target=T>,
{
    /// Per-frame update: drain and apply the queued messages.
    fn update(&mut self) {
        self.0.process_messages();
    }
}
impl<T> ComponentManager for DefaultManager<T>
    where T: Component<Manager=DefaultManager<T>> + Clone + Debug,
          T::Message: Message<Target=T>,
{
    type Component = T;
    /// Register a fresh, empty manager with the engine builder.
    fn register(builder: &mut EngineBuilder) {
        builder.register_manager(Self::new());
    }
    /// Look up the component attached to `entity`, if any.
    fn get(&self, entity: Entity) -> Option<&Self::Component> {
        self.0.get(entity)
    }
    /// Remove the component for `entity`; delegates to the inner manager.
    fn destroy(&self, entity: Entity) {
        self.0.destroy(entity);
    }
}
// Expose the wrapped `StructComponentManager`'s API directly.
impl<T> Deref for DefaultManager<T>
    where T: Component<Manager=DefaultManager<T>> + Clone + Debug,
          T::Message: Message<Target=T>,
{
    type Target = StructComponentManager<T>;
    fn deref(&self) -> &StructComponentManager<T> {
        &self.0
    }
}
// Mutable counterpart of the `Deref` impl above.
impl<T> DerefMut for DefaultManager<T>
    where T: Component<Manager=DefaultManager<T>> + Clone + Debug,
          T::Message: Message<Target=T>,
{
    fn deref_mut(&mut self) -> &mut StructComponentManager<T> {
        &mut self.0
    }
}
/// Message wrapping a boxed one-shot closure to run against a component.
// NOTE(review): `FnBox` is the old nightly-only precursor of `Box<dyn FnOnce>`;
// this file presumably targets an old nightly toolchain.
pub struct DefaultMessage<T>(Box<FnBox(&mut T)>)
    where T: Component;
impl<T: Component<Message=DefaultMessage<T>>> Message for DefaultMessage<T> {
    type Target = T;
    /// Consume the message and apply its closure to the target component.
    fn apply(self, component: &mut T) {
        // Move the box out so `call_once` can consume the closure.
        let inner = self.0;
        inner.call_once((component,));
    }
}
impl<T, U> From<U> for DefaultMessage<T>
    where T: Component,
          U: 'static + FnOnce(&mut T),
{
    /// Box any `FnOnce(&mut T)` closure into a `DefaultMessage`.
    fn from(callback: U) -> DefaultMessage<T> {
        DefaultMessage(Box::new(callback))
    }
}
|
/// Computes a[1] + a[2] + ... + a[n] where a[1] = x and a[k+1] = a[k]^2 mod m,
/// using binary-lifting (doubling) tables over the m possible values.
fn main() {
    let stdin = std::io::stdin();
    let mut rd = ProconReader::new(stdin.lock());
    let n: u64 = rd.get();
    let x: usize = rd.get();
    let m: usize = rd.get();
    // Number of doubling levels; presumably 2^34 exceeds the problem's
    // maximum n — TODO confirm against the constraints.
    let b = 34;
    // nxt[i][j]: value reached from j after 2^i applications of j -> j^2 mod m.
    let mut nxt = vec![vec![0; m]; b];
    // dp[i][j]: a[1] + a[2] + ... + a[2^i] when starting from x = j
    let mut dp = vec![vec![0; m]; b];
    for y in 0..m {
        nxt[0][y] = (y * y) % m;
        dp[0][y] = y;
    }
    // Build each level from the previous one: 2^i steps = two runs of 2^(i-1).
    for i in 1..b {
        for j in 0..m {
            nxt[i][j] = nxt[i - 1][nxt[i - 1][j]];
            dp[i][j] = dp[i - 1][j] + dp[i - 1][nxt[i - 1][j]];
        }
    }
    // Decompose n into powers of two, accumulating partial sums as we jump.
    let mut y = x;
    let mut ans = 0;
    for i in 0..b {
        if (n >> i & 1) == 1 {
            ans += dp[i][y];
            y = nxt[i][y];
        }
    }
    println!("{}", ans);
}
/// Minimal whitespace-separated token reader for competitive-programming input.
pub struct ProconReader<R: std::io::Read> {
    reader: R,
}
impl<R: std::io::Read> ProconReader<R> {
    /// Wrap any `Read` source (stdin lock, file, cursor, ...).
    pub fn new(reader: R) -> Self {
        Self { reader }
    }
    /// Read the next token delimited by space / `\n` / `\r` and parse it.
    ///
    /// Panics with "Parse Error." when the token does not parse as `T`.
    pub fn get<T: std::str::FromStr>(&mut self) -> T {
        use std::io::Read;
        let mut token: Vec<u8> = Vec::new();
        // Pull bytes one at a time: skip leading separators, then gather
        // bytes until the next separator ends the token.
        for byte in self.reader.by_ref().bytes().map(|b| b.unwrap()) {
            let is_sep = byte == b' ' || byte == b'\n' || byte == b'\r';
            if is_sep {
                if token.is_empty() {
                    continue; // still skipping leading whitespace
                }
                break; // token complete
            }
            token.push(byte);
        }
        std::str::from_utf8(&token)
            .unwrap()
            .parse()
            .ok()
            .expect("Parse Error.")
    }
}
|
use structopt::StructOpt;
// Top-level CLI arguments. NOTE: `///` doc comments on structopt fields become
// help text, so reviewer notes here use plain `//` comments to avoid changing
// the generated --help output.
#[derive(Debug, StructOpt)]
#[structopt(name = "readlater", about, author)]
pub struct Args {
    // The subcommand to run (see `Command`).
    #[structopt(subcommand)]
    pub cmd: Command,
    // -v / --verbose flag.
    #[structopt(short, long)]
    pub verbose: bool,
}
// One variant per subcommand; each carries its own argument struct.
#[derive(Debug, StructOpt)]
pub enum Command {
    #[structopt(name = "newsboat")]
    Newsboat(Newsboat),
    #[structopt(name = "epub")]
    Epub(Epub),
    #[structopt(name = "rss")]
    Rss(Rss),
    #[structopt(name = "cleanup")]
    Cleanup(Cleanup),
    #[structopt(name = "article")]
    Article(Article),
}
// Arguments for the `newsboat` subcommand (save an article from newsboat).
#[derive(Debug, StructOpt)]
pub struct Newsboat {
    /// URL of the article
    pub url: String,
    /// Title of the article
    pub title: Option<String>,
    /// Description of the article
    pub desc: Option<String>,
    /// Feed title
    pub feed_title: Option<String>,
}
// Arguments for the `epub` subcommand.
#[derive(Debug, StructOpt)]
pub struct Epub {
    /// Write an epub file to the given file (given locally cached articles)
    #[structopt(name = "filename")]
    pub epub: String,
}
// Arguments for the `rss` subcommand.
#[derive(Debug, StructOpt)]
pub struct Rss {
    /// Write an rss file to the given file (given locally cached articles)
    #[structopt(name = "filename")]
    pub rss: String,
}
// Arguments for the `cleanup` subcommand.
#[derive(Debug, StructOpt)]
pub struct Cleanup {
    /// Cleanup cached articles based on created time.
    #[structopt(name = "days")]
    pub days: u8,
}
// Arguments for the `article` subcommand.
#[derive(Debug, StructOpt)]
pub struct Article {
    /// Save a readable version of the article
    #[structopt(name = "url")]
    pub url: String,
}
|
// Generated (svd2rust-style) accessors for the interrupt-mask (IM) register.
#[doc = "Reader of register IM"]
pub type R = crate::R<u32, super::IM>;
#[doc = "Writer for register IM"]
pub type W = crate::W<u32, super::IM>;
#[doc = "Register IM `reset()`'s with value 0"]
impl crate::ResetValue for super::IM {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        // All bits cleared on reset.
        0
    }
}
// Field FPIDCIM occupies bit 0 of the register.
#[doc = "Reader of field `FPIDCIM`"]
pub type FPIDCIM_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `FPIDCIM`"]
pub struct FPIDCIM_W<'a> {
    // Borrows the register writer so updates land in `w.bits`.
    w: &'a mut W,
}
impl<'a> FPIDCIM_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 0, then OR in the new value.
        self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
        self.w
    }
}
// Field FPDZCIM occupies bit 1 of the register.
#[doc = "Reader of field `FPDZCIM`"]
pub type FPDZCIM_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `FPDZCIM`"]
pub struct FPDZCIM_W<'a> {
    w: &'a mut W,
}
impl<'a> FPDZCIM_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 1, then OR in the new value shifted into place.
        self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1);
        self.w
    }
}
// Field FPIOCIM occupies bit 2 of the register.
#[doc = "Reader of field `FPIOCIM`"]
pub type FPIOCIM_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `FPIOCIM`"]
pub struct FPIOCIM_W<'a> {
    w: &'a mut W,
}
impl<'a> FPIOCIM_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 2, then OR in the new value shifted into place.
        self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2);
        self.w
    }
}
// Field FPUFCIM occupies bit 3 of the register.
#[doc = "Reader of field `FPUFCIM`"]
pub type FPUFCIM_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `FPUFCIM`"]
pub struct FPUFCIM_W<'a> {
    w: &'a mut W,
}
impl<'a> FPUFCIM_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 3, then OR in the new value shifted into place.
        self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3);
        self.w
    }
}
// Field FPOFCIM occupies bit 4 of the register.
#[doc = "Reader of field `FPOFCIM`"]
pub type FPOFCIM_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `FPOFCIM`"]
pub struct FPOFCIM_W<'a> {
    w: &'a mut W,
}
impl<'a> FPOFCIM_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 4, then OR in the new value shifted into place.
        self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4);
        self.w
    }
}
// Field FPIXCIM occupies bit 5 of the register.
#[doc = "Reader of field `FPIXCIM`"]
pub type FPIXCIM_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `FPIXCIM`"]
pub struct FPIXCIM_W<'a> {
    w: &'a mut W,
}
impl<'a> FPIXCIM_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 5, then OR in the new value shifted into place.
        self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u32) & 0x01) << 5);
        self.w
    }
}
// Field readers: each extracts a single bit of the captured register value.
impl R {
    #[doc = "Bit 0 - Floating-Point Input Denormal Exception Interrupt Mask"]
    #[inline(always)]
    pub fn fpidcim(&self) -> FPIDCIM_R {
        FPIDCIM_R::new((self.bits & 0x01) != 0)
    }
    #[doc = "Bit 1 - Floating-Point Divide By 0 Exception Interrupt Mask"]
    #[inline(always)]
    pub fn fpdzcim(&self) -> FPDZCIM_R {
        FPDZCIM_R::new(((self.bits >> 1) & 0x01) != 0)
    }
    #[doc = "Bit 2 - Floating-Point Invalid Operation Interrupt Mask"]
    #[inline(always)]
    pub fn fpiocim(&self) -> FPIOCIM_R {
        FPIOCIM_R::new(((self.bits >> 2) & 0x01) != 0)
    }
    #[doc = "Bit 3 - Floating-Point Underflow Exception Interrupt Mask"]
    #[inline(always)]
    pub fn fpufcim(&self) -> FPUFCIM_R {
        FPUFCIM_R::new(((self.bits >> 3) & 0x01) != 0)
    }
    #[doc = "Bit 4 - Floating-Point Overflow Exception Interrupt Mask"]
    #[inline(always)]
    pub fn fpofcim(&self) -> FPOFCIM_R {
        FPOFCIM_R::new(((self.bits >> 4) & 0x01) != 0)
    }
    #[doc = "Bit 5 - Floating-Point Inexact Exception Interrupt Mask"]
    #[inline(always)]
    pub fn fpixcim(&self) -> FPIXCIM_R {
        FPIXCIM_R::new(((self.bits >> 5) & 0x01) != 0)
    }
}
// Field writers: each returns a proxy whose `bit`/`set_bit`/`clear_bit`
// mutates the pending register value.
impl W {
    #[doc = "Bit 0 - Floating-Point Input Denormal Exception Interrupt Mask"]
    #[inline(always)]
    pub fn fpidcim(&mut self) -> FPIDCIM_W {
        FPIDCIM_W { w: self }
    }
    #[doc = "Bit 1 - Floating-Point Divide By 0 Exception Interrupt Mask"]
    #[inline(always)]
    pub fn fpdzcim(&mut self) -> FPDZCIM_W {
        FPDZCIM_W { w: self }
    }
    #[doc = "Bit 2 - Floating-Point Invalid Operation Interrupt Mask"]
    #[inline(always)]
    pub fn fpiocim(&mut self) -> FPIOCIM_W {
        FPIOCIM_W { w: self }
    }
    #[doc = "Bit 3 - Floating-Point Underflow Exception Interrupt Mask"]
    #[inline(always)]
    pub fn fpufcim(&mut self) -> FPUFCIM_W {
        FPUFCIM_W { w: self }
    }
    #[doc = "Bit 4 - Floating-Point Overflow Exception Interrupt Mask"]
    #[inline(always)]
    pub fn fpofcim(&mut self) -> FPOFCIM_W {
        FPOFCIM_W { w: self }
    }
    #[doc = "Bit 5 - Floating-Point Inexact Exception Interrupt Mask"]
    #[inline(always)]
    pub fn fpixcim(&mut self) -> FPIXCIM_W {
        FPIXCIM_W { w: self }
    }
}
|
#![crate_name = "cql"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#[macro_use]
extern crate enum_primitive as ep;
extern crate mio;
extern crate eventual;
extern crate uuid;
extern crate bytes;
pub use cluster::Cluster;
pub use def::Consistency;
pub use def::BatchType;
pub use def::CqlValue;
pub use def::CqlValue::*;
pub use def::CqlEventType;
pub use def::CQLList;
pub use def::CQLMap;
pub use def::CQLSet;
pub use def::Query::QueryStr;
pub use def::Query::QueryPrepared;
pub use def::OpcodeResponse;
pub use def::CqlResponseBody;
pub use error::*;
pub use error::RCErrorType::*;
pub use reader::CqlReader;
pub use def::CassFuture;
use def::CqlResponse;
pub use def::CqlBytesSize;
pub use def::CqlBytesSize::*;
pub use load_balancing::BalancerType;
pub use load_balancing::BalancerType::*;
// Unwrap a byteorder result, converting EOF and I/O failures into an
// early-returned `RCError` tagged with `$msg`.
// NOTE(review): `err.description()` is deprecated in modern Rust; `to_string()`
// is the usual replacement — left unchanged here.
#[macro_export]
macro_rules! try_bo(
    ($call: expr, $msg: expr) => {
        match $call {
            Ok(val) => val,
            Err(self::byteorder::Error::UnexpectedEOF) => return Err($crate::error::RCError::new(format!("{} -> {}", $msg, "Unexpected EOF"), $crate::error::RCErrorType::IOError)),
            Err(self::byteorder::Error::Io(ref err)) => {
                use std::error::Error;
                return Err($crate::error::RCError::new(format!("{} -> {}", $msg, err.description()), $crate::error::RCErrorType::IOError))
            }
        };
    }
);
// Unwrap an I/O result, converting any error into an early-returned `RCError`.
// Relies on `RCError`/`RCErrorType` being in scope at the call site.
#[macro_export]
macro_rules! try_io(
    ($call: expr, $msg: expr) => {
        match $call {
            Ok(val) => val,
            Err(ref err) => {
                use std::error::Error;
                return Err(RCError::new(format!("{} -> {}", $msg, err.description()), RCErrorType::IOError))
            }
        };
    }
);
// Unwrap any result, converting the error into an early-returned `RCError`.
// The call site must have `std::error::Error` in scope for `description()`.
#[macro_export]
macro_rules! try_rc(
    ($call: expr, $msg: expr) => {
        match $call {
            Ok(val) => val,
            Err(ref err) => return Err($crate::error::RCError::new(format!("{} -> {}", $msg, err.description()), $crate::error::RCErrorType::IOError))
        };
    }
);
// Like `try_rc`, but a length of -1 short-circuits the enclosing function
// with `Ok(None)` (the wire protocol's "no data" marker).
macro_rules! try_rc_length(
    ($call: expr, $msg: expr) => {
        match $call {
            Ok(-1) => return Ok(None),
            Ok(val) => val,
            Err(ref err) => return Err($crate::error::RCError::new(format!("{} -> {}", $msg, err.description()), $crate::error::RCErrorType::IOError))
        };
    }
);
// Unwrap a `Result<Option<_>, _>`: both the error case and the `None` case
// become an early-returned `RCError`.
macro_rules! try_rc_noption(
    ($call: expr, $msg: expr) => {
        match $call {
            Ok(option) => match option {
                None => return Err($crate::error::RCError::new(format!("{} -> {}", $msg, "No data found (length == -1)"), $crate::error::RCErrorType::IOError)),
                Some(val) => val
            },
            Err(ref err) => return Err($crate::error::RCError::new(format!("{} -> {}", $msg, err.description()), $crate::error::RCErrorType::IOError))
        };
    }
);
// Unwrap a result, debug-formatting any error into an `RCError`.
#[macro_export]
macro_rules! try_unwrap(
    ($call: expr) => {
        match $call {
            Ok(val) => val,
            Err(err) => return Err($crate::error::RCError::new(format!("{:?}", err), $crate::error::RCErrorType::IOError))
        };
    }
);
// Unwrap an `Option`; `None` becomes an `RCError` with an empty message.
#[macro_export]
macro_rules! try_unwrap_op(
    ($call: expr) => {
        match $call {
            Some(val) => val,
            None => return Err($crate::error::RCError::new(format!(""), $crate::error::RCErrorType::IOError))
        };
    }
);
// Produce a pair of empty borrowed `Cow` strings, used as a void placeholder.
// Requires `std::borrow::Cow` in scope at the call site.
macro_rules! CowStr_tuple_void(
    () => {
        (Cow::Borrowed(""), Cow::Borrowed(""))
    }
);
mod def;
mod reader;
mod serialize;
mod connection;
mod connection_pool;
mod node;
mod load_balancing;
mod util;
mod error;
pub mod cluster;
|
use crate::instructions::Opcode;
use std::result::Result;
/// A simple register-based bytecode virtual machine.
#[derive(Default)]
pub struct VM {
    /// Array of `hardware` registers
    pub registers: [i32; 32],
    /// Program counter
    pc: usize,
    /// The bytecode of the program being run
    pub program: Vec<u8>,
    /// Remainder of modulo division ops
    remainder: u32,
    /// Last comparison result
    equal_flag: bool,
}
/// Outcome of executing a single instruction via `run_once`.
#[derive(Debug)]
pub enum Step {
    // The program halted normally (HLT).
    Done,
    // Execution may proceed with the next instruction.
    Continue,
}
impl VM {
pub fn new() -> Self {
Self::default()
}
pub fn add_byte(&mut self, byte: u8) {
self.program.push(byte)
}
pub fn run(&mut self) -> Result<(), u8> {
loop {
match self.run_once() {
Err(err) => break Err(err),
Ok(Step::Done) => break Ok(()),
Ok(Step::Continue) => (),
}
}
}
pub fn run_once(&mut self) -> Result<Step, u8> {
if self.pc >= self.program.len() {
println!("pc overflow");
return Err(1);
}
match self.decode_opcode() {
Opcode::HLT => {
println!("HLT encountered");
Ok(Step::Done)
}
Opcode::IGL(opcode) => {
println!("IGL {} encountered", opcode);
Err(2)
}
Opcode::LOAD => {
let register = self.next_8_bits() as usize;
let number = self.next_16_bits() as u16;
self.registers[register] = number as i32;
Ok(Step::Continue)
}
Opcode::ADD => {
let (reg_l, reg_r) = self.read_registers_pair();
self.registers[self.next_8_bits() as usize] = reg_l + reg_r;
Ok(Step::Continue)
}
Opcode::MUL => {
let (reg_l, reg_r) = self.read_registers_pair();
// TODO: handle overflow
self.registers[self.next_8_bits() as usize] = reg_l * reg_r;
Ok(Step::Continue)
}
Opcode::SUB => {
let (reg_l, reg_r) = self.read_registers_pair();
self.registers[self.next_8_bits() as usize] = reg_l - reg_r;
Ok(Step::Continue)
}
Opcode::DIV => {
let (reg_l, reg_r) = self.read_registers_pair();
self.registers[self.next_8_bits() as usize] = reg_l / reg_r;
self.remainder = (reg_l % reg_r) as u32;
Ok(Step::Continue)
}
Opcode::JMP => {
let jump = self.read_register();
self.pc = jump as usize;
Ok(Step::Continue)
}
Opcode::JMPF => {
let value = self.read_register() as usize;
self.pc += value;
Ok(Step::Continue)
}
Opcode::JMPB => {
let value = self.read_register() as usize;
self.pc -= value;
Ok(Step::Continue)
}
Opcode::JEQ => {
let jump = self.read_register();
if self.equal_flag {
self.pc = jump as usize;
}
Ok(Step::Continue)
}
Opcode::EQ => {
self.equal_flag = {
let (reg_l, reg_r) = self.read_registers_pair();
reg_l == reg_r
};
Ok(Step::Continue)
}
Opcode::NEQ => {
self.equal_flag = {
let (reg_l, reg_r) = self.read_registers_pair();
reg_l != reg_r
};
Ok(Step::Continue)
}
Opcode::GT => {
self.equal_flag = {
let (reg_l, reg_r) = self.read_registers_pair();
reg_l > reg_r
};
Ok(Step::Continue)
}
Opcode::LT => {
self.equal_flag = {
let (reg_l, reg_r) = self.read_registers_pair();
reg_l < reg_r
};
Ok(Step::Continue)
}
Opcode::GTQ => {
self.equal_flag = {
let (reg_l, reg_r) = self.read_registers_pair();
reg_l >= reg_r
};
Ok(Step::Continue)
}
Opcode::LTQ => {
self.equal_flag = {
let (reg_l, reg_r) = self.read_registers_pair();
reg_l <= reg_r
};
Ok(Step::Continue)
}
}
}
fn decode_opcode(&mut self) -> Opcode {
let opcode = Opcode::from(self.program[self.pc]);
self.pc += 1;
opcode
}
fn next_8_bits(&mut self) -> u8 {
let result = self.program[self.pc];
self.pc += 1;
result
}
fn next_16_bits(&mut self) -> u16 {
let result = ((self.program[self.pc] as u16) << 8) | self.program[self.pc + 1] as u16;
self.pc += 2;
result
}
fn read_registers_pair(&mut self) -> (i32, i32) {
(self.read_register(), self.read_register())
}
// return value of a register that specified in the next program unit
fn read_register(&mut self) -> i32 {
self.registers[self.next_8_bits() as usize]
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_create_vm() {
let test_vm = VM::new();
assert_eq!(test_vm.registers[0], 0)
}
#[test]
fn test_opcode_hlt() {
let mut test_vm = VM::new();
let test_bytes = vec![0, 0, 0, 0];
test_vm.program = test_bytes;
assert_eq!(test_vm.run(), Ok(()));
assert_eq!(test_vm.pc, 1);
}
#[test]
fn test_opcode_igl() {
let mut test_vm = VM::new();
let test_bytes = vec![200, 0, 0, 0];
test_vm.program = test_bytes;
assert_eq!(test_vm.run(), Err(2));
assert_eq!(test_vm.pc, 1);
}
#[test]
fn test_load_opcode() {
let mut test_vm = VM::new();
test_vm.program = vec![1, 0, 1, 244, 0];
assert_eq!(test_vm.run(), Ok(()));
assert_eq!(test_vm.registers[0], 500);
}
#[test]
fn test_load_add() {
let mut test_vm = VM::new();
test_vm.program = vec![1, 0, 1, 244, 1, 1, 1, 245, 2, 0, 1, 4, 0];
assert_eq!(test_vm.run(), Ok(()));
assert_eq!(test_vm.registers[4], 1001);
}
#[test]
fn test_load_sub() {
let mut test_vm = VM::new();
test_vm.program = vec![1, 0, 1, 244, 1, 1, 1, 245, 3, 1, 0, 4, 0];
assert_eq!(test_vm.run(), Ok(()));
assert_eq!(test_vm.registers[4], 1);
}
#[test]
fn test_load_mul() {
let mut test_vm = VM::new();
test_vm.program = vec![1, 0, 1, 244, 1, 1, 1, 245, 4, 1, 0, 5, 0];
assert_eq!(test_vm.run(), Ok(()));
assert_eq!(test_vm.registers[5], 250_500);
}
#[test]
fn test_load_div() {
let mut test_vm = VM::new();
test_vm.program = vec![1, 0, 1, 246, 1, 1, 1, 244, 5, 0, 1, 5, 0];
assert_eq!(test_vm.run(), Ok(()));
assert_eq!(test_vm.registers[5], 1);
assert_eq!(test_vm.remainder, 2);
}
#[test]
fn test_jmp_opcode() {
let mut test_vm = VM::new();
test_vm.registers[0] = 1;
test_vm.program = vec![6, 0, 0, 0];
assert!(matches!(test_vm.run_once(), Ok(Step::Continue)));
assert_eq!(test_vm.pc, 1);
}
#[test]
fn test_jmpf_opcode() {
let mut test_vm = VM::new();
test_vm.registers[0] = 2;
test_vm.program = vec![7, 0, 0, 0, 6, 0, 0, 0];
assert!(matches!(test_vm.run_once(), Ok(Step::Continue)));
assert_eq!(test_vm.pc, 4);
}
#[test]
fn test_jmpb_opcode() {
let mut test_vm = VM::new();
test_vm.registers[0] = 2;
let jmp = 4;
test_vm.program = vec![1, 0, 0, jmp, 8, 0, 0, 0];
assert!(matches!(test_vm.run_once(), Ok(Step::Continue)));
let before_jmp = test_vm.pc;
assert!(matches!(test_vm.run_once(), Ok(Step::Continue)));
// +2 comes from JMPB size
assert_eq!(test_vm.pc, before_jmp - jmp as usize + 2);
}
#[test]
fn test_eq_opcode() {
let mut test_vm = VM::new();
test_vm.registers[5] = 7;
test_vm.registers[6] = 7;
test_vm.registers[7] = 0;
test_vm.program = vec![0xA, 5, 6, 0xA, 5, 7];
assert!(matches!(test_vm.run_once(), Ok(Step::Continue)));
assert_eq!(test_vm.pc, 3);
assert!(test_vm.equal_flag);
assert!(matches!(test_vm.run_once(), Ok(Step::Continue)));
assert_eq!(test_vm.pc, 6);
assert!(!test_vm.equal_flag);
}
#[test]
fn test_neq_opcode() {
let mut test_vm = VM::new();
test_vm.registers[5] = 7;
test_vm.registers[6] = 7;
test_vm.registers[7] = 0;
test_vm.program = vec![0xB, 5, 7, 0xB, 5, 6];
assert!(matches!(test_vm.run_once(), Ok(Step::Continue)));
assert_eq!(test_vm.pc, 3);
assert!(test_vm.equal_flag);
assert!(matches!(test_vm.run_once(), Ok(Step::Continue)));
assert_eq!(test_vm.pc, 6);
assert!(!test_vm.equal_flag);
}
#[test]
fn test_gt_opcode() {
    let mut vm = VM::new();
    vm.registers[5] = 7;
    vm.registers[6] = 7;
    vm.registers[7] = 0;
    // 7 > 0 sets the flag; 7 > 7 clears it.
    vm.program = vec![0xC, 5, 7, 0xC, 5, 6];
    assert!(matches!(vm.run_once(), Ok(Step::Continue)));
    assert_eq!(vm.pc, 3);
    assert!(vm.equal_flag);
    assert!(matches!(vm.run_once(), Ok(Step::Continue)));
    assert_eq!(vm.pc, 6);
    assert!(!vm.equal_flag);
}
#[test]
fn test_lt_opcode() {
    let mut vm = VM::new();
    vm.registers[5] = 7;
    vm.registers[6] = 7;
    vm.registers[7] = 0;
    // 0 < 7 sets the flag; 7 < 7 clears it.
    vm.program = vec![0xD, 7, 5, 0xD, 6, 5];
    assert!(matches!(vm.run_once(), Ok(Step::Continue)));
    assert_eq!(vm.pc, 3);
    assert!(vm.equal_flag);
    assert!(matches!(vm.run_once(), Ok(Step::Continue)));
    assert_eq!(vm.pc, 6);
    assert!(!vm.equal_flag);
}
#[test]
fn test_gtq_opcode() {
    let mut vm = VM::new();
    vm.registers[5] = 7;
    vm.registers[6] = 7;
    vm.registers[7] = 8;
    // 7 >= 7 true; 7 >= 8 false; 7 >= 0 true (r8 still holds its default 0).
    vm.program = vec![0xE, 5, 6, 0xE, 5, 7, 0xE, 5, 8];
    assert!(matches!(vm.run_once(), Ok(Step::Continue)));
    assert_eq!(vm.pc, 3);
    assert!(vm.equal_flag);
    assert!(matches!(vm.run_once(), Ok(Step::Continue)));
    assert_eq!(vm.pc, 6);
    assert!(!vm.equal_flag);
    assert!(matches!(vm.run_once(), Ok(Step::Continue)));
    assert_eq!(vm.pc, 9);
    assert!(vm.equal_flag);
}
#[test]
fn test_ltq_opcode() {
    let mut vm = VM::new();
    vm.registers[5] = 7;
    vm.registers[6] = 7;
    vm.registers[7] = 8;
    // 7 <= 8 true; 7 <= 0 false (r8 still holds its default 0); 7 <= 7 true.
    vm.program = vec![0xF, 5, 7, 0xF, 5, 8, 0xF, 5, 6];
    assert!(matches!(vm.run_once(), Ok(Step::Continue)));
    assert_eq!(vm.pc, 3);
    assert!(vm.equal_flag);
    assert!(matches!(vm.run_once(), Ok(Step::Continue)));
    assert_eq!(vm.pc, 6);
    assert!(!vm.equal_flag);
    assert!(matches!(vm.run_once(), Ok(Step::Continue)));
    assert_eq!(vm.pc, 9);
    assert!(vm.equal_flag);
}
}
|
extern crate hyper;
extern crate rss;
extern crate chrono;
extern crate rand;
use std::io::{BufReader, Read, Write};
use std::fs::File;
use std::fmt::{Display, Formatter};
use std::fmt::Error as FmtError;
use std::result::Result;
use std::error::Error;
use self::chrono::*;
use self::rand::{thread_rng, Rng};
use self::hyper::Client;
use self::hyper::header::{UserAgent};
use self::rss::{Channel, Item};
/// RSS feed listing recent uploads on chipmusic.org.
const BASE_RSS_FEED_URL: &'static str = "http://www.chipmusic.org/music/rss/feed.xml";
/// Upper bound on how many random items a single call may request.
const LIMIT: u8 = 24;
/// Cache refresh threshold: refetch the feed once it is a day old.
const DAY_IN_SECONDS: i64 = 86400;
/// Client for the chipmusic.org RSS feed with a one-channel cache.
pub struct ChipmusicApi {
    client: Client,          // reused hyper HTTP client
    channel: Option<Channel> // cached feed; None until first successful fetch
}
/// Errors produced by `ChipmusicApi` itself (network/IO errors are boxed through).
#[derive(Debug, PartialEq, Eq)]
pub enum ChipmusicApiError {
    /// The feed was unavailable or contained no items to pick from.
    NoChiptunesFound
}
impl Display for ChipmusicApiError {
    /// Renders the same text as `Error::description`, honoring any
    /// formatter flags (width/alignment) the caller supplied.
    fn fmt(&self, formatter: &mut Formatter) -> Result<(), FmtError> {
        let message = self.description();
        Display::fmt(message, formatter)
    }
}
impl Error for ChipmusicApiError {
    /// Static message used by both `Display` and generic error reporting.
    fn description(&self) -> &str {
        match *self {
            ChipmusicApiError::NoChiptunesFound => "Couldn't retrieve any chiptunes"
        }
    }
}
impl ChipmusicApi {
    /// Creates a client with an empty feed cache.
    pub fn new() -> Self {
        ChipmusicApi {
            client: Client::new(),
            channel: None
        }
    }

    /// Returns a randomly chosen item from the cached feed, refreshing the
    /// cache first when it is missing or more than a day old.
    ///
    /// Fails with `NoChiptunesFound` when there is no cached feed or the
    /// feed contains no items.
    pub fn get_random_chiptune(&mut self) -> Result<Item, Box<Error>> {
        if self.should_refresh_cache() {
            let channel = try!(self.get_rss_feed());
            self.channel = Some(channel);
        }
        match self.channel {
            Some(ref channel) => {
                // Guard the empty feed: gen_range(0, 0) would panic.
                if channel.items.is_empty() {
                    return Err(Box::new(ChipmusicApiError::NoChiptunesFound));
                }
                let mut rng = thread_rng();
                let i: usize = rng.gen_range(0, channel.items.len());
                let ref item = channel.items[i];
                Ok(item.clone())
            },
            None => Err(Box::new(ChipmusicApiError::NoChiptunesFound))
        }
    }

    /// Returns up to `n` random items (capped at `LIMIT`).
    /// Picks are independent, so duplicates are possible.
    pub fn get_random_chiptunes(&mut self, n: u8) -> Vec<Result<Item, Box<Error>>> {
        let mut chiptunes = vec![];
        let n = if n >= LIMIT { LIMIT } else { n };
        for _ in 0..n {
            chiptunes.push(self.get_random_chiptune());
        }
        chiptunes
    }

    /// Downloads the item's enclosure and writes it to "<title>.mp3" in the
    /// current directory.
    ///
    /// NOTE(review): panics when the item has no title or no enclosure —
    /// confirm the feed always provides both, or turn these unwraps into
    /// error returns.
    pub fn get_mp3_from_chiptune(&self, chiptune: &Item) -> Result<String, Box<Error>> {
        let chiptune = chiptune.clone();
        let title = chiptune.title.unwrap();
        let url = chiptune.enclosure.unwrap().url;
        let request = self.client.get(url.as_str());
        let mut response = try!(request.send());
        let mut buffer = vec![];
        try!(response.read_to_end(&mut buffer));
        let mut file = try!(File::create(format!("{}.mp3", title)));
        try!(file.write_all(buffer.as_ref()));
        Ok("Download complete!".to_string())
    }

    /// Fetches and parses the RSS feed.
    ///
    /// NOTE(review): the parse failure is unwrapped — a malformed feed
    /// panics instead of returning an error; consider propagating it.
    fn get_rss_feed(&self) -> Result<Channel, Box<Error>> {
        let request = self.client.get(BASE_RSS_FEED_URL).header(UserAgent("chipmusic-rss".to_string()));
        let response = try!(request.send());
        let reader = BufReader::new(response);
        let channel = Channel::read_from(reader).unwrap();
        Ok(channel)
    }

    /// True when there is no cached feed, or the cached one is stale.
    fn should_refresh_cache(&self) -> bool {
        match self.channel {
            Some(ref channel) => {
                self.is_pub_date_old(channel.pub_date.clone())
            },
            None => true
        }
    }

    /// True when `pub_date` is missing, unparseable, or over a day old.
    fn is_pub_date_old(&self, pub_date: Option<String>) -> bool {
        match pub_date {
            Some(pub_date) => {
                // The RSS feed for chipmusic.org updates infrequently, so we only
                // want to refresh the cached feed if the publication date is more than a day old
                let now = UTC::now().num_seconds_from_unix_epoch();
                // Default to refreshing (epoch 0) on a DateTime parsing error.
                let last = DateTime::parse_from_rfc2822(pub_date.as_str())
                    .map(|date| date.num_seconds_from_unix_epoch())
                    .unwrap_or(0);
                now - last >= DAY_IN_SECONDS
            },
            None => true
        }
    }
}
|
use std::rand;
fn main() {
    // Random pick in 1..=100 (the modulo's bias over u32 is negligible here).
    let roll = (rand::random::<u32>() % 100) + 1;
    println!("{}", roll);
}
|
use std::cell::UnsafeCell;
use std::ops::{Deref, DerefMut, Drop};
use abi;
// The u32 lock word handed to the PFEX primitives; repr(transparent)
// guarantees its address is the address of the inner u32.
#[repr(transparent)]
struct MutexInner(u32);
/// RAII guard: releases the PFEX lock when dropped.
pub struct MutexGuard<'a, T: 'a> {
    pfex_item: &'a MutexInner, // lock word to release on drop
    data: &'a mut T,           // exclusive view of the protected value
}
// SAFETY: the mutex only hands out access to T while the PFEX hardware
// lock is held, so moving/sharing the mutex across threads is sound
// whenever T itself can be sent between threads (T: Send).
unsafe impl<T: Send> Send for Mutex<T> {}
unsafe impl<T: Send> Sync for Mutex<T> {}
/// Spin-free mutex backed by the platform's PFEX lock primitive.
pub struct Mutex<T> {
    pfex_item: MutexInner,   // lock word addressed by pfex_acquire/release
    data: UnsafeCell<T>,     // protected value; aliasing policed by the lock
}
impl<T> Mutex<T> {
    /// Creates an unlocked mutex wrapping `data`.
    ///
    /// The lock word starts at 0 — presumably the PFEX "unlocked" value;
    /// TODO confirm against `abi::pfex_acquire`'s contract.
    pub const fn new(data: T) -> Mutex<T> {
        Mutex {
            pfex_item: MutexInner(0),
            data: UnsafeCell::new(data),
        }
    }

    /// Blocks until the lock is acquired, then returns a guard that
    /// releases it on drop.
    pub fn lock(&self) -> MutexGuard<T> {
        // The PFEX primitive identifies the lock by the word's address.
        let addr = &self.pfex_item as *const _ as *const u32;
        // println!("lock addr: {:p}", addr);
        unsafe {
            abi::pfex_acquire(addr);
        }
        MutexGuard {
            pfex_item: &self.pfex_item,
            // SAFETY: the lock is now held, so no other guard can alias
            // this exclusive reference until drop releases it.
            data: unsafe { &mut *self.data.get() },
        }
    }
}
impl<'a, T: 'a> Deref for MutexGuard<'a, T> {
    type Target = T;

    /// Shared access to the protected value while the lock is held.
    fn deref(&self) -> &T {
        &*self.data
    }
}
impl<'a, T: 'a> DerefMut for MutexGuard<'a, T> {
    /// Exclusive access to the protected value while the lock is held.
    fn deref_mut(&mut self) -> &mut T {
        &mut *self.data
    }
}
impl<'a, T: 'a> Drop for MutexGuard<'a, T> {
fn drop(&mut self) {
unsafe {
let addr = self.pfex_item as *const _ as *const u32;
abi::pfex_release(addr);
}
}
}
|
use parking_lot::{Condvar, Mutex, MutexGuard, RawMutex, WaitTimeoutResult};
use std::{
ops::{Deref, DerefMut},
time::{Duration, Instant},
};
/// A mutex bundled with a condition variable (a classic monitor): guards
/// obtained from it can wait/notify without threading the condvar around.
#[derive(Debug, Default)]
pub struct Monitor<T> {
    mutex: Mutex<T>, // protects the value
    cv: Condvar,     // paired with the mutex for wait/notify
}
impl<T> Monitor<T> {
    /// Creates a monitor owning `t`.
    pub fn new(t: T) -> Self {
        Monitor {
            mutex: Mutex::new(t),
            cv: Condvar::new(),
        }
    }

    /// Blocks until the lock is held; the guard can also wait/notify.
    pub fn lock(&self) -> MonitorGuard<T> {
        MonitorGuard::new(&self.cv, self.mutex.lock())
    }

    /// Non-blocking lock attempt.
    pub fn try_lock(&self) -> Option<MonitorGuard<T>> {
        self.mutex
            .try_lock()
            .map(|g| MonitorGuard::new(&self.cv, g))
    }

    /// Lock attempt that gives up after `timeout` has elapsed.
    pub fn try_lock_for(&self, timeout: Duration) -> Option<MonitorGuard<T>> {
        self.mutex
            .try_lock_for(timeout)
            .map(|g| MonitorGuard::new(&self.cv, g))
    }

    /// Lock attempt that gives up at the `timeout` instant.
    pub fn try_lock_until(&self, timeout: Instant) -> Option<MonitorGuard<T>> {
        self.mutex
            .try_lock_until(timeout)
            .map(|g| MonitorGuard::new(&self.cv, g))
    }

    /// Runs `f` with the lock held (blocking acquire).
    pub fn with_lock<U, F>(&self, f: F) -> U
    where
        F: FnOnce(MonitorGuard<T>) -> U,
    {
        f(self.lock())
    }

    /// Runs `f` only if the lock is immediately available.
    pub fn try_with_lock<U, F>(&self, f: F) -> Option<U>
    where
        F: FnOnce(MonitorGuard<T>) -> U,
    {
        self.try_lock().map(f)
    }

    /// Runs `f` only if the lock is acquired within `timeout`.
    pub fn try_with_lock_for<U, F>(&self, timeout: Duration, f: F) -> Option<U>
    where
        F: FnOnce(MonitorGuard<T>) -> U,
    {
        self.try_lock_for(timeout).map(f)
    }

    /// Runs `f` only if the lock is acquired before the `timeout` instant.
    pub fn try_with_lock_until<U, F>(&self, timeout: Instant, f: F) -> Option<U>
    where
        F: FnOnce(MonitorGuard<T>) -> U,
    {
        self.try_lock_until(timeout).map(f)
    }

    /// Consumes the monitor and returns the protected value.
    pub fn into_inner(self) -> T {
        self.mutex.into_inner()
    }

    /// Direct access through `&mut self`; no locking is needed because
    /// exclusive access is statically guaranteed.
    pub fn get_mut(&mut self) -> &mut T {
        self.mutex.get_mut()
    }

    /// # Safety
    /// Exposes the underlying raw mutex; misuse can violate the guard's
    /// invariants. See `parking_lot::Mutex::raw`.
    pub unsafe fn raw(&self) -> &RawMutex {
        self.mutex.raw()
    }

    /// # Safety
    /// The mutex must be locked by the current context, and no guard may
    /// be used afterwards. See `parking_lot::Mutex::force_unlock`.
    pub unsafe fn force_unlock(&self) {
        self.mutex.force_unlock()
    }

    /// # Safety
    /// Same contract as `force_unlock`, using a fair unlock protocol.
    pub unsafe fn force_unlock_fair(&self) {
        self.mutex.force_unlock_fair()
    }
}
impl<T> From<T> for Monitor<T> {
fn from(t: T) -> Self {
Monitor::new(t)
}
}
/// RAII lock guard that also carries the monitor's condvar, so the holder
/// can wait and notify while the lock is held.
pub struct MonitorGuard<'a, T> {
    cv: &'a Condvar,           // condvar paired with the locked mutex
    guard: MutexGuard<'a, T>,  // the underlying parking_lot guard
}
impl<'a, T> MonitorGuard<'a, T> {
    /// Pairs an already-acquired mutex guard with its monitor's condvar.
    pub fn new(cv: &'a Condvar, guard: MutexGuard<'a, T>) -> Self {
        MonitorGuard { cv, guard }
    }

    /// Wakes one thread waiting on this monitor.
    pub fn notify_one(&self) {
        self.cv.notify_one();
    }

    /// Wakes every thread waiting on this monitor.
    pub fn notify_all(&self) {
        self.cv.notify_all();
    }

    /// Atomically releases the lock and sleeps until notified; the lock
    /// is re-acquired before this returns.
    pub fn wait(&mut self) {
        self.cv.wait(&mut self.guard);
    }

    /// As `wait`, but gives up after `timeout`; the result says whether
    /// the wait timed out.
    pub fn wait_for(&mut self, timeout: Duration) -> WaitTimeoutResult {
        self.cv.wait_for(&mut self.guard, timeout)
    }

    /// As `wait`, but gives up at the `timeout` instant.
    pub fn wait_until(&mut self, timeout: Instant) -> WaitTimeoutResult {
        self.cv.wait_until(&mut self.guard, timeout)
    }
}
impl<T> Deref for MonitorGuard<'_, T> {
    type Target = T;

    /// Reads go straight through to the locked value.
    fn deref(&self) -> &Self::Target {
        &*self.guard
    }
}
impl<T> DerefMut for MonitorGuard<'_, T> {
    /// Writes go straight through to the locked value.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut *self.guard
    }
}
|
use crate::cell::Cell;
use std::cell::UnsafeCell;
/// Runtime borrow state tracked by `RefCell`.
#[derive(Copy, Clone)]
enum RefState {
    // No borrows outstanding.
    Unshared,
    // `n` shared borrows outstanding (n >= 1 while in this state).
    Shared(usize),
    // One exclusive borrow outstanding.
    Exclusive,
}
// implied since we are using UnsafeCell which is !Sync
// impl<T> !Sync for RefCell<T> {}
/// RefCell is a shareable mutable container that allows interior mutability.
///
/// RefCell<T> tracks borrows of the inner value *at runtime*, unlike Rust's native reference
/// types which are entirely tracked statically, at compile time. Because RefCell<T> borrows
/// are dynamic it is possible to attempt to borrow a value that is already mutably borrowed;
/// in this implementation such an attempt does not panic — `borrow`/`borrow_mut` return
/// `None` instead.
pub struct RefCell<T> {
    value: UnsafeCell<T>,   // the protected value; aliasing policed via `state`
    state: Cell<RefState>,  // current borrow state, updated by guards
}
impl<T> RefCell<T> {
    /// Wraps `value` in a new, unborrowed cell.
    pub fn new(value: T) -> Self {
        Self {
            value: UnsafeCell::new(value),
            state: Cell::new(RefState::Unshared),
        }
    }

    /// Hands out a shared borrow, or `None` while an exclusive borrow is live.
    pub fn borrow(&self) -> Option<Ref<'_, T>> {
        let next = match self.state.get() {
            RefState::Unshared => RefState::Shared(1),
            RefState::Shared(share_count) => RefState::Shared(share_count + 1),
            // Exclusively borrowed right now: nothing can be handed out.
            RefState::Exclusive => return None,
        };
        self.state.set(next);
        Some(Ref { refcell: self })
    }

    /// Hands out the exclusive borrow, or `None` while any borrow is live.
    pub fn borrow_mut(&self) -> Option<RefMut<'_, T>> {
        match self.state.get() {
            RefState::Unshared => {
                self.state.set(RefState::Exclusive);
                Some(RefMut { refcell: self })
            }
            _ => None,
        }
    }
}
/// Shared-borrow guard; decrements the cell's share count on drop.
pub struct Ref<'refcell, T> {
    refcell: &'refcell RefCell<T>,
}
impl<T> std::ops::Deref for Ref<'_, T> {
    type Target = T;
    fn deref(&self) -> &Self::Target {
        // SAFETY: a Ref is only created if no exclusive references have been given out. Once it
        // has been given out, state is set to Shared, so no exclusive references are given out.
        // So de-referencing into a shared reference is fine
        unsafe { &*self.refcell.value.get() }
    }
}
impl<T> Drop for Ref<'_, T> {
    /// Releases one shared borrow, returning to `Unshared` when it was the last.
    fn drop(&mut self) {
        // A live Ref guarantees the state is Shared(n) with n >= 1;
        // anything else means the guard invariants were broken.
        let state = &self.refcell.state;
        match state.get() {
            RefState::Shared(1) => state.set(RefState::Unshared),
            RefState::Shared(count) => state.set(RefState::Shared(count - 1)),
            RefState::Exclusive | RefState::Unshared => unreachable!(),
        }
    }
}
/// Exclusive-borrow guard; returns the cell to `Unshared` on drop.
pub struct RefMut<'refcell, T> {
    refcell: &'refcell RefCell<T>,
}
impl<T> std::ops::Deref for RefMut<'_, T> {
    type Target = T;
    fn deref(&self) -> &Self::Target {
        // SAFETY: a RefMut is only created if no shared (or other exclusive) references have been
        // given out. Once it has been given out, state is set to Exclusive, so no shared
        // references are given out. So de-referencing into a mutable reference is fine
        unsafe { &*self.refcell.value.get() }
    }
}
impl<T> std::ops::DerefMut for RefMut<'_, T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: a RefMut only exists while the state is Exclusive, so this
        // is the sole reference to the value; handing out &mut is sound.
        unsafe { &mut *self.refcell.value.get() }
    }
}
impl<T> Drop for RefMut<'_, T> {
    /// Releases the exclusive borrow.
    fn drop(&mut self) {
        // A live RefMut guarantees the state is Exclusive; anything else
        // means the guard invariants were broken.
        let state = &self.refcell.state;
        match state.get() {
            RefState::Exclusive => state.set(RefState::Unshared),
            RefState::Shared(_) | RefState::Unshared => unreachable!(),
        }
    }
}
|
use super::IDT_SIZE;
extern
{
static _asm_irq_handler_array: [u64, ..IDT_SIZE];
}
/// Returns the raw handler address for IRQ vector `num`.
///
/// # Panics
/// Panics if `num >= IDT_SIZE` (bounds-checked indexing).
pub fn get_irq_handler(num: u16) -> u64 {
    // SAFETY: accessing an extern static requires unsafe; the table is
    // presumably written only by the assembly startup code before any
    // reads happen here — TODO confirm against the boot sequence.
    // The index must be usize; the bounds check turns a bad vector into a
    // panic instead of an out-of-bounds read.
    unsafe { _asm_irq_handler_array[num as usize] }
}
|
use alewife;
use core::event;
use glutin;
use std::f32::consts::PI;
use na::{Point3, Vector2, Vector3, Matrix4, Isometry3, Perspective3, Translation3};
use na;
/// Free-flying perspective camera driven by keyboard/mouse events.
pub struct Camera {
    event_queue: alewife::Subscriber<event::EventID, event::Event>,
    eye: Point3<f32>,            // camera position in world space
    pitch: f32,                  // polar angle from +Y, radians (see `at`)
    yaw: f32,                    // azimuth in the XZ plane, radians
    speed: f32,                  // translation units per `update` tick
    rotate_speed: f32,           // radians per pixel of mouse travel
    projection: Perspective3<f32>,
    inv_proj_view: Matrix4<f32>, // cached inverse of `proj_view`
    proj_view: Matrix4<f32>,     // cached projection * view
    cur_mouse_pos: Vector2<f32>,
    prev_mouse_pos: Vector2<f32>,
    // Movement flags toggled by key press/release in `process_input`.
    moving_up: bool,
    moving_down: bool,
    moving_forward: bool,
    moving_backward: bool,
    moving_left: bool,
    moving_right: bool,
    moving_rotating: bool,       // true while the right mouse button is held
}
// TODO: Create a camera builder so perspective settings etc can be tweaked.
impl Camera {
pub fn new(fov: f32,
ratio: f32,
pos: Point3<f32>,
e_que: alewife::Subscriber<event::EventID, event::Event>)
-> Camera {
Camera {
event_queue: e_que,
eye: Point3::new(0.0, 0.0, 0.0),
pitch: 0.0,
yaw: 0.0,
speed: 0.2,
rotate_speed: 0.005,
projection: Perspective3::new(ratio, fov, 0.01, 10000.0),
inv_proj_view: na::zero(),
proj_view: na::zero(),
cur_mouse_pos: na::zero(),
prev_mouse_pos: na::zero(),
moving_up: false,
moving_down: false,
moving_forward: false,
moving_backward: false,
moving_left: false,
moving_right: false,
moving_rotating: false,
}
}
pub fn get_view_proj(&self) -> Matrix4<f32> {
self.proj_view
}
pub fn get_inv_view_proj(&self) -> Matrix4<f32> {
self.inv_proj_view
}
pub fn get_eye(&self) -> Point3<f32> {
self.eye
}
pub fn get_view_matrix(&self) -> Matrix4<f32> {
self.view_transform().to_homogeneous()
}
pub fn get_proj_matrix(&self) -> Matrix4<f32> {
*self.projection.as_matrix()
}
pub fn set_eye(&mut self, eye: Point3<f32>) {
self.eye = eye;
self.update_restrictions();
self.update_proj_view();
}
pub fn set_pitch_deg(&mut self, angle: f32) {}
pub fn set_yaw_deg(&mut self, angle: f32) {}
pub fn set_pitch_rad(&mut self, angle: f32) {}
pub fn set_yaw_rad(&mut self, angle: f32) {}
pub fn at(&self) -> Point3<f32> {
let ax = self.eye.x + self.yaw.cos() * self.pitch.sin();
let ay = self.eye.y + self.pitch.cos();
let az = self.eye.z + self.yaw.sin() * self.pitch.sin();
Point3::new(ax, ay, az)
}
fn view_transform(&self) -> Isometry3<f32> {
Isometry3::look_at_rh(&self.eye, &self.at(), &Vector3::y())
}
pub fn look_at(&mut self, eye: Point3<f32>, pos: Point3<f32>) {
// Squared euclidian norm is faster to calculate
let d = na::distance(&eye, &pos);
let n_pitch = ((pos.y - eye.y) / d).acos();
let n_yaw = (pos.z - eye.z).atan2(pos.x - eye.x);
self.eye = eye;
self.yaw = n_yaw;
self.pitch = n_pitch;
self.update_proj_view();
}
pub fn translate(&mut self, t: &Translation3<f32>) {
let n_eye = t * self.eye;
self.set_eye(n_eye);
}
fn update_proj_view(&mut self) {
self.proj_view = *self.projection.as_matrix() * self.view_transform().to_homogeneous();
// If determinant is 0, aka we cant take inverse, we get None.
// TODO: work around this instead of ignoring failed inversion.
if let Some(inv) = self.proj_view.try_inverse() {
self.inv_proj_view = inv;
}
}
fn handle_rotate(&mut self, delta: Vector2<f32>) {
self.yaw = self.yaw + delta.x * self.rotate_speed;
self.pitch = self.pitch + delta.y * self.rotate_speed;
self.update_restrictions();
self.update_proj_view();
}
fn handle_input(&mut self) -> Vector3<f32> {
let transf = self.view_transform();
let vforward = transf * Vector3::z();
let vright = transf * Vector3::x();
let mut mvm = na::zero::<Vector3<f32>>();
if self.moving_left {
mvm = mvm - vright
}
if self.moving_right {
mvm = mvm + vright
}
if self.moving_forward {
mvm = mvm - vforward
}
if self.moving_backward {
mvm = mvm + vforward
}
if let Some(normalized) = mvm.try_normalize(1.0e-10) {
normalized
} else {
mvm
}
}
fn update_restrictions(&mut self) {
if self.pitch <= 0.01 {
self.pitch = 0.01
}
let _pi: f32 = PI;
if self.pitch > _pi - 0.01 {
self.pitch = _pi - 0.01
}
}
pub fn update(&mut self) {
let events: Vec<_> = self.event_queue.fetch();
for event in events {
match event {
(_, event::Event::SetCameraPos(x, y)) => info!("Move camera lol"),
_ => {}
}
}
if self.moving_rotating {
let mouse_delta = self.cur_mouse_pos - self.prev_mouse_pos;
self.handle_rotate(mouse_delta);
self.prev_mouse_pos = self.cur_mouse_pos;
}
let mvm_dir = self.handle_input();
let mvm = mvm_dir * self.speed;
self.translate(&Translation3::from_vector(mvm));
}
pub fn process_input(&mut self, event: &glutin::Event) {
match event {
&glutin::Event::KeyboardInput(glutin::ElementState::Pressed,
_,
Some(glutin::VirtualKeyCode::Space)) => {
self.moving_up = true;
}
&glutin::Event::KeyboardInput(glutin::ElementState::Released,
_,
Some(glutin::VirtualKeyCode::Space)) => {
self.moving_up = false;
}
&glutin::Event::KeyboardInput(glutin::ElementState::Pressed,
_,
Some(glutin::VirtualKeyCode::Down)) => {
self.moving_down = true;
}
&glutin::Event::KeyboardInput(glutin::ElementState::Released,
_,
Some(glutin::VirtualKeyCode::Down)) => {
self.moving_down = false;
}
&glutin::Event::KeyboardInput(glutin::ElementState::Pressed,
_,
Some(glutin::VirtualKeyCode::A)) => {
self.moving_left = true;
}
&glutin::Event::KeyboardInput(glutin::ElementState::Released,
_,
Some(glutin::VirtualKeyCode::A)) => {
self.moving_left = false;
}
&glutin::Event::KeyboardInput(glutin::ElementState::Pressed,
_,
Some(glutin::VirtualKeyCode::D)) => {
self.moving_right = true;
}
&glutin::Event::KeyboardInput(glutin::ElementState::Released,
_,
Some(glutin::VirtualKeyCode::D)) => {
self.moving_right = false;
}
&glutin::Event::KeyboardInput(glutin::ElementState::Pressed,
_,
Some(glutin::VirtualKeyCode::W)) => {
self.moving_forward = true;
}
&glutin::Event::KeyboardInput(glutin::ElementState::Released,
_,
Some(glutin::VirtualKeyCode::W)) => {
self.moving_forward = false;
}
&glutin::Event::KeyboardInput(glutin::ElementState::Pressed,
_,
Some(glutin::VirtualKeyCode::S)) => {
self.moving_backward = true;
}
&glutin::Event::KeyboardInput(glutin::ElementState::Released,
_,
Some(glutin::VirtualKeyCode::S)) => {
self.moving_backward = false;
}
&glutin::Event::MouseInput(glutin::ElementState::Pressed,
glutin::MouseButton::Right) => {
self.moving_rotating = true;
}
&glutin::Event::MouseInput(glutin::ElementState::Released,
glutin::MouseButton::Right) => {
self.moving_rotating = false;
}
&glutin::Event::MouseMoved(x, y) => {
self.cur_mouse_pos = Vector2::new(x as f32, y as f32);
}
_ => {}
}
}
}
|
use memory::WasmStack;
use arch::context::ThreadContext;
use arch::cpu::Local;
use nabi::{Result, Error};
use nil::mem::Bin;
/// The current state of a process.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum State {
    /// The thread is currently executing.
    Running,
    /// This thread is not currently running, but it's ready to execute.
    /// For example, in the cpu thread queue.
    Ready,
    /// The thread has been suspended and cannot be run right now
    /// (the initial state of a freshly created thread).
    Suspended,
    /// The thread is blocked (waiting on something external).
    Blocked,
    /// It's dead, Jim. A dead thread is never scheduled again.
    Dead,
}
/// A single thread of execution.
#[allow(dead_code)]
pub struct Thread {
    pub state: State,    // scheduler-visible lifecycle state
    ctx: ThreadContext,  // saved register context used by `swap`
    stack: WasmStack,    // owned stack, kept alive for the thread's lifetime
    entry: usize,        // raw address of the entry closure (read in the trampoline)
}
impl Thread {
    /// Creates a suspended thread that will run `entry` on a freshly
    /// allocated stack of `stack_size` bytes.
    ///
    /// Fails with `Error::NO_MEMORY` when the stack cannot be allocated.
    pub fn new<F>(stack_size: usize, entry: Bin<F>) -> Result<Thread>
        where F: FnOnce() + Send + Sync
    {
        let stack = WasmStack::allocate(stack_size)
            .ok_or(Error::NO_MEMORY)?;
        let thread = Thread {
            state: State::Suspended,
            // Execution starts at the stack top and enters through the
            // monomorphized trampoline for F.
            ctx: ThreadContext::new(stack.top(), common_thread_entry::<F>),
            stack,
            // The closure leaves the Bin as a raw address; the trampoline
            // reads it back as an F and takes ownership of it.
            entry: entry.into_nonnull().as_ptr() as *const () as usize,
        };
        Ok(thread)
    }

    /// Switches register context from `self` to `other`.
    ///
    /// # Safety
    /// Both contexts must be valid to switch between; presumably only the
    /// scheduler upholds this — TODO confirm the calling discipline.
    pub unsafe fn swap(&mut self, other: &Thread) {
        self.ctx.swap(&other.ctx);
    }
}
/// Trampoline every new thread starts in: runs the stored entry closure,
/// marks the thread dead, and yields back to the scheduler permanently.
extern fn common_thread_entry<F>()
    where F: FnOnce() + Send + Sync
{
    // The current-thread pointer comes from the per-CPU `Local`.
    let thread = unsafe { &mut *Local::current_thread().as_ptr() };
    // Reinterpret the stored address as an F and take ownership of it
    // (must happen exactly once per thread, which this entry guarantees).
    let f = unsafe { (thread.entry as *const F).read() };
    f();
    thread.state = State::Dead;
    unsafe {
        // Hand control back to the scheduler; a Dead thread is not resumed.
        Local::current()
            .scheduler
            .switch();
    }
    unreachable!();
}
|
use std::collections::HashSet;
fn main() {
    let mut anyone_total = 0;
    let mut everyone_total = 0;
    let mut anyone = HashSet::new();
    let mut everyone = HashSet::new();
    let mut group_started = false;
    for line in aoc::read_lines_as_vec("./day6.txt") {
        if line.is_empty() {
            // Blank line ends a group: tally both counts and reset.
            anyone_total += anyone.len();
            anyone.clear();
            everyone_total += everyone.len();
            everyone.clear();
            group_started = false;
            continue;
        }
        let person: HashSet<_> = line.chars().collect();
        anyone.extend(line.chars());
        if group_started {
            everyone = everyone.intersection(&person).cloned().collect();
        } else {
            // First person seeds the "everyone answered" set.
            everyone = person;
            group_started = true;
        }
    }
    // The file may not end with a blank line, so flush the final group.
    anyone_total += anyone.len();
    everyone_total += everyone.len();
    println!("Total Answered Q's: {}", anyone_total);
    println!("Total Answered Q's 2: {}", everyone_total);
}
|
#[doc = r"Value read from the register"]
pub struct R {
    bits: u32, // raw snapshot of HB16TIME taken by `read`/`modify`
}
#[doc = r"Value to write to the register"]
pub struct W {
    bits: u32, // raw value that will be committed to HB16TIME
}
// svd2rust-style generated accessor block for HB16TIME — presumably
// machine-generated; prefer regenerating from the SVD over hand edits.
impl super::HB16TIME {
    #[doc = r"Modifies the contents of the register"]
    #[inline(always)]
    pub fn modify<F>(&self, f: F)
    where
        for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
    {
        // Read-modify-write: W starts from the current hardware value,
        // so fields the closure doesn't touch keep their state.
        let bits = self.register.get();
        self.register.set(f(&R { bits }, &mut W { bits }).bits);
    }
    #[doc = r"Reads the contents of the register"]
    #[inline(always)]
    pub fn read(&self) -> R {
        R {
            bits: self.register.get(),
        }
    }
    #[doc = r"Writes to the register"]
    #[inline(always)]
    pub fn write<F>(&self, f: F)
    where
        F: FnOnce(&mut W) -> &mut W,
    {
        // Unlike `modify`, W starts from the reset value: untouched
        // fields are written back as their reset defaults.
        self.register.set(
            f(&mut W {
                bits: Self::reset_value(),
            })
            .bits,
        );
    }
    #[doc = r"Reset value of the register"]
    #[inline(always)]
    pub const fn reset_value() -> u32 {
        0
    }
    #[doc = r"Writes the reset value to the register"]
    #[inline(always)]
    pub fn reset(&self) {
        self.register.set(Self::reset_value())
    }
}
#[doc = r"Value of the field"]
pub struct EPI_HB16TIME_RDWSMR {
    bits: bool, // RDWSM value extracted from register bit 0
}
impl EPI_HB16TIME_RDWSMR {
    #[doc = r"Value of the field as raw bits"]
    #[inline(always)]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r"Returns `true` if the bit is clear (0)"]
    #[inline(always)]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit()
    }
    #[doc = r"Returns `true` if the bit is set (1)"]
    #[inline(always)]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
}
#[doc = r"Proxy"]
pub struct _EPI_HB16TIME_RDWSMW<'a> {
    w: &'a mut W, // builder this proxy writes back into
}
impl<'a> _EPI_HB16TIME_RDWSMW<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // RDWSM occupies bit 0: clear it, then or-in the new value.
        self.w.bits &= !(1 << 0);
        self.w.bits |= ((value as u32) & 1) << 0;
        self.w
    }
}
#[doc = r"Value of the field"]
pub struct EPI_HB16TIME_WRWSMR {
    bits: bool, // WRWSM value extracted from register bit 4
}
impl EPI_HB16TIME_WRWSMR {
    #[doc = r"Value of the field as raw bits"]
    #[inline(always)]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r"Returns `true` if the bit is clear (0)"]
    #[inline(always)]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit()
    }
    #[doc = r"Returns `true` if the bit is set (1)"]
    #[inline(always)]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
}
#[doc = r"Proxy"]
pub struct _EPI_HB16TIME_WRWSMW<'a> {
    w: &'a mut W, // builder this proxy writes back into
}
impl<'a> _EPI_HB16TIME_WRWSMW<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // WRWSM occupies bit 4: clear it, then or-in the new value.
        self.w.bits &= !(1 << 4);
        self.w.bits |= ((value as u32) & 1) << 4;
        self.w
    }
}
#[doc = r"Value of the field"]
pub struct EPI_HB16TIME_CAPWIDTHR {
    bits: u8, // 2-bit CAPWIDTH value (register bits 12:13)
}
impl EPI_HB16TIME_CAPWIDTHR {
    #[doc = r"Value of the field as raw bits"]
    #[inline(always)]
    pub fn bits(&self) -> u8 {
        self.bits
    }
}
#[doc = r"Proxy"]
pub struct _EPI_HB16TIME_CAPWIDTHW<'a> {
    w: &'a mut W, // builder this proxy writes back into
}
impl<'a> _EPI_HB16TIME_CAPWIDTHW<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    // Safety note: out-of-range values are masked to 2 bits, i.e. silently
    // truncated rather than rejected.
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // CAPWIDTH occupies bits 12:13 (2-bit mask 0x3).
        self.w.bits &= !(3 << 12);
        self.w.bits |= ((value as u32) & 3) << 12;
        self.w
    }
}
#[doc = "Possible values of the field `EPI_HB16TIME_PSRAMSZ`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EPI_HB16TIME_PSRAMSZR {
    #[doc = "No row size limitation"]
    EPI_HB16TIME_PSRAMSZ_0,
    #[doc = "128 B"]
    EPI_HB16TIME_PSRAMSZ_128B,
    #[doc = "256 B"]
    EPI_HB16TIME_PSRAMSZ_256B,
    #[doc = "512 B"]
    EPI_HB16TIME_PSRAMSZ_512B,
    #[doc = "1024 B"]
    EPI_HB16TIME_PSRAMSZ_1KB,
    #[doc = "2048 B"]
    EPI_HB16TIME_PSRAMSZ_2KB,
    #[doc = "4096 B"]
    EPI_HB16TIME_PSRAMSZ_4KB,
    #[doc = "8192 B"]
    EPI_HB16TIME_PSRAMSZ_8KB,
}
impl EPI_HB16TIME_PSRAMSZR {
    #[doc = r"Value of the field as raw bits"]
    #[inline(always)]
    pub fn bits(&self) -> u8 {
        match *self {
            EPI_HB16TIME_PSRAMSZR::EPI_HB16TIME_PSRAMSZ_0 => 0,
            EPI_HB16TIME_PSRAMSZR::EPI_HB16TIME_PSRAMSZ_128B => 1,
            EPI_HB16TIME_PSRAMSZR::EPI_HB16TIME_PSRAMSZ_256B => 2,
            EPI_HB16TIME_PSRAMSZR::EPI_HB16TIME_PSRAMSZ_512B => 3,
            EPI_HB16TIME_PSRAMSZR::EPI_HB16TIME_PSRAMSZ_1KB => 4,
            EPI_HB16TIME_PSRAMSZR::EPI_HB16TIME_PSRAMSZ_2KB => 5,
            EPI_HB16TIME_PSRAMSZR::EPI_HB16TIME_PSRAMSZ_4KB => 6,
            EPI_HB16TIME_PSRAMSZR::EPI_HB16TIME_PSRAMSZ_8KB => 7,
        }
    }
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline(always)]
    pub fn _from(value: u8) -> EPI_HB16TIME_PSRAMSZR {
        // Callers pass a 3-bit masked value, so 0..=7 is exhaustive and
        // the wildcard arm is genuinely unreachable.
        match value {
            0 => EPI_HB16TIME_PSRAMSZR::EPI_HB16TIME_PSRAMSZ_0,
            1 => EPI_HB16TIME_PSRAMSZR::EPI_HB16TIME_PSRAMSZ_128B,
            2 => EPI_HB16TIME_PSRAMSZR::EPI_HB16TIME_PSRAMSZ_256B,
            3 => EPI_HB16TIME_PSRAMSZR::EPI_HB16TIME_PSRAMSZ_512B,
            4 => EPI_HB16TIME_PSRAMSZR::EPI_HB16TIME_PSRAMSZ_1KB,
            5 => EPI_HB16TIME_PSRAMSZR::EPI_HB16TIME_PSRAMSZ_2KB,
            6 => EPI_HB16TIME_PSRAMSZR::EPI_HB16TIME_PSRAMSZ_4KB,
            7 => EPI_HB16TIME_PSRAMSZR::EPI_HB16TIME_PSRAMSZ_8KB,
            _ => unreachable!(),
        }
    }
    #[doc = "Checks if the value of the field is `EPI_HB16TIME_PSRAMSZ_0`"]
    #[inline(always)]
    pub fn is_epi_hb16time_psramsz_0(&self) -> bool {
        *self == EPI_HB16TIME_PSRAMSZR::EPI_HB16TIME_PSRAMSZ_0
    }
    #[doc = "Checks if the value of the field is `EPI_HB16TIME_PSRAMSZ_128B`"]
    #[inline(always)]
    pub fn is_epi_hb16time_psramsz_128b(&self) -> bool {
        *self == EPI_HB16TIME_PSRAMSZR::EPI_HB16TIME_PSRAMSZ_128B
    }
    #[doc = "Checks if the value of the field is `EPI_HB16TIME_PSRAMSZ_256B`"]
    #[inline(always)]
    pub fn is_epi_hb16time_psramsz_256b(&self) -> bool {
        *self == EPI_HB16TIME_PSRAMSZR::EPI_HB16TIME_PSRAMSZ_256B
    }
    #[doc = "Checks if the value of the field is `EPI_HB16TIME_PSRAMSZ_512B`"]
    #[inline(always)]
    pub fn is_epi_hb16time_psramsz_512b(&self) -> bool {
        *self == EPI_HB16TIME_PSRAMSZR::EPI_HB16TIME_PSRAMSZ_512B
    }
    #[doc = "Checks if the value of the field is `EPI_HB16TIME_PSRAMSZ_1KB`"]
    #[inline(always)]
    pub fn is_epi_hb16time_psramsz_1kb(&self) -> bool {
        *self == EPI_HB16TIME_PSRAMSZR::EPI_HB16TIME_PSRAMSZ_1KB
    }
    #[doc = "Checks if the value of the field is `EPI_HB16TIME_PSRAMSZ_2KB`"]
    #[inline(always)]
    pub fn is_epi_hb16time_psramsz_2kb(&self) -> bool {
        *self == EPI_HB16TIME_PSRAMSZR::EPI_HB16TIME_PSRAMSZ_2KB
    }
    #[doc = "Checks if the value of the field is `EPI_HB16TIME_PSRAMSZ_4KB`"]
    #[inline(always)]
    pub fn is_epi_hb16time_psramsz_4kb(&self) -> bool {
        *self == EPI_HB16TIME_PSRAMSZR::EPI_HB16TIME_PSRAMSZ_4KB
    }
    #[doc = "Checks if the value of the field is `EPI_HB16TIME_PSRAMSZ_8KB`"]
    #[inline(always)]
    pub fn is_epi_hb16time_psramsz_8kb(&self) -> bool {
        *self == EPI_HB16TIME_PSRAMSZR::EPI_HB16TIME_PSRAMSZ_8KB
    }
}
#[doc = "Values that can be written to the field `EPI_HB16TIME_PSRAMSZ`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EPI_HB16TIME_PSRAMSZW {
    #[doc = "No row size limitation"]
    EPI_HB16TIME_PSRAMSZ_0,
    #[doc = "128 B"]
    EPI_HB16TIME_PSRAMSZ_128B,
    #[doc = "256 B"]
    EPI_HB16TIME_PSRAMSZ_256B,
    #[doc = "512 B"]
    EPI_HB16TIME_PSRAMSZ_512B,
    #[doc = "1024 B"]
    EPI_HB16TIME_PSRAMSZ_1KB,
    #[doc = "2048 B"]
    EPI_HB16TIME_PSRAMSZ_2KB,
    #[doc = "4096 B"]
    EPI_HB16TIME_PSRAMSZ_4KB,
    #[doc = "8192 B"]
    EPI_HB16TIME_PSRAMSZ_8KB,
}
impl EPI_HB16TIME_PSRAMSZW {
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline(always)]
    // Encodes the variant as the 3-bit value written to bits 16:18.
    pub fn _bits(&self) -> u8 {
        match *self {
            EPI_HB16TIME_PSRAMSZW::EPI_HB16TIME_PSRAMSZ_0 => 0,
            EPI_HB16TIME_PSRAMSZW::EPI_HB16TIME_PSRAMSZ_128B => 1,
            EPI_HB16TIME_PSRAMSZW::EPI_HB16TIME_PSRAMSZ_256B => 2,
            EPI_HB16TIME_PSRAMSZW::EPI_HB16TIME_PSRAMSZ_512B => 3,
            EPI_HB16TIME_PSRAMSZW::EPI_HB16TIME_PSRAMSZ_1KB => 4,
            EPI_HB16TIME_PSRAMSZW::EPI_HB16TIME_PSRAMSZ_2KB => 5,
            EPI_HB16TIME_PSRAMSZW::EPI_HB16TIME_PSRAMSZ_4KB => 6,
            EPI_HB16TIME_PSRAMSZW::EPI_HB16TIME_PSRAMSZ_8KB => 7,
        }
    }
}
#[doc = r"Proxy"]
pub struct _EPI_HB16TIME_PSRAMSZW<'a> {
    w: &'a mut W, // builder this proxy writes back into
}
impl<'a> _EPI_HB16TIME_PSRAMSZW<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: EPI_HB16TIME_PSRAMSZW) -> &'a mut W {
        {
            self.bits(variant._bits())
        }
    }
    #[doc = "No row size limitation"]
    #[inline(always)]
    pub fn epi_hb16time_psramsz_0(self) -> &'a mut W {
        self.variant(EPI_HB16TIME_PSRAMSZW::EPI_HB16TIME_PSRAMSZ_0)
    }
    #[doc = "128 B"]
    #[inline(always)]
    pub fn epi_hb16time_psramsz_128b(self) -> &'a mut W {
        self.variant(EPI_HB16TIME_PSRAMSZW::EPI_HB16TIME_PSRAMSZ_128B)
    }
    #[doc = "256 B"]
    #[inline(always)]
    pub fn epi_hb16time_psramsz_256b(self) -> &'a mut W {
        self.variant(EPI_HB16TIME_PSRAMSZW::EPI_HB16TIME_PSRAMSZ_256B)
    }
    #[doc = "512 B"]
    #[inline(always)]
    pub fn epi_hb16time_psramsz_512b(self) -> &'a mut W {
        self.variant(EPI_HB16TIME_PSRAMSZW::EPI_HB16TIME_PSRAMSZ_512B)
    }
    #[doc = "1024 B"]
    #[inline(always)]
    pub fn epi_hb16time_psramsz_1kb(self) -> &'a mut W {
        self.variant(EPI_HB16TIME_PSRAMSZW::EPI_HB16TIME_PSRAMSZ_1KB)
    }
    #[doc = "2048 B"]
    #[inline(always)]
    pub fn epi_hb16time_psramsz_2kb(self) -> &'a mut W {
        self.variant(EPI_HB16TIME_PSRAMSZW::EPI_HB16TIME_PSRAMSZ_2KB)
    }
    #[doc = "4096 B"]
    #[inline(always)]
    pub fn epi_hb16time_psramsz_4kb(self) -> &'a mut W {
        self.variant(EPI_HB16TIME_PSRAMSZW::EPI_HB16TIME_PSRAMSZ_4KB)
    }
    #[doc = "8192 B"]
    #[inline(always)]
    pub fn epi_hb16time_psramsz_8kb(self) -> &'a mut W {
        self.variant(EPI_HB16TIME_PSRAMSZW::EPI_HB16TIME_PSRAMSZ_8KB)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bits(self, value: u8) -> &'a mut W {
        // PSRAMSZ occupies bits 16:18 (3-bit mask 0x7); every masked value
        // is a valid variant, so this setter is safe (no `unsafe` needed).
        self.w.bits &= !(7 << 16);
        self.w.bits |= ((value as u32) & 7) << 16;
        self.w
    }
}
#[doc = r"Value of the field"]
pub struct EPI_HB16TIME_IRDYDLYR {
    bits: u8, // 2-bit IRDYDLY value (register bits 24:25)
}
impl EPI_HB16TIME_IRDYDLYR {
    #[doc = r"Value of the field as raw bits"]
    #[inline(always)]
    pub fn bits(&self) -> u8 {
        self.bits
    }
}
#[doc = r"Proxy"]
pub struct _EPI_HB16TIME_IRDYDLYW<'a> {
    w: &'a mut W, // builder this proxy writes back into
}
impl<'a> _EPI_HB16TIME_IRDYDLYW<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    // Safety note: out-of-range values are masked to 2 bits, i.e. silently
    // truncated rather than rejected.
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // IRDYDLY occupies bits 24:25 (2-bit mask 0x3).
        self.w.bits &= !(3 << 24);
        self.w.bits |= ((value as u32) & 3) << 24;
        self.w
    }
}
// Field extractors over a snapshot of HB16TIME.
impl R {
    #[doc = r"Value of the register as raw bits"]
    #[inline(always)]
    pub fn bits(&self) -> u32 {
        self.bits
    }
    #[doc = "Bit 0 - Read Wait State Minus One"]
    #[inline(always)]
    pub fn epi_hb16time_rdwsm(&self) -> EPI_HB16TIME_RDWSMR {
        let bits = ((self.bits >> 0) & 1) != 0;
        EPI_HB16TIME_RDWSMR { bits }
    }
    #[doc = "Bit 4 - Write Wait State Minus One"]
    #[inline(always)]
    pub fn epi_hb16time_wrwsm(&self) -> EPI_HB16TIME_WRWSMR {
        let bits = ((self.bits >> 4) & 1) != 0;
        EPI_HB16TIME_WRWSMR { bits }
    }
    #[doc = "Bits 12:13 - CS0n Inter-transfer Capture Width"]
    #[inline(always)]
    pub fn epi_hb16time_capwidth(&self) -> EPI_HB16TIME_CAPWIDTHR {
        let bits = ((self.bits >> 12) & 3) as u8;
        EPI_HB16TIME_CAPWIDTHR { bits }
    }
    #[doc = "Bits 16:18 - PSRAM Row Size"]
    #[inline(always)]
    pub fn epi_hb16time_psramsz(&self) -> EPI_HB16TIME_PSRAMSZR {
        // 3-bit mask guarantees 0..=7, which `_from` maps exhaustively.
        EPI_HB16TIME_PSRAMSZR::_from(((self.bits >> 16) & 7) as u8)
    }
    #[doc = "Bits 24:25 - CS0n Input Ready Delay"]
    #[inline(always)]
    pub fn epi_hb16time_irdydly(&self) -> EPI_HB16TIME_IRDYDLYR {
        let bits = ((self.bits >> 24) & 3) as u8;
        EPI_HB16TIME_IRDYDLYR { bits }
    }
}
// Write-proxy accessors for each field of the EPI_HB16TIME register.
impl W {
    #[doc = r"Writes raw bits to the register"]
    #[inline(always)]
    // `unsafe`: a raw register value can encode reserved/invalid bit patterns.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
    #[doc = "Bit 0 - Read Wait State Minus One"]
    #[inline(always)]
    pub fn epi_hb16time_rdwsm(&mut self) -> _EPI_HB16TIME_RDWSMW {
        _EPI_HB16TIME_RDWSMW { w: self }
    }
    #[doc = "Bit 4 - Write Wait State Minus One"]
    #[inline(always)]
    pub fn epi_hb16time_wrwsm(&mut self) -> _EPI_HB16TIME_WRWSMW {
        _EPI_HB16TIME_WRWSMW { w: self }
    }
    #[doc = "Bits 12:13 - CS0n Inter-transfer Capture Width"]
    #[inline(always)]
    pub fn epi_hb16time_capwidth(&mut self) -> _EPI_HB16TIME_CAPWIDTHW {
        _EPI_HB16TIME_CAPWIDTHW { w: self }
    }
    #[doc = "Bits 16:18 - PSRAM Row Size"]
    #[inline(always)]
    pub fn epi_hb16time_psramsz(&mut self) -> _EPI_HB16TIME_PSRAMSZW {
        _EPI_HB16TIME_PSRAMSZW { w: self }
    }
    #[doc = "Bits 24:25 - CS0n Input Ready Delay"]
    #[inline(always)]
    pub fn epi_hb16time_irdydly(&mut self) -> _EPI_HB16TIME_IRDYDLYW {
        _EPI_HB16TIME_IRDYDLYW { w: self }
    }
}
|
use std::io::{self, Write};
use super::bus::BusDevice;
/// Bus device that forwards guest writes to the host's stdout, in the
/// style of QEMU's debug console device.
pub struct QemuDebugConsole {}

impl QemuDebugConsole {
    /// Construct a new (stateless) debug console device.
    pub fn new() -> Self {
        Self {}
    }
}
impl BusDevice for QemuDebugConsole {
    /// Forward bytes written by the guest straight to host stdout.
    fn write(&mut self, _offset: u64, data: &[u8]) {
        // BUG FIX: `Write::write` may perform a partial write and the
        // original discarded the byte count, silently dropping output.
        // `write_all` retries until every byte is written.
        let mut out = io::stdout();
        out.write_all(data).unwrap();
        // Flush so guest output appears immediately even without a newline.
        out.flush().unwrap();
    }
    /// A 1-byte read at offset 0 returns 0xe9 (the value the guest probes
    /// for to detect the debug console); other reads leave `data` untouched.
    fn read(&mut self, offset: u64, data: &mut [u8]) {
        if data.len() == 1 && offset == 0 {
            data[0] = 0xe9;
        }
    }
}
|
#![crate_name = "uu_cp"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Jordy Dickinson <jordy.dickinson@gmail.com>
*
* For the full copyright and license information, please view the LICENSE file
* that was distributed with this source code.
*/
extern crate getopts;
#[macro_use]
extern crate uucore;
use getopts::Options;
use std::fs;
use std::io::{ErrorKind, Result, Write};
use std::path::Path;
use uucore::fs::{canonicalize, CanonicalizeMode};
/// Which top-level action this invocation requested.
#[derive(Clone, Eq, PartialEq)]
pub enum Mode {
    Copy,    // normal operation: copy SOURCE(s) to DEST
    Help,    // --help
    Version, // --version
}
// Utility name and version reported by --help / --version.
static NAME: &'static str = "cp";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
/// Entry point: parse CLI options and dispatch to copy/help/version.
/// Always returns 0; failures abort via `panic!` after printing an error
/// through `show_error!`.
pub fn uumain(args: Vec<String>) -> i32 {
    let mut opts = Options::new();
    opts.optflag("h", "help", "display this help and exit");
    opts.optflag("", "version", "output version information and exit");
    let matches = match opts.parse(&args[1..]) {
        Ok(m) => m,
        Err(e) => {
            show_error!("{}", e);
            panic!()
        },
    };
    let usage = opts.usage("Copy SOURCE to DEST, or multiple SOURCE(s) to DIRECTORY.");
    // --version takes precedence over --help; anything else is a copy.
    let mode = if matches.opt_present("version") {
        Mode::Version
    } else if matches.opt_present("help") {
        Mode::Help
    } else {
        Mode::Copy
    };
    match mode {
        Mode::Copy => copy(matches),
        Mode::Help => help(&usage),
        Mode::Version => version(),
    }
    0
}
/// Print "<name> <version>" to stdout.
fn version() {
    let banner = format!("{} {}", NAME, VERSION);
    println!("{}", banner);
}
/// Print the usage banner followed by the getopts-generated option summary.
fn help(usage: &str) {
    // Single format string equivalent to the original's backslash-continued
    // literal (continuations strip the following line's leading whitespace).
    let msg = format!(
        "{0} {1}\n\nUsage: {0} SOURCE DEST\n or: {0} SOURCE... DIRECTORY\n or: {0} -t DIRECTORY SOURCE\n\n{2}",
        NAME, VERSION, usage
    );
    println!("{}", msg);
}
/// Copy one or more SOURCE paths to DEST: a single source is copied onto
/// DEST directly; multiple sources are copied into the directory DEST.
/// Errors are reported via `show_error!` then abort with `panic!`,
/// matching the original control flow.
fn copy(matches: getopts::Matches) {
    let sources: Vec<String> = if matches.free.is_empty() {
        show_error!("Missing SOURCE argument. Try --help.");
        panic!()
    } else {
        // All but the last argument:
        matches.free[..matches.free.len() - 1].iter().cloned().collect()
    };
    let dest = if matches.free.len() < 2 {
        show_error!("Missing DEST argument. Try --help.");
        panic!()
    } else {
        // Only the last argument:
        Path::new(&matches.free[matches.free.len() - 1])
    };
    assert!(sources.len() >= 1);
    if sources.len() == 1 {
        let source = Path::new(&sources[0]);
        // A missing source is not "the same file"; fs::copy below reports
        // the real error.
        let same_file = paths_refer_to_same_file(source, dest).unwrap_or_else(|err| {
            match err.kind() {
                ErrorKind::NotFound => false,
                _ => {
                    show_error!("{}", err);
                    panic!()
                }
            }
        });
        if same_file {
            show_error!("\"{}\" and \"{}\" are the same file",
                        source.display(),
                        dest.display());
            panic!();
        }
        if let Err(err) = fs::copy(source, dest) {
            show_error!("{}", err);
            panic!();
        }
    } else {
        if !dest.is_dir() {
            show_error!("TARGET must be a directory");
            panic!();
        }
        for src in &sources {
            let source = Path::new(&src);
            if !source.is_file() {
                show_error!("\"{}\" is not a file", source.display());
                continue;
            }
            let mut full_dest = dest.to_path_buf();
            // BUG FIX: the original pushed the *entire* source path
            // (`source.to_str()`) onto the destination directory, producing
            // bogus nested paths for relative sources and — because
            // `PathBuf::push` with an absolute path replaces the base —
            // overwriting arbitrary locations for absolute sources. Only the
            // final file name belongs under DEST. `file_name()` is Some here
            // because `source.is_file()` was checked above.
            full_dest.push(source.file_name().unwrap());
            println!("{}", full_dest.display());
            let io_result = fs::copy(source, full_dest);
            if let Err(err) = io_result {
                show_error!("{}", err);
                panic!()
            }
        }
    }
}
pub fn paths_refer_to_same_file(p1: &Path, p2: &Path) -> Result<bool> {
// We have to take symlinks and relative paths into account.
let pathbuf1 = try!(canonicalize(p1, CanonicalizeMode::Normal));
let pathbuf2 = try!(canonicalize(p2, CanonicalizeMode::Normal));
Ok(pathbuf1 == pathbuf2)
}
|
use renderer::gl_vertex_format::{get_attribute_format, FVec3, VertexFormat};
use renderer::offset_of;
use std::mem::size_of;
use std::os::raw::c_void;
/// A single position+color vertex as laid out in the vertex buffer.
// `repr(C)` fixes field order/offsets so `offset_of!` in the VertexFormat
// impl below describes the actual memory layout.
#[repr(C)]
pub struct Vertex {
    position: FVec3, // x, y, z coordinates
    color: FVec3,    // r, g, b stored in the vector's x, y, z slots
}
impl Vertex {
    /// Build a vertex from position (x, y, z) and color (r, g, b) components.
    pub fn new(x: f32, y: f32, z: f32, r: f32, g: f32, b: f32) -> Self {
        let position = FVec3 { x, y, z };
        let color = FVec3 { x: r, y: g, z: b };
        Self { position, color }
    }
}
impl VertexFormat for Vertex {
    /// Size in bytes of one vertex (used as the attribute stride).
    fn size() -> usize {
        size_of::<Self>()
    }
    /// Describe each attribute of this vertex; presumably the tuple is
    /// (component count, GL type enum, normalized flag, byte offset) —
    /// verify against `get_attribute_format`'s definition.
    fn on_vertex_layout() -> Vec<(i32, u32, u8, *const c_void)> {
        // SAFETY-NOTE(review): `get_attribute_format` is unsafe; it is given
        // offsets computed by `offset_of!` on this #[repr(C)] struct, so the
        // offsets match the real layout — confirm its exact contract.
        unsafe {
            vec![
                get_attribute_format::<FVec3>(offset_of!(Self, position)),
                get_attribute_format::<FVec3>(offset_of!(Self, color)),
            ]
        }
    }
}
|
//! Generating a set of points for one measurement configuration
#![allow(clippy::result_large_err)]
use crate::{
field::FieldGeneratorImpl,
specification, substitution,
tag_pair::TagPair,
tag_set::{GeneratedTagSets, TagSet},
};
use influxdb2_client::models::WriteDataPoint;
use serde_json::json;
use snafu::{OptionExt, ResultExt, Snafu};
use std::{
fmt::Debug,
sync::{Arc, Mutex},
};
/// Measurement-specific Results
pub type Result<T, E = Error> = std::result::Result<T, E>;
/// Errors that may happen while creating measurements
#[derive(Snafu, Debug)]
#[allow(missing_docs)]
pub enum Error {
    // Failure from the influxdb2 client's DataPoint builder.
    #[snafu(display(
        "Could not build data point for measurement `{}` with Influx Client, caused by:\n{}",
        name,
        source
    ))]
    InfluxDataPointError {
        name: String,
        source: influxdb2_client::models::data_point::DataPointError,
    },
    // Template substitution failed while rendering the measurement name.
    #[snafu(display("Could not create measurement name, caused by:\n{}", source))]
    CouldNotCreateMeasurementName { source: crate::substitution::Error },
    #[snafu(display(
        "Could not create field generator sets for measurement `{}`, caused by:\n{}",
        name,
        source
    ))]
    CouldNotCreateFieldGeneratorSets {
        name: String,
        source: crate::field::Error,
    },
    // The spec named a tag set that was never generated.
    #[snafu(display(
        "Tag set {} referenced not found for measurement {}",
        tag_set,
        measurement
    ))]
    GeneratedTagSetNotFound {
        tag_set: String,
        measurement: String,
    },
    #[snafu(display("Could not compile template `{}`, caused by:\n{}", template, source))]
    CantCompileTemplate {
        source: handlebars::TemplateError,
        template: String,
    },
    #[snafu(display("Could not render template `{}`, caused by:\n{}", template, source))]
    CantRenderTemplate {
        source: handlebars::RenderError,
        template: String,
    },
    #[snafu(display("Error creating measurement tag pairs: {}", source))]
    CouldNotCreateMeasurementTagPairs { source: crate::tag_pair::Error },
}
/// Generate measurements
#[derive(Debug)]
pub struct MeasurementGenerator {
    // Shared, lockable measurement state; the Arc lets cheaply-cloned
    // `LineToGenerate` handles evaluate the line lazily on write.
    measurement: Arc<Mutex<Measurement>>,
}
impl MeasurementGenerator {
    /// Create the count specified number of measurement generators from
    /// the passed `MeasurementSpec`
    pub fn from_spec(
        agent_id: usize,
        spec: &specification::MeasurementSpec,
        execution_start_time: i64,
        generated_tag_sets: &GeneratedTagSets,
        agent_tag_pairs: &[Arc<TagPair>],
    ) -> Result<Vec<Self>> {
        // 1-based measurement ids: `(1..count+1)` yields exactly
        // `spec.count` (default 1) generators with ids starting at 1.
        let count = spec.count.unwrap_or(1) + 1;
        (1..count)
            .map(|measurement_id| {
                Self::new(
                    agent_id,
                    measurement_id,
                    spec,
                    execution_start_time,
                    generated_tag_sets,
                    agent_tag_pairs,
                )
            })
            .collect::<Result<Vec<_>>>()
    }
    /// Create a new way to generate measurements from a specification
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        agent_id: usize,
        measurement_id: usize,
        spec: &specification::MeasurementSpec,
        execution_start_time: i64,
        generated_tag_sets: &GeneratedTagSets,
        agent_tag_pairs: &[Arc<TagPair>],
    ) -> Result<Self> {
        // Render the (possibly templated) measurement name once up front;
        // it is reused in the field and tag template data below.
        let measurement_name = substitution::render_once(
            "measurement",
            &spec.name,
            &json!({
                "agent": {"id": agent_id},
                "measurement": {"id": measurement_id},
            }),
        )
        .context(CouldNotCreateMeasurementNameSnafu)?;
        // Each field spec may expand to several generators (flatten).
        let fields = spec
            .fields
            .iter()
            .map(|field_spec| {
                let data = json!({
                    "agent": {"id": agent_id},
                    "measurement": {"id": measurement_id, "name": &measurement_name},
                });
                FieldGeneratorImpl::from_spec(field_spec, data, execution_start_time)
            })
            .collect::<crate::field::Result<Vec<_>>>()
            .context(CouldNotCreateFieldGeneratorSetsSnafu {
                name: &measurement_name,
            })?
            .into_iter()
            .flatten()
            .collect();
        // generate the tag pairs
        let template_data = json!({
            "agent": {"id": agent_id},
            "measurement": {"id": measurement_id, "name": &measurement_name},
        });
        let mut tag_pairs = TagPair::pairs_from_specs(&spec.tag_pairs, template_data)
            .context(CouldNotCreateMeasurementTagPairsSnafu)?;
        // Agent-level tag pairs are appended after the measurement's own.
        for t in agent_tag_pairs {
            tag_pairs.push(Arc::clone(t));
        }
        let generated_tag_sets = match &spec.tag_set {
            Some(t) => Arc::clone(generated_tag_sets.sets_for(t).context(
                GeneratedTagSetNotFoundSnafu {
                    tag_set: t,
                    measurement: &measurement_name,
                },
            )?),
            // if there's no generated tag set, just have an empty set as a single row so
            // it can be used to generate the single line that will come out of each generation
            // for this measurement.
            None => Arc::new(vec![TagSet { tags: vec![] }]),
        };
        // I have this gnarly tag ordering construction so that I can keep the pre-generated
        // tag sets in their existing vecs without moving them around so that I can have
        // many thousands of agents and measurements that use the same tagset without blowing
        // up the number of vectors and memory I consume.
        let mut tag_ordering: Vec<_> = tag_pairs
            .iter()
            .enumerate()
            .map(|(i, p)| (p.key(), TagOrdering::Pair(i)))
            .chain(
                generated_tag_sets[0]
                    .tags
                    .iter()
                    .enumerate()
                    .map(|(i, p)| (p.key.to_string(), TagOrdering::Generated(i))),
            )
            .collect();
        // NOTE(review): keys are Strings, so `partial_cmp` is always `Some`;
        // `sort_by(|a, b| a.0.cmp(&b.0))` would be equivalent and infallible.
        tag_ordering.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
        let tag_ordering: Vec<_> = tag_ordering.into_iter().map(|(_, o)| o).collect();
        Ok(Self {
            measurement: Arc::new(Mutex::new(Measurement {
                name: measurement_name,
                tag_pairs,
                generated_tag_sets,
                tag_ordering,
                fields,
            })),
        })
    }
    /// Create a line iterator to generate lines for a single sampling
    pub fn generate(&mut self, timestamp: i64) -> Result<MeasurementLineIterator> {
        Ok(MeasurementLineIterator {
            measurement: Arc::clone(&self.measurement),
            index: 0,
            timestamp,
        })
    }
}
/// Details for the measurement to be generated. Can generate many lines
/// for each sampling.
#[derive(Debug)]
pub struct Measurement {
    name: String,                        // rendered measurement name
    tag_pairs: Vec<Arc<TagPair>>,        // static/regenerating tag pairs
    generated_tag_sets: Arc<Vec<TagSet>>, // one entry per line to generate
    tag_ordering: Vec<TagOrdering>,      // precomputed sorted tag emit order
    fields: Vec<FieldGeneratorImpl>,     // field value generators
}
impl Measurement {
    /// The number of lines that will be generated for each sampling of this measurement.
    pub fn line_count(&self) -> usize {
        self.generated_tag_sets.len()
    }
    /// Write the specified line as line protocol to the passed in writer.
    pub fn write_index_to<W: std::io::Write>(
        &mut self,
        index: usize,
        timestamp: i64,
        mut w: W,
    ) -> std::io::Result<()> {
        write!(w, "{}", self.name)?;
        let row_tags = &self.generated_tag_sets[index].tags;
        // Emit tags in the precomputed key-sorted order, pulling each tag
        // from either the per-row generated set or the pair list.
        for t in &self.tag_ordering {
            match t {
                TagOrdering::Generated(index) => {
                    let t = &row_tags[*index];
                    write!(w, ",{}={}", t.key, t.value)?;
                }
                TagOrdering::Pair(index) => {
                    let t = &self.tag_pairs[*index].as_ref();
                    match t {
                        TagPair::Static(t) => write!(w, ",{}={}", t.key, t.value)?,
                        TagPair::Regenerating(t) => {
                            // Regenerating pairs may change their value as
                            // lines are produced; lock and ask for the
                            // current pair.
                            let mut t = t.lock().expect("mutex poisoned");
                            let p = t.tag_pair();
                            write!(w, ",{}={}", p.key, p.value)?
                        }
                    }
                }
            }
        }
        // Line protocol: a single space separates tags from fields; fields
        // after the first are comma separated.
        for (i, field) in self.fields.iter_mut().enumerate() {
            let d = if i == 0 { b" " } else { b"," };
            w.write_all(d)?;
            match field {
                FieldGeneratorImpl::Bool(f) => {
                    let v = f.generate_value();
                    write!(w, "{}={}", f.name, if v { "t" } else { "f" })?;
                }
                FieldGeneratorImpl::I64(f) => {
                    // Trailing `i` marks an integer field in line protocol.
                    let v = f.generate_value();
                    write!(w, "{}={}i", f.name, v)?;
                }
                FieldGeneratorImpl::F64(f) => {
                    let v = f.generate_value();
                    write!(w, "{}={}", f.name, v)?;
                }
                FieldGeneratorImpl::String(f) => {
                    let v = f.generate_value(timestamp);
                    write!(w, "{}=\"{}\"", f.name, v)?;
                }
                FieldGeneratorImpl::Uptime(f) => match f.kind {
                    specification::UptimeKind::I64 => {
                        let v = f.generate_value();
                        write!(w, "{}={}i", f.name, v)?;
                    }
                    specification::UptimeKind::Telegraf => {
                        let v = f.generate_value_as_string();
                        write!(w, "{}=\"{}\"", f.name, v)?;
                    }
                },
            }
        }
        writeln!(w, " {timestamp}")
    }
}
// Index into either the measurement's tag-pair vec or the per-row generated
// tag set, letting both be emitted in one sorted pass without moving data.
#[derive(Debug)]
enum TagOrdering {
    Pair(usize),
    Generated(usize),
}
/// Iterator to generate the lines for a given measurement
#[derive(Debug)]
pub struct MeasurementLineIterator {
    measurement: Arc<Mutex<Measurement>>,
    index: usize,   // next line index to hand out
    timestamp: i64, // timestamp applied to every line of this sampling
}
impl MeasurementLineIterator {
    /// Number of lines that will be generated for this measurement
    pub fn line_count(&self) -> usize {
        // Fixed typo in the panic message ("poinsoned" -> "poisoned"),
        // matching the spelling used elsewhere in this file.
        let m = self.measurement.lock().expect("mutex poisoned");
        m.line_count()
    }
}
impl Iterator for MeasurementLineIterator {
    type Item = LineToGenerate;
    /// Get the details for the next `LineToGenerate`
    fn next(&mut self) -> Option<Self::Item> {
        // Fixed typo in the panic message ("poinsoned" -> "poisoned"),
        // matching the spelling used elsewhere in this file.
        let m = self.measurement.lock().expect("mutex poisoned");
        if self.index >= m.line_count() {
            None
        } else {
            // Hand out a lazy pointer to line `index`; the line is only
            // rendered when the caller writes it.
            let n = Some(LineToGenerate {
                measurement: Arc::clone(&self.measurement),
                index: self.index,
                timestamp: self.timestamp,
            });
            self.index += 1;
            n
        }
    }
}
/// A pointer to the line to be generated. Will be evaluated when asked to write.
#[derive(Debug)]
pub struct LineToGenerate {
    /// The measurement state to be used to generate the line
    pub measurement: Arc<Mutex<Measurement>>,
    /// The index into the generated tag pairs of the line we're generating
    pub index: usize,
    /// The timestamp of the line that we're generating
    pub timestamp: i64,
}
impl WriteDataPoint for LineToGenerate {
    /// Generate the data and write the line to the passed in writer.
    fn write_data_point_to<W>(&self, w: W) -> std::io::Result<()>
    where
        W: std::io::Write,
    {
        // Rendering mutates field generators (e.g. incrementing values), so
        // the shared measurement must be locked for the duration.
        let mut m = self.measurement.lock().expect("mutex poisoned");
        m.write_index_to(self.index, self.timestamp, w)
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::specification::*;
    use influxdb2_client::models::WriteDataPoint;
    use std::str;
    type Error = Box<dyn std::error::Error>;
    type Result<T = (), E = Error> = std::result::Result<T, E>;
    // Test-only helpers that render generated points into strings.
    impl MeasurementGenerator {
        fn generate_string(&mut self, timestamp: i64) -> Result<String> {
            self.generate_strings(timestamp)
                .map(|mut strings| strings.swap_remove(0))
        }
        fn generate_strings(&mut self, timestamp: i64) -> Result<Vec<String>> {
            let points = self.generate(timestamp)?;
            points
                .into_iter()
                .map(|point| {
                    let mut v = Vec::new();
                    point.write_data_point_to(&mut v)?;
                    Ok(String::from_utf8(v)?)
                })
                .collect()
        }
    }
    #[test]
    fn generate_measurement() -> Result {
        let fake_now = 5678;
        // NOTE(review): the original comment here referred to "the previous
        // test", which does not exist in this module — likely a stale copy.
        let measurement_spec = MeasurementSpec {
            name: "cpu".into(),
            count: Some(2),
            fields: vec![
                FieldSpec {
                    name: "load".into(),
                    field_value_spec: FieldValueSpec::F64 { range: 0.0..100.0 },
                    count: None,
                },
                FieldSpec {
                    name: "response_time".into(),
                    field_value_spec: FieldValueSpec::I64 {
                        range: 0..60_000,
                        increment: false,
                        reset_after: None,
                    },
                    count: None,
                },
            ],
            tag_set: None,
            tag_pairs: vec![],
        };
        let generated_tag_sets = GeneratedTagSets::default();
        let mut measurement_generator =
            MeasurementGenerator::new(0, 0, &measurement_spec, fake_now, &generated_tag_sets, &[])
                .unwrap();
        let line_protocol = vec![measurement_generator.generate_string(fake_now)?];
        let response_times = extract_field_values("response_time", &line_protocol);
        let next_line_protocol = vec![measurement_generator.generate_string(fake_now + 1)?];
        let next_response_times = extract_field_values("response_time", &next_line_protocol);
        // Each line should have a different response time unless we get really, really unlucky
        assert_ne!(response_times, next_response_times);
        Ok(())
    }
    #[test]
    fn generate_measurement_with_basic_tags() -> Result {
        let fake_now = 678;
        let measurement_spec = MeasurementSpec {
            name: "measurement".to_string(),
            count: None,
            tag_set: None,
            tag_pairs: vec![
                TagPairSpec {
                    key: "some_name".to_string(),
                    template: "some_value".to_string(),
                    count: None,
                    regenerate_after_lines: None,
                },
                TagPairSpec {
                    key: "tag_name".to_string(),
                    template: "tag_value".to_string(),
                    count: None,
                    regenerate_after_lines: None,
                },
            ],
            fields: vec![FieldSpec {
                name: "field_name".to_string(),
                field_value_spec: FieldValueSpec::I64 {
                    range: 1..1,
                    increment: false,
                    reset_after: None,
                },
                count: None,
            }],
        };
        let generated_tag_sets = GeneratedTagSets::default();
        let mut measurement_generator =
            MeasurementGenerator::new(0, 0, &measurement_spec, fake_now, &generated_tag_sets, &[])
                .unwrap();
        let line_protocol = measurement_generator.generate_string(fake_now)?;
        assert_eq!(
            line_protocol,
            format!(
                "measurement,some_name=some_value,tag_name=tag_value field_name=1i {fake_now}\n"
            )
        );
        Ok(())
    }
    #[test]
    fn generate_measurement_with_tags_with_count() {
        let fake_now = 678;
        let measurement_spec = MeasurementSpec {
            name: "measurement".to_string(),
            count: None,
            tag_set: None,
            // `count: Some(2)` expands the spec into some_name / some_name2.
            tag_pairs: vec![TagPairSpec {
                key: "some_name".to_string(),
                template: "some_value {{id}}".to_string(),
                count: Some(2),
                regenerate_after_lines: None,
            }],
            fields: vec![FieldSpec {
                name: "field_name".to_string(),
                field_value_spec: FieldValueSpec::I64 {
                    range: 1..1,
                    increment: false,
                    reset_after: None,
                },
                count: None,
            }],
        };
        let generated_tag_sets = GeneratedTagSets::default();
        let mut measurement_generator =
            MeasurementGenerator::new(0, 0, &measurement_spec, fake_now, &generated_tag_sets, &[])
                .unwrap();
        let line_protocol = measurement_generator.generate_string(fake_now).unwrap();
        assert_eq!(
            line_protocol,
            format!(
                "measurement,some_name=some_value 1,some_name2=some_value 2 field_name=1i {fake_now}\n"
            )
        );
    }
    #[test]
    fn regenerating_after_lines() {
        let data_spec: specification::DataSpec = toml::from_str(
            r#"
name = "ex"
[[values]]
name = "foo"
template = "{{id}}"
cardinality = 3
[[tag_sets]]
name = "foo_set"
for_each = ["foo"]
[[agents]]
name = "foo"
[[agents.measurements]]
name = "m1"
tag_set = "foo_set"
tag_pairs = [{key = "reg", template = "data-{{line_number}}", regenerate_after_lines = 2}]
[[agents.measurements.fields]]
name = "val"
i64_range = [3, 3]
[[database_writers]]
agents = [{name = "foo", sampling_interval = "10s"}]"#,
        )
        .unwrap();
        let fake_now = 678;
        let generated_tag_sets = GeneratedTagSets::from_spec(&data_spec).unwrap();
        let mut measurement_generator = MeasurementGenerator::new(
            42,
            1,
            &data_spec.agents[0].measurements[0],
            fake_now,
            &generated_tag_sets,
            &[],
        )
        .unwrap();
        let points = measurement_generator.generate(fake_now).unwrap();
        let mut v = Vec::new();
        for point in points {
            point.write_data_point_to(&mut v).unwrap();
        }
        let line_protocol = str::from_utf8(&v).unwrap();
        // "reg" keeps its value for 2 lines, then regenerates on line 3.
        assert_eq!(
            line_protocol,
            format!(
                "m1,foo=1,reg=data-1 val=3i {fake_now}\nm1,foo=2,reg=data-1 val=3i {fake_now}\nm1,foo=3,reg=data-3 val=3i {fake_now}\n"
            )
        );
    }
    #[test]
    fn tag_set_and_tag_pairs() {
        let data_spec: specification::DataSpec = toml::from_str(
            r#"
name = "ex"
[[values]]
name = "foo"
template = "foo-{{id}}"
cardinality = 2
[[tag_sets]]
name = "foo_set"
for_each = ["foo"]
[[agents]]
name = "foo"
[[agents.measurements]]
name = "m1"
tag_set = "foo_set"
tag_pairs = [{key = "hello", template = "world{{measurement.id}}"}]
[[agents.measurements.fields]]
name = "val"
i64_range = [3, 3]
[[database_writers]]
database_ratio = 1.0
agents = [{name = "foo", sampling_interval = "10s"}]"#,
        )
        .unwrap();
        let fake_now = 678;
        let generated_tag_sets = GeneratedTagSets::from_spec(&data_spec).unwrap();
        let mut measurement_generator = MeasurementGenerator::new(
            42,
            1,
            &data_spec.agents[0].measurements[0],
            fake_now,
            &generated_tag_sets,
            &[],
        )
        .unwrap();
        let points = measurement_generator.generate(fake_now).unwrap();
        let mut v = Vec::new();
        for point in points {
            point.write_data_point_to(&mut v).unwrap();
        }
        let line_protocol = str::from_utf8(&v).unwrap();
        assert_eq!(
            line_protocol,
            format!(
                "m1,foo=foo-1,hello=world1 val=3i {fake_now}\nm1,foo=foo-2,hello=world1 val=3i {fake_now}\n"
            )
        );
    }
    // Pull the value of `field_name` out of each line-protocol line.
    fn extract_field_values<'a>(field_name: &str, lines: &'a [String]) -> Vec<&'a str> {
        lines
            .iter()
            .map(|line| {
                let mut split = line.splitn(2, ' ');
                split.next();
                let after_space = split.next().unwrap();
                let prefix = format!(",{field_name}=");
                let after = after_space.rsplit_once(&prefix).unwrap().1;
                after.split_once(',').map_or(after, |x| x.0)
            })
            .collect()
    }
}
|
use std::{fmt::Display, str::FromStr};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
/// Return type to use for a generated `fn main` (see `ttype`/`instance`).
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone, Copy, Default)]
pub enum MainResult {
    /// fn main() -> () {()}
    #[default]
    Unit,
    /// fn main() -> Result<(), Box<dyn std::error::Error>> {Ok(())}
    /// allows using `?` with no boilerplate
    Result,
}
impl MainResult {
    /// Source text of the type that `fn main` should declare as its return.
    pub(crate) fn ttype(&self) -> &'static str {
        if matches!(self, Self::Unit) {
            "()"
        } else {
            "Result<(), Box<dyn std::error::Error>>"
        }
    }
    /// Source text of a success value of that return type.
    pub(crate) fn instance(&self) -> &'static str {
        if matches!(self, Self::Unit) {
            "()"
        } else {
            "Ok(())"
        }
    }
}
impl FromStr for MainResult {
    type Err = Box<dyn std::error::Error>;
    /// Parse "unit" or "result" (case-insensitively) into a variant.
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        let lowered = s.to_lowercase();
        if lowered == "unit" {
            Ok(MainResult::Unit)
        } else if lowered == "result" {
            Ok(MainResult::Result)
        } else {
            Err("Unknown main result type".into())
        }
    }
}
impl Display for MainResult {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
MainResult::Unit => write!(f, "Unit"),
MainResult::Result => write!(f, "Result<(), Box<dyn std::error::Error>>"),
}
}
}
|
use crate::config::Configuration;
use crate::utils::copy_and_sync_file;
use crate::Platform;
use crate::Result;
use crate::Runnable;
use anyhow::anyhow;
use cargo_metadata::Metadata;
use ignore::WalkBuilder;
use log::{debug, trace};
use std::fs;
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
/// A cargo workspace paired with the dinghy configuration that drives
/// test-data handling for it.
#[derive(Debug)]
pub struct Project {
    pub conf: Arc<Configuration>,
    pub metadata: Metadata,
}
impl Project {
    /// Wrap a shared configuration plus the cargo metadata of the workspace.
    pub fn new(conf: &Arc<Configuration>, metadata: Metadata) -> Project {
        Project {
            conf: Arc::clone(conf),
            metadata,
        }
    }
    /// Root directory of the cargo workspace.
    pub fn project_dir(&self) -> Result<PathBuf> {
        Ok(self.metadata.workspace_root.clone().into_std_path_buf())
    }
    /// Directory used to assemble the overlay for `platform`.
    // NOTE(review): `target_dir(triple)` already ends with the triple, so
    // this resolves to `<target>/<triple>/<triple>` — confirm intentional.
    pub fn overlay_work_dir(&self, platform: &dyn Platform) -> Result<PathBuf> {
        Ok(self
            .target_dir(platform.rustc_triple())?
            .join(platform.rustc_triple()))
    }
    /// `<cargo target directory>/<triple>`.
    pub fn target_dir(&self, triple: &str) -> Result<PathBuf> {
        Ok(self
            .metadata
            .target_directory
            .clone()
            .into_std_path_buf()
            .join(triple))
    }
    /// Create `<exe dir>/../dinghy/<exe name>/test_data` and write a
    /// `test_data.cfg` file mapping each configured test-data id to its
    /// source path, one `id:path` line per entry.
    pub fn link_test_data(&self, runnable: &Runnable) -> Result<PathBuf> {
        let test_data_path = runnable
            .exe
            .parent()
            .and_then(|it| it.parent())
            .map(|it| it.join("dinghy"))
            .map(|it| it.join(runnable.exe.file_name().unwrap()))
            .map(|it| it.join("test_data"))
            .unwrap();
        fs::create_dir_all(&test_data_path)?;
        let test_data_cfg_path = test_data_path.join("test_data.cfg");
        let mut test_data_cfg = File::create(&test_data_cfg_path)?;
        debug!("Generating {}", test_data_cfg_path.display());
        for td in self.conf.test_data.iter() {
            // Resolve the source relative to the directory containing
            // `base`; "/" is the fallback when `base` has no parent.
            let target_path = td
                .base
                .parent()
                .unwrap_or(&PathBuf::from("/"))
                .join(&td.source);
            let target_path = target_path
                .to_str()
                .ok_or_else(|| anyhow!("Invalid UTF-8 path {}", target_path.display()))?;
            test_data_cfg.write_all(td.id.as_bytes())?;
            test_data_cfg.write_all(b":")?;
            test_data_cfg.write_all(target_path.as_bytes())?;
            test_data_cfg.write_all(b"\n")?;
        }
        Ok(test_data_path)
    }
    /// Copy every configured test-data file or directory into
    /// `<app_path>/test_data/<id>`, warning (not failing) when a configured
    /// entry is missing on disk.
    pub fn copy_test_data<T: AsRef<Path>>(&self, app_path: T) -> Result<()> {
        let app_path = app_path.as_ref();
        let test_data_path = app_path.join("test_data");
        fs::create_dir_all(&test_data_path)?;
        for td in self.conf.test_data.iter() {
            let file = td
                .base
                .parent()
                .unwrap_or(&PathBuf::from("/"))
                .join(&td.source);
            if Path::new(&file).exists() {
                let metadata = file.metadata()?;
                let dst = test_data_path.join(&td.id);
                if metadata.is_dir() {
                    rec_copy(file, dst, td.copy_git_ignored)?;
                } else {
                    copy_and_sync_file(file, dst)?;
                }
            } else {
                log::warn!(
                    "configuration required test_data `{:?}` but it could not be found",
                    td
                );
            }
        }
        Ok(())
    }
}
/// Recursively copy `src` into `dst` with no extra exclusion list.
pub fn rec_copy<P1: AsRef<Path>, P2: AsRef<Path>>(
    src: P1,
    dst: P2,
    copy_ignored_test_data: bool,
) -> Result<()> {
    let no_excludes: &[&str] = &[];
    rec_copy_excl(src, dst, copy_ignored_test_data, no_excludes)
}
/// Recursively copy `src` into `dst`, honouring `.gitignore` (unless
/// `copy_ignored_test_data`) and a `.dinghyignore` file, skipping any path
/// that starts with one of `more_exclude`. Files are re-copied only when
/// missing, of a different size, or older than the source.
pub fn rec_copy_excl<P1: AsRef<Path>, P2: AsRef<Path>, P3: AsRef<Path> + ::std::fmt::Debug>(
    src: P1,
    dst: P2,
    copy_ignored_test_data: bool,
    more_exclude: &[P3],
) -> Result<()> {
    let src = src.as_ref();
    let dst = dst.as_ref();
    let ignore_file = src.join(".dinghyignore");
    debug!(
        "Copying recursively from {} to {} excluding {:?}",
        src.display(),
        dst.display(),
        more_exclude
    );
    let mut walker = WalkBuilder::new(src);
    // Honour .gitignore unless the caller asked to copy ignored files too.
    walker.git_ignore(!copy_ignored_test_data);
    walker.add_ignore(ignore_file);
    for entry in walker.build() {
        let entry = entry?;
        let metadata = entry.metadata()?;
        if more_exclude.iter().any(|ex| entry.path().starts_with(ex)) {
            debug!("Exclude {:?}", entry.path());
            continue;
        }
        trace!(
            "Processing entry {:?} is_dir:{:?}",
            entry.path(),
            metadata.is_dir()
        );
        let path = entry.path().strip_prefix(src)?;
        // Check if root path is a file or a directory
        let target = if path.parent().is_none() && metadata.is_file() {
            fs::create_dir_all(
                &dst.parent()
                    .ok_or_else(|| anyhow!("Invalid file {}", dst.display()))?,
            )?;
            dst.to_path_buf()
        } else {
            dst.join(path)
        };
        if metadata.is_dir() {
            if target.exists() && target.is_file() {
                fs::remove_file(&target)?;
            }
            trace!("Creating directory {}", target.display());
            fs::create_dir_all(&target)?;
        } else if metadata.is_file() {
            if target.exists() && !target.is_file() {
                trace!("Remove 2 {:?}", target);
                fs::remove_dir_all(&target)?;
            }
            // Re-copy only when missing, size differs, or source is newer.
            // Reuses the `metadata` fetched above instead of re-stat'ing the
            // source entry twice per comparison.
            if !target.exists()
                || target.metadata()?.len() != metadata.len()
                || target.metadata()?.modified()? < metadata.modified()?
            {
                if target.exists() && target.metadata()?.permissions().readonly() {
                    // BUG FIX: on this branch an existing `target` is a plain
                    // file (directories were removed just above), so the
                    // original `fs::remove_dir_all` always failed with
                    // NotADirectory; a read-only target must be removed with
                    // `remove_file` before it can be overwritten.
                    fs::remove_file(&target)?;
                }
                trace!("Copying {} to {}", entry.path().display(), target.display());
                copy_and_sync_file(entry.path(), &target)?;
            } else {
                trace!("{} is already up-to-date", target.display());
            }
        } else {
            debug!("ignored {:?} ({:?})", path, metadata);
        }
    }
    trace!(
        "Copied recursively from {} to {} excluding {:?}",
        src.display(),
        dst.display(),
        more_exclude
    );
    Ok(())
}
|
//! This example show the pattern "Strings and custom fail type" described in the book
extern crate core;
extern crate failure;
use core::fmt::{self, Display};
use failure::{Backtrace, Context, Fail, ResultExt};
fn main() {
    // Trigger the deepest error and walk back up its cause chain.
    let err = err1().unwrap_err();
    // Print the error itself
    println!("error: {}", err);
    // Print the chain of errors that caused it
    for cause in Fail::iter_causes(&err) {
        println!("caused by: {}", cause);
    }
}
// Three nested layers demonstrating both context flavors on the way up.
fn err1() -> Result<(), MyError> {
    // The context can be a String
    Ok(err2().context("err1".to_string())?)
}
fn err2() -> Result<(), MyError> {
    // The context can be a &'static str
    Ok(err3().context("err2")?)
}
fn err3() -> Result<(), MyError> {
    // Root error, built from a &'static str via the `From` impl below.
    Ok(Err(MyError::from("err3"))?)
}
/// Custom fail type wrapping a `Context<String>` (message plus backtrace).
#[derive(Debug)]
pub struct MyError {
    inner: Context<String>,
}
impl Fail for MyError {
    // Delegate cause and backtrace to the wrapped Context.
    fn cause(&self) -> Option<&Fail> {
        self.inner.cause()
    }
    fn backtrace(&self) -> Option<&Backtrace> {
        self.inner.backtrace()
    }
}
impl Display for MyError {
    // Display the context message itself.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        Display::fmt(&self.inner, f)
    }
}
// Allows writing `MyError::from("oops"))?`
impl From<&'static str> for MyError {
    fn from(msg: &'static str) -> MyError {
        MyError {
            inner: Context::new(msg.into()),
        }
    }
}
// Allows adding more context via a String
impl From<Context<String>> for MyError {
    fn from(inner: Context<String>) -> MyError {
        MyError { inner }
    }
}
// Allows adding more context via a &str
impl From<Context<&'static str>> for MyError {
    fn from(inner: Context<&'static str>) -> MyError {
        MyError {
            // Promote the borrowed context message to an owned String.
            inner: inner.map(|s| s.to_string()),
        }
    }
}
|
use std::ffi::CString;
use std::fmt::Debug;
use wce_formats::binary_reader::BinaryReader;
use wce_formats::GameVersion;
pub mod unit;
pub mod ability;
pub mod item;
pub mod destructable;
pub mod buff;
pub mod doodad;
pub mod upgrade;
/// Four-byte object id code as stored in the object data files.
#[derive(Copy, Clone, PartialOrd, PartialEq, Debug)]
pub struct ObjectIdCode(pub [u8;4]);
/// Four-byte id of a metadata field being modified.
#[derive(Debug)]
pub struct MetaId([u8;4]);
// Aliases distinguishing the stock (original) id from the custom one.
pub type OriginalIdCode = ObjectIdCode;
pub type CustomIdCode = ObjectIdCode;
/// A single modified property value; the variant records the on-disk
/// `vtype` tag it was read with.
#[derive(Debug)]
pub enum VariableValue {
    Integer(i32),
    Real(f32),
    // Stored as f32 like Real but tagged with a distinct vtype in the file.
    Unreal(f32),
    String(String),
    Bool(bool),
    Char(char),
    // All variants below are stored as strings; the variant only records
    // which kind of list/type the string represents.
    UnitList(String),
    ItemList(String),
    RegenType(String),
    AttackType(String),
    WeaponType(String),
    TargetType(String),
    MoveType(String),
    DefenseType(String),
    PathingTexture(String),
    UpgradeList(String),
    StringList(String),
    AbilityList(String),
    HeroAbilityList(String),
    MissileArt(String),
    AttributeType(String),
    AttackBits(String),
}
/// Identifies an object as either a stock object or a custom object
/// derived from a stock one.
#[derive(Debug)]
pub enum ObjectId{
    Original(OriginalIdCode),
    // (base/original id, new custom id)
    Custom(OriginalIdCode, CustomIdCode)
}
impl ObjectId {
    /// Build a custom-object id from its base id and its new custom id.
    pub fn for_custom(original_id: [u8;4], custom_id: [u8;4]) -> Self {
        let base = ObjectIdCode(original_id);
        let custom = ObjectIdCode(custom_id);
        Self::Custom(base, custom)
    }
    /// Build a stock-object id.
    pub fn for_original(original_id: [u8;4]) -> Self {
        Self::Original(ObjectIdCode(original_id))
    }
}
/// One modified metadata field of an object definition.
#[derive(Debug)]
pub struct MetaModification {
    id: MetaId,           // which metadata field is modified
    value: VariableValue, // the new value
    level: i32,           // level/variation; 0 when the format has no optionals
    data_pointer: i32     // data pointer; 0 when the format has no optionals
}
/// An object id plus all metadata modifications applied to it.
#[derive(Debug)]
pub struct ObjectDefinition {
    id: ObjectId,
    modified_datas: Vec<MetaModification>
}
impl ObjectDefinition {
pub fn with_optional(reader: &mut BinaryReader, id: ObjectId, _game_version: &GameVersion) -> Self {
let modif_count = reader.read_u32();
let mut meta_modified = vec![];
for _i in 0..modif_count {
let meta = read_meta_opts(reader, &id);
meta_modified.push(meta);
}
Self {
id,
modified_datas: meta_modified,
}
}
pub fn without_optional(reader: &mut BinaryReader, id: ObjectId, game_version: &GameVersion) -> Self {
let modif_count = reader.read_u32();
let mut meta_modified = vec![];
for _i in 0..modif_count {
let meta = read_meta_no_opts(reader, &id, game_version);
meta_modified.push(meta);
}
Self {
id,
modified_datas: meta_modified,
}
}
}
/// Convert a raw `CString` read from the file into a `String`, panicking
/// with context (object id + meta id) when it is not valid UTF-8.
fn cstring_to_string_meta(cstr: CString, id: &ObjectId, meta_id: &[u8;4]) -> String {
    // `unwrap_or_else` defers building the panic message to the failure path
    // (clippy::expect_fun_call); the original formatted the message on every
    // call, success or not. The conversion error is now included too.
    cstr.into_string().unwrap_or_else(|e| {
        panic!(
            "Failed to read cstring for object '{:?}' of meta '{}': {:?}",
            id,
            String::from_utf8_lossy(meta_id),
            e
        )
    })
}
/// Read one `MetaModification` in the "no optionals" layout:
/// meta id, vtype, value, then a trailing 4-byte token.
/// `level`/`data_pointer` default to 0 in this layout.
fn read_meta_no_opts(reader: &mut BinaryReader, id: &ObjectId, game_version: &GameVersion) -> MetaModification {
    let meta_id = reader.read_bytes(4);
    let meta_id = [meta_id[0],meta_id[1],meta_id[2],meta_id[3]];
    let vtype = reader.read_i32();
    // NOTE(review): vtypes 0-3 are accepted for every game version, but the
    // extended types 4-21 only match `GameVersion::RoC`; any other version
    // hits the panic arm for those values — confirm that is intended.
    let value = match (game_version,vtype) {
        (_,0) => VariableValue::Integer(reader.read_i32()),
        (_,1) => VariableValue::Real(reader.read_f32()),
        (_,2) => VariableValue::Unreal(reader.read_f32()),
        (_,3) => VariableValue::String(cstring_to_string_meta(reader.read_c_string(), id, &meta_id)),
        (GameVersion::RoC,4) => VariableValue::Bool(reader.read_u8() == 1),
        (GameVersion::RoC,5) => VariableValue::Char(reader.read_char()),
        (GameVersion::RoC,6) => VariableValue::UnitList(cstring_to_string_meta(reader.read_c_string(), id, &meta_id)),
        (GameVersion::RoC,7) => VariableValue::ItemList(cstring_to_string_meta(reader.read_c_string(), id, &meta_id)),
        (GameVersion::RoC,8) => VariableValue::RegenType(cstring_to_string_meta(reader.read_c_string(), id, &meta_id)),
        (GameVersion::RoC,9) => VariableValue::AttackType(cstring_to_string_meta(reader.read_c_string(), id, &meta_id)),
        (GameVersion::RoC,10) => VariableValue::WeaponType(cstring_to_string_meta(reader.read_c_string(), id, &meta_id)),
        (GameVersion::RoC,11) => VariableValue::TargetType(cstring_to_string_meta(reader.read_c_string(), id, &meta_id)),
        (GameVersion::RoC,12) => VariableValue::MoveType(cstring_to_string_meta(reader.read_c_string(), id, &meta_id)),
        (GameVersion::RoC,13) => VariableValue::DefenseType(cstring_to_string_meta(reader.read_c_string(), id, &meta_id)),
        (GameVersion::RoC,14) => VariableValue::PathingTexture(cstring_to_string_meta(reader.read_c_string(), id, &meta_id)),
        (GameVersion::RoC,15) => VariableValue::UpgradeList(cstring_to_string_meta(reader.read_c_string(), id, &meta_id)),
        (GameVersion::RoC,16) => VariableValue::StringList(cstring_to_string_meta(reader.read_c_string(), id, &meta_id)),
        (GameVersion::RoC,17) => VariableValue::AbilityList(cstring_to_string_meta(reader.read_c_string(), id, &meta_id)),
        (GameVersion::RoC,18) => VariableValue::HeroAbilityList(cstring_to_string_meta(reader.read_c_string(), id, &meta_id)),
        (GameVersion::RoC,19) => VariableValue::MissileArt(cstring_to_string_meta(reader.read_c_string(), id, &meta_id)),
        (GameVersion::RoC,20) => VariableValue::AttributeType(cstring_to_string_meta(reader.read_c_string(), id, &meta_id)),
        (GameVersion::RoC,21) => VariableValue::AttackBits(cstring_to_string_meta(reader.read_c_string(), id, &meta_id)),
        _ => panic!("Unsupported vtype '{}' for object {:?} on meta '{}'",vtype, id, String::from_utf8_lossy(&meta_id) )
    };
    // Skip the trailing 4-byte end/check token that follows each entry.
    reader.skip(4);
    MetaModification {
        id: MetaId(meta_id),
        value,
        level: 0,
        data_pointer: 0,
    }
}
/// Reads one modification entry in the "opts" layout: 4-byte meta id, value
/// type, level and data pointer, then the typed value, then a trailing 4-byte
/// marker that is skipped.
fn read_meta_opts(reader: &mut BinaryReader, id: &ObjectId) -> MetaModification {
    let meta_id = reader.read_bytes(4);
    let meta_id = [meta_id[0], meta_id[1], meta_id[2], meta_id[3]];
    let vtype = reader.read_i32();
    let level = reader.read_i32();
    let data_pointer = reader.read_i32();
    let value = match vtype {
        0 => VariableValue::Integer(reader.read_i32()),
        1 => VariableValue::Real(reader.read_f32()),
        2 => VariableValue::Unreal(reader.read_f32()),
        3 => {
            let raw = reader.read_c_string();
            // Capture the position up front so the panic path below does not
            // need to borrow `reader` inside the closure.
            let pos = reader.pos();
            // `unwrap_or_else` only builds the panic message on failure; the
            // previous `expect(&format!(..))` allocated it on every read.
            VariableValue::String(raw.into_string().unwrap_or_else(|err| {
                panic!(
                    "Failed to read cstring for object '{:?}' of meta '{}' (byte position {}): {:?}",
                    id,
                    String::from_utf8_lossy(&meta_id),
                    pos,
                    err
                )
            }))
        }
        _ => panic!(
            "Unsupported vtype '{}' for object {:?} on meta '{}' (byte position {})",
            vtype,
            id,
            String::from_utf8_lossy(&meta_id),
            reader.pos()
        ),
    };
    reader.skip(4);
    MetaModification {
        id: MetaId(meta_id),
        value,
        level,
        data_pointer,
    }
}
/// Sanity check for the 4-byte marker that follows a modification list.
///
/// NOTE(review): `end_format_zero` is hard-coded to `true`, so both `assert_eq!`
/// arms are currently dead and this function never checks anything — presumably
/// the sampled maps use a zero terminator rather than an id echo; confirm
/// before re-enabling the comparison.
fn assert_meta_end_format(reader: &BinaryReader, id: &ObjectId, end_meta_id: Vec<u8>) {
    let end_format_zero = true;
    match (end_format_zero, id) {
        (false, ObjectId::Original(code)) => assert_eq!(code.0, end_meta_id.as_slice(),
            "format reading went wrong meta object end '{}' not equal to object id '{}' (byte position {})",
            String::from_utf8_lossy(end_meta_id.as_slice()), String::from_utf8_lossy(&code.0), reader.pos()),
        (false, ObjectId::Custom(_, code)) => assert_eq!(code.0, end_meta_id.as_slice(),
            "format reading went wrong meta object end '{}' not equal to object id '{}' (byte position {})",
            String::from_utf8_lossy(end_meta_id.as_slice()), String::from_utf8_lossy(&code.0), reader.pos()),
        _ => ()
    }
}
//! Processor state stored in the FLAGS, EFLAGS, or RFLAGS register.
use shared::PrivilegeLevel;
/// The RFLAGS register. All variants are backwards compatible so only one
/// bitflags struct is needed.
bitflags! {
    // NOTE(review): legacy bitflags 0.x `pub flags` syntax (comma-separated
    // constants); this requires an old bitflags release — confirm pinned version.
    pub flags Flags: usize {
        /// ID Flag (ID)
        const FLAGS_ID = 1 << 21,
        /// Virtual Interrupt Pending (VIP)
        const FLAGS_VIP = 1 << 20,
        /// Virtual Interrupt Flag (VIF)
        const FLAGS_VIF = 1 << 19,
        /// Alignment Check (AC)
        const FLAGS_AC = 1 << 18,
        /// Virtual-8086 Mode (VM)
        const FLAGS_VM = 1 << 17,
        /// Resume Flag (RF)
        const FLAGS_RF = 1 << 16,
        /// Nested Task (NT)
        const FLAGS_NT = 1 << 14,
        /// I/O Privilege Level (IOPL) 0
        ///
        /// NOTE(review): IOPL is a two-bit field (bits 12-13); `FLAGS_IOPL0` is
        /// the value 0 and therefore cannot be tested with `contains`.
        const FLAGS_IOPL0 = 0 << 12,
        /// I/O Privilege Level (IOPL) 1
        const FLAGS_IOPL1 = 1 << 12,
        /// I/O Privilege Level (IOPL) 2
        const FLAGS_IOPL2 = 2 << 12,
        /// I/O Privilege Level (IOPL) 3
        const FLAGS_IOPL3 = 3 << 12,
        /// Overflow Flag (OF)
        const FLAGS_OF = 1 << 11,
        /// Direction Flag (DF)
        const FLAGS_DF = 1 << 10,
        /// Interrupt Enable Flag (IF)
        const FLAGS_IF = 1 << 9,
        /// Trap Flag (TF)
        const FLAGS_TF = 1 << 8,
        /// Sign Flag (SF)
        const FLAGS_SF = 1 << 7,
        /// Zero Flag (ZF)
        const FLAGS_ZF = 1 << 6,
        /// Auxiliary Carry Flag (AF)
        const FLAGS_AF = 1 << 4,
        /// Parity Flag (PF)
        const FLAGS_PF = 1 << 2,
        /// Bit 1 is always 1.
        const FLAGS_A1 = 1 << 1,
        /// Carry Flag (CF)
        const FLAGS_CF = 1 << 0,
    }
}
impl Flags {
    /// Creates a new Flags entry. Ensures bit 1 is set.
    pub const fn new() -> Flags {
        FLAGS_A1
    }
    /// Creates a new Flags with the given I/O privilege level.
    ///
    /// NOTE(review): unlike `new`, this does not set the always-one bit
    /// (`FLAGS_A1`); callers likely need to OR with `new()` — confirm intent.
    pub const fn from_priv(iopl: PrivilegeLevel) -> Flags {
        Flags { bits: (iopl as usize) << 12 }
    }
}
/// Reads the current FLAGS/EFLAGS/RFLAGS value via `pushf`/`pop`, truncating
/// any bits not modeled by `Flags`.
///
/// NOTE(review): uses the legacy (pre-Rust-1.59) `asm!` constraint syntax, so
/// this only builds on old nightly toolchains — confirm the crate's MSRV.
pub fn flags() -> Flags {
    // 32-bit variant: pushfl/popl.
    #[cfg(target_arch="x86")]
    #[inline(always)]
    unsafe fn inner() -> Flags {
        let r: usize;
        asm!("pushfl; popl $0" : "=r"(r) :: "memory");
        Flags::from_bits_truncate(r)
    }
    // 64-bit variant: pushfq/popq.
    #[cfg(target_arch="x86_64")]
    #[inline(always)]
    unsafe fn inner() -> Flags {
        let r: usize;
        asm!("pushfq; popq $0" : "=r"(r) :: "memory");
        Flags::from_bits_truncate(r)
    }
    // SAFETY: reading the flags register has no memory-safety preconditions.
    unsafe { inner() }
}
/// Loads `val` into the FLAGS/EFLAGS/RFLAGS register via `push`/`popf`.
/// (Same legacy `asm!` syntax caveat as `flags` above.)
pub fn set(val: Flags) {
    #[cfg(target_arch="x86")]
    #[inline(always)]
    unsafe fn inner(val: Flags) {
        asm!("pushl $0; popfl" :: "r"(val.bits()) : "memory" "flags");
    }
    #[cfg(target_arch="x86_64")]
    #[inline(always)]
    unsafe fn inner(val: Flags) {
        asm!("pushq $0; popfq" :: "r"(val.bits()) : "memory" "flags");
    }
    // SAFETY: writing the flags register (e.g. IF/IOPL) is the intended,
    // caller-controlled effect; no memory is accessed beyond the clobbers.
    unsafe { inner(val) }
}
|
// NOTE(review): these appear to be machine-generated Windows Runtime bindings
// (Windows.UI.Core); hand edits would likely be overwritten by the generator.
#![allow(non_snake_case, non_camel_case_types, non_upper_case_globals, clashing_extern_declarations, clippy::all)]
#[cfg(feature = "UI_Core_AnimationMetrics")]
pub mod AnimationMetrics;
#[cfg(feature = "UI_Core_Preview")]
pub mod Preview;
#[link(name = "windows")]
extern "system" {}
// WinRT class/interface handles are emitted as opaque raw pointers.
pub type AcceleratorKeyEventArgs = *mut ::core::ffi::c_void;
// WinRT enums are emitted as `repr(transparent)` i32 newtypes whose variants
// are associated constants.
#[repr(transparent)]
pub struct AppViewBackButtonVisibility(pub i32);
impl AppViewBackButtonVisibility {
    pub const Visible: Self = Self(0i32);
    pub const Collapsed: Self = Self(1i32);
    pub const Disabled: Self = Self(2i32);
}
impl ::core::marker::Copy for AppViewBackButtonVisibility {}
impl ::core::clone::Clone for AppViewBackButtonVisibility {
    fn clone(&self) -> Self {
        *self
    }
}
pub type AutomationProviderRequestedEventArgs = *mut ::core::ffi::c_void;
pub type BackRequestedEventArgs = *mut ::core::ffi::c_void;
pub type CharacterReceivedEventArgs = *mut ::core::ffi::c_void;
pub type ClosestInteractiveBoundsRequestedEventArgs = *mut ::core::ffi::c_void;
#[repr(transparent)]
pub struct CoreAcceleratorKeyEventType(pub i32);
impl CoreAcceleratorKeyEventType {
    pub const Character: Self = Self(2i32);
    pub const DeadCharacter: Self = Self(3i32);
    pub const KeyDown: Self = Self(0i32);
    pub const KeyUp: Self = Self(1i32);
    pub const SystemCharacter: Self = Self(6i32);
    pub const SystemDeadCharacter: Self = Self(7i32);
    pub const SystemKeyDown: Self = Self(4i32);
    pub const SystemKeyUp: Self = Self(5i32);
    pub const UnicodeCharacter: Self = Self(8i32);
}
impl ::core::marker::Copy for CoreAcceleratorKeyEventType {}
impl ::core::clone::Clone for CoreAcceleratorKeyEventType {
    fn clone(&self) -> Self {
        *self
    }
}
// Generated bindings, continued: cursor / dispatcher / input-device enums.
pub type CoreAcceleratorKeys = *mut ::core::ffi::c_void;
pub type CoreComponentInputSource = *mut ::core::ffi::c_void;
pub type CoreCursor = *mut ::core::ffi::c_void;
#[repr(transparent)]
pub struct CoreCursorType(pub i32);
impl CoreCursorType {
    pub const Arrow: Self = Self(0i32);
    pub const Cross: Self = Self(1i32);
    pub const Custom: Self = Self(2i32);
    pub const Hand: Self = Self(3i32);
    pub const Help: Self = Self(4i32);
    pub const IBeam: Self = Self(5i32);
    pub const SizeAll: Self = Self(6i32);
    pub const SizeNortheastSouthwest: Self = Self(7i32);
    pub const SizeNorthSouth: Self = Self(8i32);
    pub const SizeNorthwestSoutheast: Self = Self(9i32);
    pub const SizeWestEast: Self = Self(10i32);
    pub const UniversalNo: Self = Self(11i32);
    pub const UpArrow: Self = Self(12i32);
    pub const Wait: Self = Self(13i32);
    pub const Pin: Self = Self(14i32);
    pub const Person: Self = Self(15i32);
}
impl ::core::marker::Copy for CoreCursorType {}
impl ::core::clone::Clone for CoreCursorType {
    fn clone(&self) -> Self {
        *self
    }
}
pub type CoreDispatcher = *mut ::core::ffi::c_void;
#[repr(transparent)]
pub struct CoreDispatcherPriority(pub i32);
impl CoreDispatcherPriority {
    pub const Idle: Self = Self(-2i32);
    pub const Low: Self = Self(-1i32);
    pub const Normal: Self = Self(0i32);
    pub const High: Self = Self(1i32);
}
impl ::core::marker::Copy for CoreDispatcherPriority {}
impl ::core::clone::Clone for CoreDispatcherPriority {
    fn clone(&self) -> Self {
        *self
    }
}
// Flag enums (combinable bit masks) are emitted over u32.
#[repr(transparent)]
pub struct CoreIndependentInputFilters(pub u32);
impl CoreIndependentInputFilters {
    pub const None: Self = Self(0u32);
    pub const MouseButton: Self = Self(1u32);
    pub const MouseWheel: Self = Self(2u32);
    pub const MouseHover: Self = Self(4u32);
    pub const PenWithBarrel: Self = Self(8u32);
    pub const PenInverted: Self = Self(16u32);
}
impl ::core::marker::Copy for CoreIndependentInputFilters {}
impl ::core::clone::Clone for CoreIndependentInputFilters {
    fn clone(&self) -> Self {
        *self
    }
}
pub type CoreIndependentInputSource = *mut ::core::ffi::c_void;
pub type CoreIndependentInputSourceController = *mut ::core::ffi::c_void;
#[repr(transparent)]
pub struct CoreInputDeviceTypes(pub u32);
impl CoreInputDeviceTypes {
    pub const None: Self = Self(0u32);
    pub const Touch: Self = Self(1u32);
    pub const Pen: Self = Self(2u32);
    pub const Mouse: Self = Self(4u32);
}
impl ::core::marker::Copy for CoreInputDeviceTypes {}
impl ::core::clone::Clone for CoreInputDeviceTypes {
    fn clone(&self) -> Self {
        *self
    }
}
// Plain C-layout struct marshalled across the ABI boundary.
#[repr(C)]
pub struct CorePhysicalKeyStatus {
    pub RepeatCount: u32,
    pub ScanCode: u32,
    pub IsExtendedKey: bool,
    pub IsMenuKeyDown: bool,
    pub WasKeyDown: bool,
    pub IsKeyReleased: bool,
}
impl ::core::marker::Copy for CorePhysicalKeyStatus {}
impl ::core::clone::Clone for CorePhysicalKeyStatus {
    fn clone(&self) -> Self {
        *self
    }
}
// Generated bindings, continued: CoreWindow-related enums and handle aliases.
#[repr(transparent)]
pub struct CoreProcessEventsOption(pub i32);
impl CoreProcessEventsOption {
    pub const ProcessOneAndAllPending: Self = Self(0i32);
    pub const ProcessOneIfPresent: Self = Self(1i32);
    pub const ProcessUntilQuit: Self = Self(2i32);
    pub const ProcessAllIfPresent: Self = Self(3i32);
}
impl ::core::marker::Copy for CoreProcessEventsOption {}
impl ::core::clone::Clone for CoreProcessEventsOption {
    fn clone(&self) -> Self {
        *self
    }
}
// Only available when the Foundation feature supplies the Point struct.
#[repr(C)]
#[cfg(feature = "Foundation")]
pub struct CoreProximityEvaluation {
    pub Score: i32,
    pub AdjustedPoint: super::super::Foundation::Point,
}
#[cfg(feature = "Foundation")]
impl ::core::marker::Copy for CoreProximityEvaluation {}
#[cfg(feature = "Foundation")]
impl ::core::clone::Clone for CoreProximityEvaluation {
    fn clone(&self) -> Self {
        *self
    }
}
#[repr(transparent)]
pub struct CoreProximityEvaluationScore(pub i32);
impl CoreProximityEvaluationScore {
    pub const Closest: Self = Self(0i32);
    pub const Farthest: Self = Self(2147483647i32);
}
impl ::core::marker::Copy for CoreProximityEvaluationScore {}
impl ::core::clone::Clone for CoreProximityEvaluationScore {
    fn clone(&self) -> Self {
        *self
    }
}
#[repr(transparent)]
pub struct CoreVirtualKeyStates(pub u32);
impl CoreVirtualKeyStates {
    pub const None: Self = Self(0u32);
    pub const Down: Self = Self(1u32);
    pub const Locked: Self = Self(2u32);
}
impl ::core::marker::Copy for CoreVirtualKeyStates {}
impl ::core::clone::Clone for CoreVirtualKeyStates {
    fn clone(&self) -> Self {
        *self
    }
}
pub type CoreWindow = *mut ::core::ffi::c_void;
#[repr(transparent)]
pub struct CoreWindowActivationMode(pub i32);
impl CoreWindowActivationMode {
    pub const None: Self = Self(0i32);
    pub const Deactivated: Self = Self(1i32);
    pub const ActivatedNotForeground: Self = Self(2i32);
    pub const ActivatedInForeground: Self = Self(3i32);
}
impl ::core::marker::Copy for CoreWindowActivationMode {}
impl ::core::clone::Clone for CoreWindowActivationMode {
    fn clone(&self) -> Self {
        *self
    }
}
#[repr(transparent)]
pub struct CoreWindowActivationState(pub i32);
impl CoreWindowActivationState {
    pub const CodeActivated: Self = Self(0i32);
    pub const Deactivated: Self = Self(1i32);
    pub const PointerActivated: Self = Self(2i32);
}
impl ::core::marker::Copy for CoreWindowActivationState {}
impl ::core::clone::Clone for CoreWindowActivationState {
    fn clone(&self) -> Self {
        *self
    }
}
pub type CoreWindowDialog = *mut ::core::ffi::c_void;
pub type CoreWindowEventArgs = *mut ::core::ffi::c_void;
#[repr(transparent)]
pub struct CoreWindowFlowDirection(pub i32);
impl CoreWindowFlowDirection {
    pub const LeftToRight: Self = Self(0i32);
    pub const RightToLeft: Self = Self(1i32);
}
impl ::core::marker::Copy for CoreWindowFlowDirection {}
impl ::core::clone::Clone for CoreWindowFlowDirection {
    fn clone(&self) -> Self {
        *self
    }
}
pub type CoreWindowFlyout = *mut ::core::ffi::c_void;
pub type CoreWindowPopupShowingEventArgs = *mut ::core::ffi::c_void;
pub type CoreWindowResizeManager = *mut ::core::ffi::c_void;
pub type DispatchedHandler = *mut ::core::ffi::c_void;
pub type ICoreAcceleratorKeys = *mut ::core::ffi::c_void;
pub type ICoreInputSourceBase = *mut ::core::ffi::c_void;
pub type ICorePointerInputSource = *mut ::core::ffi::c_void;
pub type ICorePointerInputSource2 = *mut ::core::ffi::c_void;
pub type ICorePointerRedirector = *mut ::core::ffi::c_void;
pub type ICoreWindow = *mut ::core::ffi::c_void;
pub type ICoreWindowEventArgs = *mut ::core::ffi::c_void;
pub type IInitializeWithCoreWindow = *mut ::core::ffi::c_void;
pub type IdleDispatchedHandler = *mut ::core::ffi::c_void;
pub type IdleDispatchedHandlerArgs = *mut ::core::ffi::c_void;
pub type InputEnabledEventArgs = *mut ::core::ffi::c_void;
pub type KeyEventArgs = *mut ::core::ffi::c_void;
pub type PointerEventArgs = *mut ::core::ffi::c_void;
pub type SystemNavigationManager = *mut ::core::ffi::c_void;
pub type TouchHitTestingEventArgs = *mut ::core::ffi::c_void;
pub type VisibilityChangedEventArgs = *mut ::core::ffi::c_void;
pub type WindowActivatedEventArgs = *mut ::core::ffi::c_void;
pub type WindowSizeChangedEventArgs = *mut ::core::ffi::c_void;
|
extern crate hex;
use self::hex::ToHex;
/// Knot-hash round state (Advent of Code 2017, day 10).
struct State {
    /// The circular list of marks being knotted.
    list: Vec<u8>,
    /// Current position in the circular list.
    position: usize,
    /// Extra distance added to each move; grows by one per step.
    skip_size: usize,
}
/// Reverses the section of `list` starting at `start_index` (inclusive) and
/// ending at `end_index` (exclusive), wrapping past the end of the vector when
/// `end_index < start_index`. Equal indices are treated as an empty section;
/// callers needing a full-length reversal must handle it themselves (see
/// `State::step`).
fn reverse_section<T: Clone>(list: &mut Vec<T>, start_index: usize, end_index: usize) {
    if end_index == start_index {
        return;
    }
    if end_index > start_index {
        // Contiguous section: reverse the slice in place.
        list[start_index..end_index].reverse();
    } else {
        // Wrapping section: copy it out, reverse, and write it back circularly.
        let mut section: Vec<T> = list[start_index..].to_vec();
        section.extend_from_slice(&list[..end_index]);
        section.reverse();
        let len = list.len();
        for (offset, item) in section.into_iter().enumerate() {
            list[(start_index + offset) % len] = item;
        }
    }
}
impl State {
    /// Creates the mark list `0, 1, ..., elem_count - 1`.
    pub fn new(elem_count: i32) -> Self {
        State {
            list: (0..elem_count).map(|v| v as u8).collect(),
            position: 0,
            skip_size: 0,
        }
    }
    /// One knot step: reverse `length` marks starting at `position` (wrapping),
    /// then advance `position` by `length + skip_size` and bump `skip_size`.
    pub fn step(&mut self, length: usize) {
        let elem_count = self.list.len();
        if length == elem_count && elem_count > 0 {
            // BUG FIX: for a full-length section `(position + length) % elem_count`
            // equals `position`, which `reverse_section` treats as an empty
            // section; the puzzle requires reversing the whole circular list
            // starting at `position` (e.g. length 5 on a 5-mark list).
            let start = self.position;
            let mut section: Vec<u8> = self.list[start..].to_vec();
            section.extend_from_slice(&self.list[..start]);
            section.reverse();
            for (offset, item) in section.into_iter().enumerate() {
                self.list[(start + offset) % elem_count] = item;
            }
        } else {
            let end_index = (self.position + length) % elem_count;
            reverse_section(&mut self.list, self.position, end_index);
        }
        self.position = (self.position + length + self.skip_size) % elem_count;
        self.skip_size += 1;
    }
    /// Applies each length in order. Takes a slice; existing callers passing
    /// `&Vec<usize>` keep working via deref coercion.
    pub fn run(&mut self, lengths: &[usize]) {
        for &length in lengths {
            self.step(length);
        }
    }
    /// XORs each complete 16-mark block into one byte of the dense hash
    /// (a trailing partial block is ignored).
    pub fn dense_hash(&self) -> Vec<u8> {
        let block_count = self.list.len() / 16;
        let mut result: Vec<u8> = Vec::with_capacity(block_count);
        for block_idx in 0..block_count {
            let block = &self.list[block_idx * 16..block_idx * 16 + 16];
            result.push(block.iter().fold(0, |acc, &v| acc ^ v));
        }
        result
    }
    /// Debug helper: prints the list with the current position bracketed.
    #[allow(dead_code)]
    fn dump(&self) {
        for (i, v) in self.list.iter().enumerate() {
            if i == self.position {
                print!("[{}] ", v);
            } else {
                print!("{} ", v);
            }
        }
        println!(" skip_size: {}", self.skip_size);
    }
}
/// Runs the puzzle's worked example: 5 marks, lengths 3, 4, 1, 5.
fn example() {
    let lengths = vec![3, 4, 1, 5];
    let mut state = State::new(5);
    state.run(&lengths);
    println!("{:?}", state.list)
}
/// Computes the AoC 2017 knot hash of `s`: the lengths are the character codes
/// of `s` plus the standard suffix, run for 64 rounds over a 256-mark list,
/// producing the 16-byte dense hash.
pub fn knot_hash(s: &str) -> Vec<u8> {
    let mut lengths: Vec<usize> = s.chars().map(|ch| ch as usize).collect();
    // Puzzle-defined suffix appended to every input.
    lengths.extend_from_slice(&[17, 31, 73, 47, 23]);
    let mut state = State::new(256);
    for _ in 0..64 {
        state.run(&lengths);
    }
    state.dense_hash()
}
/// Part 1: run the literal puzzle lengths once and report the product of the
/// first two marks.
fn part1() {
    let puzzle_lengths = vec![14, 58, 0, 116, 179, 16, 1, 104, 2, 254, 167, 86, 255, 55, 122, 244];
    let mut state = State::new(256);
    state.run(&puzzle_lengths);
    let product = (state.list[0] as i32) * (state.list[1] as i32);
    println!("Day 10 result 1: {}", product);
}
/// Part 2: knot-hash the puzzle input interpreted as a string and print it as
/// lowercase hex.
fn part2() {
    let dense_hash = knot_hash("14,58,0,116,179,16,1,104,2,254,167,86,255,55,122,244");
    // Format with the standard library instead of the deprecated
    // `hex::ToHex::write_hex` API.
    use std::fmt::Write;
    let mut result = String::with_capacity(dense_hash.len() * 2);
    for byte in &dense_hash {
        write!(result, "{:02x}", byte).expect("writing to a String cannot fail");
    }
    println!("Day 10 result 2: {}", result);
}
/// Day 10 entry point: runs the worked example and both puzzle parts.
pub fn run() {
    example();
    part1();
    part2();
}
use mqtt3::{MqttRead, MqttWrite};
use std::io::{self, Read, Write};
use std::net::Shutdown;
use std::time::Duration;
use netopt::{NetworkStream};
/// Thin wrapper owning the transport used for an MQTT session.
pub struct Connection {
    stream: NetworkStream
}
impl Connection {
    /// Wraps an already-established `NetworkStream`.
    pub fn new(stream: NetworkStream) -> io::Result<Connection> {
        Ok(Connection { stream })
    }
    /// Applies a read timeout to the underlying stream (`None` blocks forever).
    pub fn set_read_timeout(&self, dur: Option<Duration>) -> io::Result<()> {
        self.stream.set_read_timeout(dur)
    }
    /// Closes both directions of the underlying stream.
    pub fn terminate(&self) -> io::Result<()> {
        self.stream.shutdown(Shutdown::Both)
    }
}
// Plain delegation of the `Write` contract to the underlying stream.
impl Write for Connection {
    fn write(&mut self, msg: &[u8]) -> io::Result<usize> {
        self.stream.write(msg)
    }
    fn flush(&mut self) -> io::Result<()> {
        self.stream.flush()
    }
}
// Plain delegation of the `Read` contract to the underlying stream.
impl Read for Connection {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.stream.read(buf)
    }
}
// Empty impls are sufficient: MqttRead/MqttWrite must supply default methods
// on top of Read/Write (otherwise these would not compile).
impl MqttRead for Connection {}
impl MqttWrite for Connection {}
|
use crate::concmap::*;
use crate::memory::*;
use crate::program::Program;
use crate::thread;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
/// Memoizes consistency-check results, keyed by a serialized thread state.
pub struct Cache {
    // Serialized (program, memory, thread-state, consistency-state) key ->
    // eventual check result; the `Future` value presumably lets concurrent
    // callers wait on a check that is still in flight.
    r: ConcurrentMap<Box<[u32]>, Future<bool>>,
    // Lookups answered from the memo table.
    n_hits: AtomicU64,
    // Misses whose check concluded "consistent".
    n_consistent: AtomicUsize,
}
impl Cache {
    /// Creates an empty cache with zeroed counters.
    #[inline]
    pub fn new() -> Self {
        Self {
            r: ConcurrentMap::new(),
            n_hits: AtomicU64::new(0),
            n_consistent: AtomicUsize::new(0),
        }
    }
    /// Checks `ts` under the most restrictive consistency requirements.
    /// The thread must not be mid-update.
    pub fn check_full(&self, p: &Program, mem: &Memory, ts: &thread::State) -> bool {
        assert!(ts.updating.is_none());
        self._check(p, mem, ts, &thread::ConsistencyState::most_restrictive())
    }
    /// Like `check_full`, but under the weaker "semi" consistency requirements.
    pub fn check_semi(&self, p: &Program, mem: &Memory, ts: &thread::State) -> bool {
        assert!(ts.updating.is_none());
        self._check(p, mem, ts, &thread::ConsistencyState::semi())
    }
    /// Core check: returns whether `ts` is consistent under `cs`, memoizing the
    /// result by the serialized state.
    fn _check(
        &self,
        p: &Program,
        mem: &Memory,
        ts: &thread::State,
        cs: &thread::ConsistencyState,
    ) -> bool {
        // A state with no outstanding promises is trivially consistent.
        if ts.no_promises() {
            return true;
        }
        // Optional cheap pre-filter (opt `cc`): reject immediately when some
        // promised value can never be produced from the current pc.
        if p.opt().cc {
            let info = p.promise_stat(ts.pc);
            for loc in Loc::iter() {
                let vm = &mem[loc][ts.cur.get(loc)..];
                let loc_info = &info.may_promise[loc];
                for pr in ts.pr[loc].iter() {
                    let msg = &vm[pr];
                    if loc_info.values.get(&msg.val()).is_none() {
                        return false;
                    }
                }
            }
        }
        // Memo key: the state serialized together with the consistency mode.
        let mut ser = Vec::new();
        ts.serialize_for_consistency(&mut ser, p, mem, cs);
        let ser = ser.into_boxed_slice();
        let (ins, index) = self.r.insert(ser, Future::Pending);
        if !ins {
            // Hit: another check already ran (or is running) for this key;
            // read / wait for its result.
            self.n_hits.fetch_add(1, Ordering::Relaxed);
            return self.r.poll(index, |_, v| *v);
        }
        // Miss: explore transitions. Per the convention documented on `Vis`,
        // `Err` signals "found a consistent continuation", so `is_err()` means
        // consistent.
        let mut vis = Vis {
            cache: self,
            p,
            mem,
            cs,
            ts,
        };
        let r = thread::visit_transitions(&mut vis, p, mem, ts).is_err();
        self.r.with(index, |_, v| {
            v.set(r);
        });
        if r {
            self.n_consistent.fetch_add(1, Ordering::Relaxed);
        }
        r
    }
    /// Lookups answered from the memo table.
    #[inline(always)]
    pub fn n_hits(&self) -> u64 {
        self.n_hits.load(Ordering::Relaxed)
    }
    /// Distinct keys ever checked (every miss inserts exactly one entry).
    #[inline(always)]
    pub fn n_misses(&self) -> u64 {
        self.r.len() as u64
    }
    /// Misses whose check concluded "consistent".
    #[inline(always)]
    pub fn n_consistent(&self) -> u64 {
        self.n_consistent.load(Ordering::Relaxed) as u64
    }
    /// Misses whose check concluded "inconsistent".
    #[inline(always)]
    pub fn n_inconsistent(&self) -> u64 {
        self.n_misses().saturating_sub(self.n_consistent())
    }
    /// Total number of checks that reached the memo table.
    #[inline(always)]
    pub fn n_total(&self) -> u64 {
        self.n_hits() + self.n_misses()
    }
}
/// Transition visitor used on a cache miss: borrows everything needed to
/// recursively re-check successor states via `Cache::_check`.
struct Vis<'a> {
    cache: &'a Cache,
    p: &'a Program,
    mem: &'a Memory,
    cs: &'a thread::ConsistencyState,
    ts: &'a thread::State,
}
impl thread::TransitionVisitor for Vis<'_> {
    // `Ok` means inconsistent, `Err` means consistent.
    // (The inverted convention lets a consistent continuation short-circuit
    // the visit via the error path.)
    type Err = ();
    // NOTE(review): the unreachable!() arms below are transition kinds that are
    // apparently never generated during a consistency visit — confirm against
    // `thread::visit_transitions`.
    fn ret(&mut self, _: Val) -> Result<(), ()> {
        unreachable!()
    }
    // A pure (non-memory) step: consistent iff the successor state is.
    fn pure(&mut self, s: thread::Pure) -> Result<(), Self::Err> {
        let ts2 = self.ts.with_pure(&s);
        if self.cache._check(self.p, self.mem, &ts2, self.cs) {
            Err(())
        } else {
            Ok(())
        }
    }
    // A write step: only followed if it stays valid under the (possibly
    // tightened) consistency state, then recurse on the updated state/memory.
    fn write(&mut self, w: thread::Write) -> Result<(), ()> {
        let mut cs2 = self.cs.clone();
        if !self.ts.valid_in_future(self.mem, &w, &mut cs2) {
            return Ok(());
        }
        let (ts2, mem2, _) = self.ts.with_write(self.mem, &w);
        if self.cache._check(self.p, &mem2, &ts2, &cs2) {
            Err(())
        } else {
            Ok(())
        }
    }
    fn sc_fence(&mut self, _: thread::ScFence) -> Result<(), ()> {
        unreachable!()
    }
    fn rel_fence(&mut self, _: thread::RelFence) -> Result<(), ()> {
        unreachable!()
    }
    fn rel_write(&mut self, _: thread::RelWrite) -> Result<(), ()> {
        unreachable!()
    }
}
|
use libc::{tmpnam, L_tmpnam};
use std::os::raw::c_char;
use std::ffi::CStr;
use nix::unistd::{mkstemp, unlink};
use nix::sys::stat::fstat;
use nix::fcntl::readlink;
fn main() {
unsafe {
let mut tmp_file_name_ptr = [0u8; L_tmpnam as usize].as_mut_ptr() as *mut c_char;
tmpnam(tmp_file_name_ptr);
println!("tmp_file_name = {}", CStr::from_ptr(tmp_file_name_ptr).to_str().unwrap());
tmpnam(tmp_file_name_ptr);
println!("tmp_file_name = {}", CStr::from_ptr(tmp_file_name_ptr).to_str().unwrap());
let temp_file = match mkstemp("/tmp/tempfile_XXXXXX") {
Ok((fd, path)) => {
unlink(path.as_path()).unwrap(); // flag file to be deleted at app termination
fd
}
Err(e) => panic!("mkstemp failed: {}", e)
};
let link = format!("/proc/self/fd/{}", temp_file);
let mut link_content = [0u8; 1024];
readlink(link.as_str(), &mut link_content);
println!("{}", CStr::from_bytes_with_nul_unchecked(&link_content).to_str().unwrap())
}
} |
// Per-OS generated FFI bindings (src/file_finder/fts/) are textually included
// so the rest of the crate sees a single module regardless of target.
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(unused)]
#[cfg(target_os = "macos")]
include!(concat!(
    env!("CARGO_MANIFEST_DIR"),
    "/src/file_finder/fts/bindings_macos.rs"
));
#[cfg(target_os = "linux")]
include!(concat!(
    env!("CARGO_MANIFEST_DIR"),
    "/src/file_finder/fts/bindings_unknown_linux.rs"
));
|
use num::{traits::NumAssignRef, BigUint, CheckedMul};
// see Square roots by subtraction Frazer Jarvis
// http://www.afjarvis.staff.shef.ac.uk/maths/jarvisspec02.pdf
/// Approximates sqrt(n) by the digit-by-digit subtraction method of Frazer
/// Jarvis (see the paper linked above); `precision` controls how many
/// iterations (decimal digit positions) are produced — TODO confirm the exact
/// digit count of the returned integer.
pub fn get_approximation(n: usize, precision: u32) -> BigUint {
    let ten = BigUint::from(10usize);
    let fortyfive = BigUint::from(45usize);
    let hundred = BigUint::from(100usize);
    // The previous version bound `five` and cloned it twice; construct the two
    // starting values directly instead.
    let mut a = BigUint::from(5usize) * n;
    let mut b = BigUint::from(5usize);
    for _ in 0..precision {
        // Repeated subtraction extracts the next digit (a/b follow the paper's
        // invariants).
        while &a >= &b {
            a -= &b;
            b += &ten;
        }
        // Shift to the next decimal digit position.
        a *= &hundred;
        b *= &ten;
        b -= &fortyfive;
    }
    // b carries one extra factor of ten; drop it before returning.
    b /= &ten;
    b
}
// TODO PERF: could this also be implemented using the approximation method?
///returns (floor(sqrt(n)), ceil(sqrt(n)))
///
/// Convenience wrapper around `sqrt_with_lower_bound_hint` with a hint of 0
/// (always a valid lower bound).
pub fn sqrt<N: CheckedMul + NumAssignRef + Ord + Clone>(n: N) -> (N, N) {
    sqrt_with_lower_bound_hint(n, N::zero())
}
///returns (floor(sqrt(n)), ceil(sqrt(n)))
///
/// `lower_bound_hint` merely narrows the initial search; it must not exceed
/// floor(sqrt(n)) (0 is always safe) — TODO confirm this precondition is
/// upheld by all callers.
pub fn sqrt_with_lower_bound_hint<N: CheckedMul + NumAssignRef + Ord + Clone>(
    n: N,
    lower_bound_hint: N,
) -> (N, N) {
    //first we do a really shitty sqrt approximation
    use core::cmp::Ordering;
    // sqrt_range.0 is the current lower bound; sqrt_range.1 tracks the
    // remaining search width. The loop ends when the width collapses to <= 1.
    let mut sqrt_range = (lower_bound_hint, n.clone());
    while sqrt_range.1 > N::one() {
        // Candidate: lower bound plus half the remaining width.
        let mut mid = sqrt_range.1.clone() / (N::one() + N::one());
        mid += &sqrt_range.0;
        match mid.checked_mul(&mid) {
            Some(product) => match product.cmp(&n) {
                Ordering::Equal => {
                    // Exact square found; force loop termination.
                    sqrt_range = (mid, N::one());
                    break;
                }
                Ordering::Greater => {
                    // Overshot: halve the search width.
                    sqrt_range.1 = sqrt_range.1 / (N::one() + N::one());
                }
                Ordering::Less => {
                    // Undershot: move the lower bound up to `mid`; the new
                    // width is (old bound + old width) - mid.
                    sqrt_range.1 = sqrt_range.0 + sqrt_range.1;
                    sqrt_range.1 -= &mid;
                    sqrt_range.0 = mid;
                }
            },
            None => {
                // mid * mid overflowed N, so mid is certainly too large.
                sqrt_range.1 = sqrt_range.1 / (N::one() + N::one());
            }
        }
    }
    // Fix up: compare n against the candidate squared and return the
    // surrounding (floor, ceil) pair.
    let sqrt = sqrt_range.0;
    let mut squared = sqrt.clone();
    squared *= &sqrt;
    let ret = match n.cmp(&squared) {
        Ordering::Less => (sqrt.clone() - N::one(), sqrt),
        Ordering::Equal => (sqrt.clone(), sqrt),
        Ordering::Greater => (sqrt.clone(), sqrt + N::one()),
    };
    debug_assert!(ret.0.clone() * ret.0.clone() <= n);
    debug_assert!(ret.1.clone() * ret.1.clone() >= n);
    ret
}
/// Yields the continued-fraction terms of sqrt(`non_square`) (apparently the
/// periodic part after the integer part — confirm against callers);
/// `previous_sqrt` must be floor(sqrt(`non_square`)).
pub fn get_continued_fraction_of(
    previous_sqrt: usize,
    non_square: usize,
) -> impl Iterator<Item = usize> {
    ContinuedFractionFrame::new(previous_sqrt, non_square)
}
/**
 * represents a fraction like:
 * nominator / (sqrt(non_square) - subtract_from_denominator)
 *
 * (`nominator` is a misnomer for "numerator"; kept because the impls use it.)
 */
#[derive(PartialEq, Eq, Clone, Copy)]
struct ContinuedFractionFrame {
    // floor(sqrt(non_square)), fixed for the whole expansion.
    previous_sqrt: usize,
    // The non-square radicand whose sqrt is being expanded.
    non_square: usize,
    // Current numerator; 0 doubles as the "iteration finished" sentinel.
    nominator: usize,
    subtract_from_denominator: usize,
}
impl ContinuedFractionFrame {
fn new(previous_sqrt: usize, non_square: usize) -> Self {
Self {
previous_sqrt,
non_square,
nominator: 1,
subtract_from_denominator: previous_sqrt,
}
}
}
impl Iterator for ContinuedFractionFrame {
    type Item = usize;
    fn next(&mut self) -> Option<Self::Item> {
        // nominator == 0 is the "period finished" sentinel set below.
        if self.nominator == 0 {
            return None;
        }
        // Standard step for expanding a quadratic surd: compute the next
        // frame's numerator, extract the integer term, and update the
        // denominator offset.
        let next_nominator = (self.non_square
            - self.subtract_from_denominator * self.subtract_from_denominator)
            / self.nominator;
        let next_extracted = (self.subtract_from_denominator + self.previous_sqrt) / next_nominator;
        let next_subtract_from_denominator =
            next_extracted * next_nominator - self.subtract_from_denominator;
        self.nominator = next_nominator;
        self.subtract_from_denominator = next_subtract_from_denominator;
        // The expansion of sqrt(n) is periodic; this state marks the end of one
        // period, after which iteration stops instead of repeating forever.
        if self.nominator == 1 && self.subtract_from_denominator == self.previous_sqrt {
            self.nominator = 0;
        }
        return Some(next_extracted);
    }
}
|
use std::fmt;
use rand::{Rng, SeedableRng};
use rand_xorshift::XorShiftRng;
/// A Dataset is basically an iterator, with some additional capabilities.
///
/// - `shuffle(buffer_size, seed)`: eagerly takes buffer_size items and returns shuffled
/// - `batch(batch_size, drop_remainder)`: an array of batch_size at a time instead of 1 at a time
///
/// TODO:
///
/// - `padded_batch(batch_size, padding_value)`: make the dataset uniform by filling with `padding_value`.
/// - `window`?: described at https://github.com/tensorflow/community/blob/master/rfcs/20180726-tf-data-windowing-reducers.md
///
/// The goal is for this interface to be at feature parity with `tensorflow.data.Dataset`.
pub trait Dataset: Iterator {
    /// shuffle: eagerly buffers `buffer_size` items and yields them in a
    /// pseudo-random order driven by `seed` (see `Shuffle`).
    /// TODO: handle error when buffer_size is 0
    fn shuffle(self, buffer_size: usize, seed: u64) -> Shuffle<Self>
    where
        Self: Sized,
    {
        Shuffle::new(self, buffer_size, seed)
    }
    /// batch: groups items into `Vec`s of `batch_size`; a short final batch is
    /// dropped when `drop_remainder` is set (see `Batch`).
    /// TODO: handle error when batch_size is 0
    fn batch(self, batch_size: usize, drop_remainder: bool) -> Batch<Self>
    where
        Self: Sized,
    {
        Batch {
            iter: self,
            batch_size,
            drop_remainder,
        }
    }
}
// TODO: reconsider this, do we want all iterators be datasets.
impl<I> Dataset for I where I: Iterator {}
/// Shuffle is an iterator that returns the elements of the inner iterator in a shuffled order.
///
/// ```
/// use datasets::Dataset;
///
/// let v: Vec<usize> = (0..8).shuffle(5, 0).collect();
/// assert_eq!(v.len(), 8);
/// assert_eq!(v, vec![4, 2, 0, 3, 7, 6, 5, 1]);
/// ```
///
/// TODO: implement `reshuffle_each_iteration` as defined at https://www.tensorflow.org/api_docs/python/tf/data/Dataset#shuffle.
pub struct Shuffle<I>
where
    I: Iterator,
{
    iter: I,
    // Number of live (still occupied) slots at the front of `buffer`; shrinks
    // as the inner iterator runs dry (see the Iterator impl).
    buffer_size: usize,
    buffer: Vec<Option<<I as Iterator>::Item>>,
    // Deterministic PRNG seeded by the caller, so shuffles are reproducible.
    rng: XorShiftRng,
}
impl<I> Shuffle<I>
where
    I: Iterator,
{
    /// Builds a `Shuffle`, eagerly pre-filling the buffer with up to
    /// `buffer_size` items from `iter` (fewer if the iterator is shorter).
    fn new(mut iter: I, buffer_size: usize, seed: u64) -> Shuffle<I> {
        // NOTE: cannot use vec! here because Option<<I as Iterator>::Item>
        // does not implement Clone.
        let mut buffer = Vec::with_capacity(buffer_size);
        for _ in 0..buffer_size {
            match iter.next() {
                Some(item) => buffer.push(Some(item)),
                None => break,
            }
        }
        Shuffle {
            iter,
            buffer_size,
            buffer,
            rng: XorShiftRng::seed_from_u64(seed),
        }
    }
}
// Manual impl: derive is impossible since neither `I` nor the buffered items
// are required to be Debug; only the shuffle bookkeeping is shown.
impl<I> fmt::Debug for Shuffle<I>
where
    I: Iterator,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "Shuffle {{ buffer_size: {}, rng: {:?} }}",
            self.buffer_size, self.rng
        )
    }
}
impl<I> Iterator for Shuffle<I>
where
    I: Iterator,
{
    type Item = <I as Iterator>::Item;
    // Reservoir-style draw: pick a random live slot, yield its value, and
    // refill the slot from the inner iterator. Once the inner iterator is
    // exhausted, the last live slot is swapped into the hole and the live
    // region (`buffer_size`) shrinks by one.
    fn next(&mut self) -> Option<Self::Item> {
        if self.buffer_size == 0 {
            // Buffer fully drained (or constructed with size 0): done.
            None
        } else {
            let index = self.rng.gen_range(0, self.buffer_size);
            let val = self.buffer[index].take();
            let replace_val = self.iter.next();
            if replace_val.is_some() {
                self.buffer[index] = replace_val;
            } else {
                // Inner iterator exhausted: compact by moving the last live
                // element into the vacated slot and shrinking the live region.
                self.buffer[index] = self.buffer[self.buffer_size - 1].take();
                self.buffer_size -= 1;
            }
            val
        }
    }
}
/// Batch is an iterator that returns the contents of its inner iterator in batches
///
/// ```
/// use datasets::Dataset;
///
/// let vals: Vec<Vec<usize>> = (0..8).batch(5, false).collect();
///
/// assert_eq!(vals.len(), 2);
/// assert_eq!(vals[0], vec![0, 1, 2, 3, 4]);
/// assert_eq!(vals[1], vec![5, 6, 7]);
/// ```
#[derive(Debug)]
pub struct Batch<I>
where
    I: Iterator,
{
    iter: I,
    batch_size: usize,
    drop_remainder: bool,
}
impl<I> Iterator for Batch<I>
where
    I: Iterator,
{
    type Item = Vec<<I as Iterator>::Item>;
    /// Pulls up to `batch_size` items; a short final batch is yielded unless
    /// `drop_remainder` is set, and `None` marks the end of the inner iterator.
    fn next(&mut self) -> Option<Self::Item> {
        // An exhausted inner iterator ends the batch stream immediately.
        let first = self.iter.next()?;
        let mut batch = Vec::with_capacity(self.batch_size);
        batch.push(first);
        while batch.len() < self.batch_size {
            match self.iter.next() {
                Some(item) => batch.push(item),
                None => break,
            }
        }
        if self.drop_remainder && batch.len() < self.batch_size {
            None
        } else {
            Some(batch)
        }
    }
}
|
use constants::EDITOR_MODES;
use crate::{constants, editor_config, editor_cursor, editor_visual, terminal_utility, input_utility, utility, input_process};
/// One-time editor start-up: measures the terminal, repaints the screen, seeds
/// an empty first line, then draws line 0 and the initial status line
/// (INSERT mode plus cursor position).
pub fn init_editor(editor_config: &mut editor_config::EditorConfig) {
    terminal_utility::get_window_size(editor_config);
    editor_visual::editor_refresh_screen(editor_config);
    editor_config.lines.push(editor_config::EditorLine::new("".to_string()));
    write_line(editor_config, &mut String::new(), 0);
    let mut buffer = String::new();
    update_mode_in_status_line(editor_config, &mut buffer, &EDITOR_MODES::INSERT);
    update_position_in_status_line(editor_config, &mut buffer);
    write_line(editor_config, &mut buffer, 0);
}
/// Renders line `line_index` prefixed with its 1-based line number, parks the
/// cursor after the rendered text, and flushes the buffer. Out-of-range
/// indices are ignored.
pub fn write_line(editor_config: &mut editor_config::EditorConfig, buffer: &mut String, line_index: usize) {
    if line_index < editor_config.lines.len() {
        // Clone the line so its content can outlive the mutable borrows of
        // `editor_config` taken by the cursor calls below.
        let line = editor_config.lines[line_index].clone();
        editor_cursor::move_cursor_to_position(0, line_index as u16, editor_config, buffer);
        // Shadowing: `line` now holds the on-screen text " <n> <content>".
        let line = format!(" {} {}", line_index + 1, line.content);
        buffer.push_str(&line);
        // NOTE(review): the target column is the byte length of the formatted
        // line (number prefix included) — assumes single-byte characters and
        // fits in u16; confirm.
        editor_cursor::move_cursor_to_position((line.len() + 1) as u16, editor_config.cursor_y, editor_config, buffer);
        editor_visual::flush_buffer(buffer);
    }
}
/// Repaints the mode segment of the status line on the bottom row (green
/// background, black text), restoring the caller's cursor afterwards.
pub fn update_mode_in_status_line(editor_config: &mut editor_config::EditorConfig, buffer: &mut String, editor_mode: &constants::EDITOR_MODES) {
    editor_cursor::save_cursor_and_move_position(1, editor_config.rows - 1, buffer, true);
    let mode_text = utility::center_text(editor_mode.name().to_string(), constants::EDITOR_STATUS_LINE_MODE_WIDTH as usize);
    buffer.push_str(&format!("{}{}{}{}", constants::ANSI_BACKGROUND_COLOR_GREEN, constants::ANSI_TEXT_COLOR_BLACK, &mode_text, constants::ANSI_COLOR_RESET));
    editor_cursor::restore_cursor(buffer);
}
/// Repaints the "row:col" segment at the right edge of the status line,
/// restoring the caller's cursor afterwards.
pub fn update_position_in_status_line(editor_config: &mut editor_config::EditorConfig, buffer: &mut String) {
    let initial_cursor: (u16, u16) = (editor_config.cursor_x, editor_config.cursor_y);
    editor_cursor::save_cursor_and_move_position((editor_config.cols + 1) - constants::EDITOR_STATUS_LINE_MODE_WIDTH, editor_config.rows - 1, buffer, true);
    // NOTE(review): the displayed column subtracts the line-number gutter
    // (EDITOR_NUMBER_LINE_INDEX + 2); underflows if cursor_x is inside the
    // gutter — confirm callers guarantee it isn't.
    let mode_text = utility::center_text(format!("{}:{}", initial_cursor.1, initial_cursor.0 - (constants::EDITOR_NUMBER_LINE_INDEX + 2)), constants::EDITOR_STATUS_LINE_MODE_WIDTH as usize);
    buffer.push_str(&format!("{}{}{}{}", constants::ANSI_BACKGROUND_COLOR_GREEN, constants::ANSI_TEXT_COLOR_BLACK, &mode_text, constants::ANSI_COLOR_RESET));
    editor_cursor::restore_cursor(buffer);
}
/// Main input loop: reads and pre-processes keys until a command asks to quit.
pub fn editor_process_key(editor_config: &mut editor_config::EditorConfig) {
    loop {
        let input = input_utility::editor_read_key();
        if let Some(input_char) = input {
            let input_char = input_utility::preprocess_characters(input_char);
            match input_char.as_str() {
                // Command prefix: delegate; `true` means "exit the editor".
                constants::COMMAND_START => {
                    if input_process::command_matcher(&input_char, editor_config) {
                        break;
                    }
                }
                // Movement keys: scroll/move the cursor by one step.
                constants::MOVE_CURSOR_UP | constants::MOVE_CURSOR_LEFT | constants::MOVE_CURSOR_DOWN | constants::MOVE_CURSOR_RIGHT => {
                    editor_cursor::editor_scroll(editor_config, input_char, 1);
                }
                // Everything else is currently ignored.
                _ => {}
            }
        }
    }
}
|
use super::{ConnectionSM, ConnectionSMResult};
use crate::{
error::{ProtoError, ProtoErrorKind, ProtoErrorResultExt},
message::{NowActivateMsg, NowCapabilitiesMsg, NowMessage},
sm::{ConnectionSMSharedData, ConnectionSMSharedDataRc, ConnectionState},
};
use log::info;
use std::{cell::RefCell, rc::Rc};
// Builds the standard "method called in the wrong state" error: `$sm_struct`
// supplies the NAME / CONNECTION_STATE consts, `$self` the current state.
macro_rules! unexpected_call {
    ($sm_struct:ident, $self:ident, $method_name:literal) => {
        ProtoError::new(ProtoErrorKind::ConnectionSequence($sm_struct::CONNECTION_STATE)).or_desc(format!(
            concat!("unexpected call to `{}::", $method_name, "` in state {:?}"),
            $sm_struct::NAME,
            $self.state
        ))
    };
}
// Builds the "message not valid in this state" error, embedding the offending
// message and the state machine's name and current state.
macro_rules! unexpected_msg {
    ($sm_struct:ident, $self:ident, $unexpected_msg:ident) => {
        ProtoError::new(ProtoErrorKind::UnexpectedMessage($unexpected_msg.get_type())).or_desc(format!(
            "`{}` received an unexpected message in state {:?}: {:?}",
            $sm_struct::NAME,
            $self.state,
            $unexpected_msg
        ))
    };
}
/// Minimal lifecycle shared by the simple connection state machines below:
/// freshly created -> ready to exchange messages -> finished.
#[derive(PartialEq, Debug)]
enum BasicState {
    Initial,
    Ready,
    Terminated,
}
// handshake
/// State machine for the handshake phase of the connection sequence; needs no
/// shared connection data.
pub struct HandshakeSM {
    state: BasicState,
}
impl HandshakeSM {
    const CONNECTION_STATE: ConnectionState = ConnectionState::Handshake;
    const NAME: &'static str = "HandshakeSM";
    /// Starts in `Initial`; the first `update_without_message` call emits the
    /// handshake message.
    pub fn new() -> Self {
        Self {
            state: BasicState::Initial,
        }
    }
}
impl ConnectionSM for HandshakeSM {
    // The handshake uses no shared connection data; both accessors are no-ops.
    fn set_shared_data(&mut self, _: ConnectionSMSharedDataRc) {}
    fn get_shared_data(&self) -> Option<ConnectionSMSharedDataRc> {
        None
    }
    fn is_terminated(&self) -> bool {
        self.state == BasicState::Terminated
    }
    fn waiting_for_packet(&self) -> bool {
        self.state == BasicState::Ready
    }
    /// Initial -> Ready: emits a success handshake message and then waits for
    /// the peer's reply. Any other state is a sequence error.
    fn update_without_message<'msg>(&mut self) -> ConnectionSMResult<'msg> {
        use wayk_proto::message::NowHandshakeMsg;
        match &self.state {
            BasicState::Initial => {
                self.state = BasicState::Ready;
                Ok(Some(NowHandshakeMsg::new_success().into()))
            }
            _ => unexpected_call!(Self, self, "update_without_message"),
        }
    }
    /// Ready -> Terminated on a success reply; failure or version mismatch
    /// become protocol errors, as does any non-handshake message.
    fn update_with_message<'msg: 'a, 'a>(&mut self, msg: &'a NowMessage<'msg>) -> ConnectionSMResult<'msg> {
        use wayk_proto::message::status::HandshakeStatusCode;
        match &self.state {
            BasicState::Ready => match msg {
                NowMessage::Handshake(msg) => match msg.status.code() {
                    HandshakeStatusCode::Success => {
                        log::trace!("handshake succeeded");
                        self.state = BasicState::Terminated;
                        Ok(None)
                    }
                    HandshakeStatusCode::Failure => {
                        ProtoError::new(ProtoErrorKind::ConnectionSequence(ConnectionState::Handshake))
                            .or_desc("handshake failed")
                    }
                    HandshakeStatusCode::Incompatible => {
                        ProtoError::new(ProtoErrorKind::ConnectionSequence(ConnectionState::Handshake))
                            .or_desc("version incompatible")
                    }
                },
                unexpected => unexpected_msg!(Self, self, unexpected),
            },
            _ => unexpected_call!(Self, self, "update_with_message"),
        }
    }
}
// negotiate
/// State machine for authentication-type negotiation; holds the shared
/// connection data so it can narrow `available_auth_types` to the common
/// subset once the server replies.
pub struct NegotiateSM {
    state: BasicState,
    shared_data: Rc<RefCell<ConnectionSMSharedData>>,
}
impl NegotiateSM {
    const CONNECTION_STATE: ConnectionState = ConnectionState::Negotiate;
    const NAME: &'static str = "NegotiateSM";
    /// Creates a negotiate SM in the `Initial` state.
    pub fn new(shared_data: Rc<RefCell<ConnectionSMSharedData>>) -> Self {
        Self {
            state: BasicState::Initial,
            shared_data,
        }
    }
}
impl ConnectionSM for NegotiateSM {
    fn set_shared_data(&mut self, shared_data: Rc<RefCell<ConnectionSMSharedData>>) {
        self.shared_data = shared_data;
    }
    fn get_shared_data(&self) -> Option<ConnectionSMSharedDataRc> {
        Some(Rc::clone(&self.shared_data))
    }
    fn is_terminated(&self) -> bool {
        self.state == BasicState::Terminated
    }
    fn waiting_for_packet(&self) -> bool {
        self.state == BasicState::Ready
    }
    /// Sends our negotiate message advertising the locally available
    /// authentication types; only valid in `Initial` state.
    fn update_without_message<'msg>(&mut self) -> ConnectionSMResult<'msg> {
        use wayk_proto::message::{NegotiateFlags, NowNegotiateMsg};
        match &self.state {
            BasicState::Initial => {
                self.state = BasicState::Ready;
                let shared_data = self.shared_data.borrow();
                Ok(Some(
                    NowNegotiateMsg::new_with_auth_list(
                        NegotiateFlags::new_empty().set_srp_extended(),
                        shared_data.available_auth_types.clone(),
                    )
                    .into(),
                ))
            }
            _ => unexpected_call!(Self, self, "update_without_message"),
        }
    }
    /// Intersects the server's advertised auth types with ours and writes
    /// the common subset back into the shared data.
    fn update_with_message<'msg: 'a, 'a>(&mut self, msg: &'a NowMessage<'msg>) -> ConnectionSMResult<'msg> {
        match &self.state {
            BasicState::Ready => match msg {
                NowMessage::Negotiate(msg) => {
                    info!("Available authentication methods on server: {:?}", msg.auth_list.0);
                    let mut shared_data = self.shared_data.borrow_mut();
                    // Keep only the auth types both peers support.
                    let common_auth_types = msg
                        .auth_list
                        .iter()
                        .filter(|elem| shared_data.available_auth_types.contains(elem))
                        .copied()
                        .collect();
                    shared_data.available_auth_types = common_auth_types;
                    self.state = BasicState::Terminated;
                    Ok(None)
                }
                unexpected => unexpected_msg!(Self, self, unexpected),
            },
            _ => unexpected_call!(Self, self, "update_with_message"),
        }
    }
}
// associate
/// Lifecycle of the associate exchange: the server speaks first with an
/// info message; if the session is not already active we request
/// association and wait for the server's response.
#[derive(PartialEq, Debug)]
enum AssociateState {
    WaitInfo,
    WaitResponse,
    Terminated,
}
/// State machine for the session association step; purely message-driven
/// (the server sends the first packet), so it holds no shared data.
pub struct AssociateSM {
    state: AssociateState,
}
impl AssociateSM {
    const CONNECTION_STATE: ConnectionState = ConnectionState::Associate;
    const NAME: &'static str = "AssociateSM";
    /// Creates an associate SM waiting for the server's info message.
    pub fn new() -> Self {
        Self {
            state: AssociateState::WaitInfo,
        }
    }
}
impl ConnectionSM for AssociateSM {
    // The associate step carries no shared connection data.
    fn set_shared_data(&mut self, _: ConnectionSMSharedDataRc) {}
    fn get_shared_data(&self) -> Option<ConnectionSMSharedDataRc> {
        None
    }
    fn is_terminated(&self) -> bool {
        self.state == AssociateState::Terminated
    }
    fn waiting_for_packet(&self) -> bool {
        !self.is_terminated()
    }
    // This SM never initiates; the server always sends the first message.
    fn update_without_message<'msg>(&mut self) -> ConnectionSMResult<'msg> {
        unexpected_call!(Self, self, "update_without_message")
    }
    /// Handles the server's associate info/response messages: replies with
    /// an association request unless the session is already active, then
    /// terminates on a successful response.
    fn update_with_message<'msg: 'a, 'a>(&mut self, msg: &'a NowMessage<'msg>) -> ConnectionSMResult<'msg> {
        use wayk_proto::message::{status::AssociateStatusCode, NowAssociateMsg};
        match &self.state {
            AssociateState::WaitInfo => match msg {
                NowMessage::Associate(NowAssociateMsg::Info(msg)) => {
                    self.state = AssociateState::WaitResponse;
                    if msg.flags.active() {
                        log::trace!("associate process session is already active");
                        Ok(None)
                    } else {
                        Ok(Some(NowAssociateMsg::new_request().into()))
                    }
                }
                unexpected => unexpected_msg!(Self, self, unexpected),
            },
            AssociateState::WaitResponse => match msg {
                NowMessage::Associate(NowAssociateMsg::Response(msg)) => match msg.status.code() {
                    AssociateStatusCode::Success => {
                        self.state = AssociateState::Terminated;
                        log::trace!("associate process succeeded");
                        Ok(None)
                    }
                    AssociateStatusCode::Failure => {
                        // Fixed: this previously reported
                        // `ConnectionState::Handshake` (copy-paste from
                        // HandshakeSM); the failure happens during the
                        // Associate step. Also dropped the redundant
                        // `.to_string()` + `{:?}` which double-quoted the
                        // status in the message.
                        ProtoError::new(ProtoErrorKind::ConnectionSequence(Self::CONNECTION_STATE))
                            .or_desc(format!("Association failed: {}", msg.status.status_type()))
                    }
                },
                unexpected => unexpected_msg!(Self, self, unexpected),
            },
            AssociateState::Terminated => unexpected_call!(Self, self, "update_with_message"),
        }
    }
}
// capabilities
/// State machine for the capabilities exchange; answers the server's
/// capability set with our own (taken from the shared data).
pub struct CapabilitiesSM {
    // Single-exchange SM, so a plain flag suffices instead of a state enum.
    terminated: bool,
    shared_data: ConnectionSMSharedDataRc,
}
impl CapabilitiesSM {
    /// Creates a capabilities SM that waits for the server's capabilities.
    pub fn new(shared_data: ConnectionSMSharedDataRc) -> Self {
        Self {
            terminated: false,
            shared_data,
        }
    }
}
impl ConnectionSM for CapabilitiesSM {
    fn set_shared_data(&mut self, shared_data: ConnectionSMSharedDataRc) {
        self.shared_data = shared_data;
    }
    fn get_shared_data(&self) -> Option<ConnectionSMSharedDataRc> {
        Some(Rc::clone(&self.shared_data))
    }
    fn is_terminated(&self) -> bool {
        self.terminated
    }
    fn waiting_for_packet(&self) -> bool {
        !self.terminated
    }
    // NOTE(review): errors are hand-rolled here rather than via the
    // `unexpected_call!`/`unexpected_msg!` macros because this type does not
    // define the `NAME`/`CONNECTION_STATE` consts the macros require.
    fn update_without_message<'msg>(&mut self) -> ConnectionSMResult<'msg> {
        ProtoError::new(ProtoErrorKind::ConnectionSequence(ConnectionState::Capabilities))
            .or_desc("unexpected call to `CapabilitiesSM::update_without_message`")
    }
    /// Logs the server's capabilities and replies with our own set taken
    /// from the shared data; terminates after this single exchange.
    fn update_with_message<'msg: 'a, 'a>(&mut self, msg: &'a NowMessage<'msg>) -> ConnectionSMResult<'msg> {
        if self.terminated {
            ProtoError::new(ProtoErrorKind::ConnectionSequence(ConnectionState::Capabilities))
                .or_desc("unexpected call to `CapabilitiesSM::update_with_message` in terminated state")
        } else {
            match msg {
                NowMessage::Capabilities(msg) => {
                    log::info!(
                        "Server capabilities (short): {:?}",
                        msg.capabilities
                            .iter()
                            .map(|caps| caps.name_as_str())
                            .collect::<Vec<&str>>()
                    );
                    log::trace!("Server capabilities details: {:#?}", msg.capabilities.0);
                    self.terminated = true;
                    Ok(Some(
                        NowCapabilitiesMsg::new_with_capabilities(self.shared_data.borrow().capabilities.clone())
                            .into(),
                    ))
                }
                unexpected => ProtoError::new(ProtoErrorKind::ConnectionSequence(ConnectionState::Capabilities))
                    .or_desc(format!("received an unexpected message: {:?}", unexpected)),
            }
        }
    }
}
// channels
/// Lifecycle of channel pairing: first list the channels both sides
/// support, then open the surviving set.
#[derive(PartialEq, Debug)]
enum ChannelPairingState {
    SendListRequest,
    WaitListResponse,
    SendOpenRequest,
    WaitOpenResponse,
    Terminated,
}
/// State machine that pairs virtual channels with the server and finally
/// sends the activation message.
pub struct ChannelsSM {
    state: ChannelPairingState,
    shared_data: ConnectionSMSharedDataRc,
}
impl ChannelsSM {
    const CONNECTION_STATE: ConnectionState = ConnectionState::Channels;
    const NAME: &'static str = "ChannelsSM";
    /// Creates a channels SM ready to send the channel list request.
    pub fn new(shared_data: ConnectionSMSharedDataRc) -> Self {
        Self {
            state: ChannelPairingState::SendListRequest,
            shared_data,
        }
    }
}
impl ConnectionSM for ChannelsSM {
    fn set_shared_data(&mut self, shared_data: ConnectionSMSharedDataRc) {
        self.shared_data = shared_data;
    }
    fn get_shared_data(&self) -> Option<ConnectionSMSharedDataRc> {
        Some(Rc::clone(&self.shared_data))
    }
    fn is_terminated(&self) -> bool {
        self.state == ChannelPairingState::Terminated
    }
    fn waiting_for_packet(&self) -> bool {
        self.state == ChannelPairingState::WaitListResponse || self.state == ChannelPairingState::WaitOpenResponse
    }
    /// Emits the next outbound request — the list request first, then (after
    /// the list response was processed) the open request.
    fn update_without_message<'msg>(&mut self) -> ConnectionSMResult<'msg> {
        use crate::message::{ChannelMessageType, NowChannelMsg};
        match self.state {
            ChannelPairingState::SendListRequest => {
                self.state = ChannelPairingState::WaitListResponse;
                Ok(Some(
                    NowChannelMsg::new(
                        ChannelMessageType::ChannelListRequest,
                        self.shared_data.borrow().channels.clone(),
                    )
                    .into(),
                ))
            }
            ChannelPairingState::WaitListResponse => unexpected_call!(Self, self, "update_without_message"),
            ChannelPairingState::SendOpenRequest => {
                self.state = ChannelPairingState::WaitOpenResponse;
                Ok(Some(
                    NowChannelMsg::new(
                        ChannelMessageType::ChannelOpenRequest,
                        self.shared_data.borrow().channels.clone(),
                    )
                    .into(),
                ))
            }
            ChannelPairingState::WaitOpenResponse => unexpected_call!(Self, self, "update_without_message"),
            ChannelPairingState::Terminated => unexpected_call!(Self, self, "update_without_message"),
        }
    }
    /// Handles the server's list/open responses; finishes by sending the
    /// activation message once channels are open.
    fn update_with_message<'msg: 'a, 'a>(&mut self, msg: &'a NowMessage<'msg>) -> ConnectionSMResult<'msg> {
        use crate::message::ChannelName;
        match self.state {
            ChannelPairingState::SendListRequest => unexpected_call!(Self, self, "update_with_message"),
            ChannelPairingState::WaitListResponse => match msg {
                NowMessage::Channel(msg) => {
                    log::info!(
                        "Available channel(s) on server: {:?}",
                        msg.channel_list
                            .iter()
                            .map(|def| &def.name)
                            .collect::<Vec<&ChannelName>>()
                    );
                    // Drop every locally requested channel the server does
                    // not offer before sending the open request.
                    let mut unavailable_channels = Vec::new();
                    for def in self.shared_data.borrow().channels.iter() {
                        if !msg.channel_list.iter().any(|d| d.name == def.name) {
                            unavailable_channels.push(def.name.clone())
                        }
                    }
                    if !unavailable_channels.is_empty() {
                        log::warn!("Unavailable channel(s) on server ignored: {:?}", unavailable_channels);
                        self.shared_data
                            .borrow_mut()
                            .channels
                            .retain(|def| !unavailable_channels.contains(&def.name));
                    }
                    self.state = ChannelPairingState::SendOpenRequest;
                    Ok(None)
                }
                unexpected => unexpected_msg!(Self, self, unexpected),
            },
            ChannelPairingState::SendOpenRequest => unexpected_call!(Self, self, "update_with_message"),
            ChannelPairingState::WaitOpenResponse => match msg {
                NowMessage::Channel(msg) => {
                    log::info!(
                        "Opened channel(s): {:?}",
                        msg.channel_list
                            .iter()
                            .map(|def| &def.name)
                            .collect::<Vec<&ChannelName>>()
                    );
                    self.state = ChannelPairingState::Terminated;
                    // Record the channel set the server actually opened,
                    // then activate the session.
                    self.shared_data.borrow_mut().channels = msg.channel_list.0.clone();
                    Ok(Some(NowActivateMsg::default().into()))
                }
                unexpected => unexpected_msg!(Self, self, unexpected),
            },
            ChannelPairingState::Terminated => unexpected_call!(Self, self, "update_with_message"),
        }
    }
}
|
use super::Pixel;
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
/// A pixel with an RGBA (red, green, blue, alpha) color representation.
///
/// Each channel is 8 bits; `a` is the opacity (0 = fully transparent,
/// 255 = fully opaque).
pub struct RgbaPixel {
    r: u8,
    g: u8,
    b: u8,
    a: u8,
}
impl RgbaPixel {
/// Creates a new [`RgbaPixel`]
pub fn new(r: u8, g: u8, b: u8, a: u8) -> Self {
Self { r, g, b, a }
}
}
impl Pixel for RgbaPixel {
    /// Alpha-composites `other` over `self` (non-premultiplied 8-bit
    /// channels, alpha scaled to 0..=255).
    ///
    /// The arithmetic is widened to `u32` because intermediate products such
    /// as `a_b * (255 - a_a)` do not fit in a `u8`: the original byte-sized
    /// math overflowed (panicking in debug builds, wrapping in release) and
    /// divided by zero when both pixels were fully transparent.
    fn blend(&self, other: &Self) -> Self {
        let (r_a, g_a, b_a, a_a) = (self.r as u32, self.g as u32, self.b as u32, self.a as u32);
        let (r_b, g_b, b_b, a_b) = (other.r as u32, other.g as u32, other.b as u32, other.a as u32);
        // Resulting alpha: a_a + a_b * (1 - a_a), kept in 0..=255 scale.
        let a = a_a + a_b * (255 - a_a) / 255;
        if a == 0 {
            // Both inputs fully transparent: the color channels are
            // undefined, so return the all-zero transparent pixel instead of
            // dividing by zero.
            return Self::default();
        }
        // Weighted average of the two colors by their effective alphas; the
        // numerator is bounded by 255 * a, so each channel fits in a u8.
        let r = (r_a * a_a + r_b * a_b * (255 - a_a) / 255) / a;
        let g = (g_a * a_a + g_b * a_b * (255 - a_a) / 255) / a;
        let b = (b_a * a_a + b_b * a_b * (255 - a_a) / 255) / a;
        Self {
            r: r as u8,
            g: g as u8,
            b: b as u8,
            a: a as u8,
        }
    }
}
impl From<(u8, u8, u8, u8)> for RgbaPixel {
    /// Builds a pixel from an `(r, g, b, a)` component tuple.
    fn from(value: (u8, u8, u8, u8)) -> Self {
        let (r, g, b, a) = value;
        Self { r, g, b, a }
    }
}
|
use std::borrow::BorrowMut;
use std::cell::RefCell;
use std::collections::HashMap;
use std::env;
use std::ops::{Deref, DerefMut};
use std::process::Command;
use std::rc::Rc;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Mutex};
use std::thread;
use std::thread::sleep;
use std::time::Duration;
use redis;
use redis::Commands;
use redis::ConnectionAddr;
use serial_test::serial;
use crate::support::*;
use redis_event::config::Config;
use redis_event::rdb::{ExpireType, Object};
use redis_event::{cmd, Event, EventHandler, RedisListener};
use redis_event::{listener, NoOpEventHandler};
mod support;
// Parses a force-encoded hash RDB: every field name and value of the
// `force_dictionary` key must be exactly 50 bytes long.
#[test]
#[serial]
fn test_hash() {
    struct TestRdbHandler {}
    impl EventHandler for TestRdbHandler {
        fn handle(&mut self, data: Event) {
            match data {
                Event::RDB(rdb) => match rdb {
                    Object::Hash(hash) => {
                        let key = String::from_utf8_lossy(hash.key);
                        assert_eq!("force_dictionary", key);
                        for field in hash.fields {
                            assert_eq!(50, field.name.len());
                            assert_eq!(50, field.value.len());
                        }
                    }
                    _ => {}
                },
                Event::AOF(_) => {}
            }
        }
    }
    start_redis_test("dictionary.rdb", 10000, Rc::new(RefCell::new(TestRdbHandler {})));
}
// Parses a ziplist-encoded hash RDB and checks a few known field/value
// pairs of the `zipmap_compresses_easily` key.
#[test]
#[serial]
fn test_hash_1() {
    struct TestRdbHandler {}
    impl EventHandler for TestRdbHandler {
        fn handle(&mut self, data: Event) {
            match data {
                Event::RDB(rdb) => match rdb {
                    Object::Hash(hash) => {
                        let key = String::from_utf8_lossy(hash.key);
                        assert_eq!("zipmap_compresses_easily", key);
                        let mut map = HashMap::new();
                        for field in hash.fields {
                            let name = String::from_utf8_lossy(&field.name);
                            let value = String::from_utf8_lossy(&field.value);
                            map.insert(name, value);
                        }
                        assert_eq!("aa", map.get("a").unwrap());
                        assert_eq!("aaaa", map.get("aa").unwrap());
                        assert_eq!("aaaaaaaaaaaaaa", map.get("aaaaa").unwrap());
                    }
                    _ => {}
                },
                Event::AOF(_) => {}
            }
        }
    }
    start_redis_test("hash_as_ziplist.rdb", 10001, Rc::new(RefCell::new(TestRdbHandler {})));
}
// Parses an RDB whose single string key is highly compressible (a long run
// of 'a'), checking both key and value round-trip intact.
#[test]
#[serial]
fn test_string() {
    struct TestRdbHandler {}
    impl EventHandler for TestRdbHandler {
        fn handle(&mut self, data: Event) {
            match data {
                Event::RDB(rdb) => match rdb {
                    Object::String(kv) => {
                        let key = String::from_utf8_lossy(kv.key);
                        assert_eq!("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", key);
                        assert_eq!(
                            "Key that redis should compress easily",
                            String::from_utf8_lossy(kv.value)
                        )
                    }
                    _ => {}
                },
                Event::AOF(_) => {}
            }
        }
    }
    start_redis_test(
        "easily_compressible_string_key.rdb",
        10002,
        Rc::new(RefCell::new(TestRdbHandler {})),
    );
}
// Parses an RDB containing integer-encoded string keys (8/16/32-bit,
// positive and negative); assertions run at end-of-RDB (EOR) once all keys
// are collected.
#[test]
#[serial]
fn test_integer() {
    struct TestRdbHandler {
        map: HashMap<String, String>,
    }
    impl EventHandler for TestRdbHandler {
        fn handle(&mut self, data: Event) {
            match data {
                Event::RDB(rdb) => match rdb {
                    Object::String(kv) => {
                        self.map.insert(
                            String::from_utf8_lossy(kv.key).to_string(),
                            String::from_utf8_lossy(kv.value).to_string(),
                        );
                    }
                    Object::EOR => {
                        assert_eq!(self.map.get("125").unwrap(), "Positive 8 bit integer");
                        assert_eq!(self.map.get("43947").unwrap(), "Positive 16 bit integer");
                        assert_eq!(self.map.get("183358245").unwrap(), "Positive 32 bit integer");
                        assert_eq!(self.map.get("-123").unwrap(), "Negative 8 bit integer");
                        assert_eq!(self.map.get("-29477").unwrap(), "Negative 16 bit integer");
                        assert_eq!(self.map.get("-183358245").unwrap(), "Negative 32 bit integer");
                    }
                    _ => {}
                },
                Event::AOF(_) => {}
            }
        }
    }
    start_redis_test(
        "integer_keys.rdb",
        10003,
        Rc::new(RefCell::new(TestRdbHandler { map: HashMap::new() })),
    );
}
// Parses an intset-encoded set of 16-bit integers; every member must be one
// of the three expected values.
#[test]
#[serial]
fn test_intset16() {
    struct TestRdbHandler {
        map: HashMap<String, Vec<String>>,
    }
    impl EventHandler for TestRdbHandler {
        fn handle(&mut self, data: Event) {
            match data {
                Event::RDB(rdb) => match rdb {
                    Object::Set(set) => {
                        let key = String::from_utf8_lossy(set.key).to_string();
                        let mut val = Vec::new();
                        for mem in set.members {
                            val.push(String::from_utf8_lossy(mem).to_string());
                        }
                        self.map.insert(key, val);
                    }
                    Object::EOR => {
                        let values = self.map.get("intset_16").unwrap();
                        let arr = ["32766", "32765", "32764"];
                        for val in values {
                            assert!(arr.contains(&val.as_str()));
                        }
                    }
                    _ => {}
                },
                Event::AOF(_) => {}
            }
        }
    }
    start_redis_test(
        "intset_16.rdb",
        10004,
        Rc::new(RefCell::new(TestRdbHandler { map: HashMap::new() })),
    );
}
// Same as test_intset16 but for an intset of 32-bit integers.
#[test]
#[serial]
fn test_intset32() {
    struct TestRdbHandler {
        map: HashMap<String, Vec<String>>,
    }
    impl EventHandler for TestRdbHandler {
        fn handle(&mut self, data: Event) {
            match data {
                Event::RDB(rdb) => match rdb {
                    Object::Set(set) => {
                        let key = String::from_utf8_lossy(set.key).to_string();
                        let mut val = Vec::new();
                        for mem in set.members {
                            val.push(String::from_utf8_lossy(mem).to_string());
                        }
                        self.map.insert(key, val);
                    }
                    Object::EOR => {
                        let values = self.map.get("intset_32").unwrap();
                        let arr = ["2147418110", "2147418109", "2147418108"];
                        for val in values {
                            assert!(arr.contains(&val.as_str()));
                        }
                    }
                    _ => {}
                },
                Event::AOF(_) => {}
            }
        }
    }
    start_redis_test(
        "intset_32.rdb",
        10005,
        Rc::new(RefCell::new(TestRdbHandler { map: HashMap::new() })),
    );
}
// Same as test_intset16 but for an intset of 64-bit integers.
#[test]
#[serial]
fn test_intset64() {
    struct TestRdbHandler {
        map: HashMap<String, Vec<String>>,
    }
    impl EventHandler for TestRdbHandler {
        fn handle(&mut self, data: Event) {
            match data {
                Event::RDB(rdb) => match rdb {
                    Object::Set(set) => {
                        let key = String::from_utf8_lossy(set.key).to_string();
                        let mut val = Vec::new();
                        for mem in set.members {
                            val.push(String::from_utf8_lossy(mem).to_string());
                        }
                        self.map.insert(key, val);
                    }
                    Object::EOR => {
                        let values = self.map.get("intset_64").unwrap();
                        let arr = ["9223090557583032318", "9223090557583032317", "9223090557583032316"];
                        for val in values {
                            assert!(arr.contains(&val.as_str()));
                        }
                    }
                    _ => {}
                },
                Event::AOF(_) => {}
            }
        }
    }
    start_redis_test(
        "intset_64.rdb",
        10006,
        Rc::new(RefCell::new(TestRdbHandler { map: HashMap::new() })),
    );
}
// Parses an RDB key carrying a millisecond-precision expiry and verifies
// both the value and the expire metadata.
#[test]
#[serial]
fn test_keys_with_expiry() {
    struct TestRdbHandler {}
    impl EventHandler for TestRdbHandler {
        fn handle(&mut self, data: Event) {
            match data {
                Event::RDB(rdb) => match rdb {
                    Object::String(kv) => {
                        let key = String::from_utf8_lossy(kv.key).to_string();
                        let val = String::from_utf8_lossy(kv.value).to_string();
                        assert_eq!("expires_ms_precision", key);
                        assert_eq!("2022-12-25 10:11:12.573 UTC", val);
                        if let Some((ExpireType::Millisecond, val)) = kv.meta.expire {
                            assert_eq!(1671963072573, val);
                        } else {
                            // Panic message is Chinese for "wrong expire type".
                            panic!("错误的过期类型")
                        }
                    }
                    _ => {}
                },
                Event::AOF(_) => {}
            }
        }
    }
    start_redis_test("keys_with_expiry.rdb", 10007, Rc::new(RefCell::new(TestRdbHandler {})));
}
// Parses a force-linkedlist-encoded list of 1000 elements and spot-checks
// the first two entries at end-of-RDB.
#[test]
#[serial]
fn test_linked_list() {
    struct TestRdbHandler {
        list: Vec<String>,
    }
    impl EventHandler for TestRdbHandler {
        fn handle(&mut self, data: Event) {
            match data {
                Event::RDB(rdb) => match rdb {
                    Object::List(list) => {
                        assert_eq!("force_linkedlist", String::from_utf8_lossy(list.key));
                        for val in list.values {
                            let value = String::from_utf8_lossy(val).to_string();
                            self.list.push(value);
                        }
                    }
                    Object::EOR => {
                        assert_eq!(1000, self.list.len());
                        assert_eq!(
                            "41PJSO2KRV6SK1WJ6936L06YQDPV68R5J2TAZO3YAR5IL5GUI8",
                            self.list.get(0).unwrap()
                        );
                        assert_eq!(
                            "E41JRQX2DB4P1AQZI86BAT7NHPBHPRIIHQKA4UXG94ELZZ7P3Y",
                            self.list.get(1).unwrap()
                        );
                    }
                    _ => {}
                },
                Event::AOF(_) => {}
            }
        }
    }
    start_redis_test(
        "linkedlist.rdb",
        10008,
        Rc::new(RefCell::new(TestRdbHandler { list: vec![] })),
    );
}
// Parses an RDB spanning multiple logical databases and checks each key is
// reported with the correct db index in its metadata.
#[test]
#[serial]
fn test_multiple_database() {
    struct TestRdbHandler {}
    impl EventHandler for TestRdbHandler {
        fn handle(&mut self, data: Event) {
            match data {
                Event::RDB(rdb) => match rdb {
                    Object::String(kv) => {
                        let key = String::from_utf8_lossy(kv.key);
                        if "key_in_zeroth_database".eq(&key) {
                            assert_eq!(0, kv.meta.db);
                            assert_eq!("zero", String::from_utf8_lossy(kv.value))
                        } else if "key_in_second_database".eq(&key) {
                            assert_eq!(2, kv.meta.db);
                            assert_eq!("second", String::from_utf8_lossy(kv.value))
                        } else {
                            // Panic message is Chinese for "wrong key name".
                            panic!("key名错误")
                        }
                    }
                    _ => {}
                },
                Event::AOF(_) => {}
            }
        }
    }
    start_redis_test(
        "multiple_databases.rdb",
        10009,
        Rc::new(RefCell::new(TestRdbHandler {})),
    );
}
// Parses a regular (non-intset) set and checks all members belong to the
// expected six-element universe.
#[test]
#[serial]
fn test_regular_set() {
    struct TestRdbHandler {
        map: HashMap<String, Vec<String>>,
    }
    impl EventHandler for TestRdbHandler {
        fn handle(&mut self, data: Event) {
            match data {
                Event::RDB(rdb) => match rdb {
                    Object::Set(set) => {
                        let key = String::from_utf8_lossy(set.key).to_string();
                        let mut val = Vec::new();
                        for mem in set.members {
                            val.push(String::from_utf8_lossy(mem).to_string());
                        }
                        self.map.insert(key, val);
                    }
                    Object::EOR => {
                        let values = self.map.get("regular_set").unwrap();
                        let arr = ["alpha", "beta", "gamma", "delta", "phi", "kappa"];
                        for val in values {
                            assert!(arr.contains(&val.as_str()));
                        }
                    }
                    _ => {}
                },
                Event::AOF(_) => {}
            }
        }
    }
    start_redis_test(
        "regular_set.rdb",
        // NOTE(review): 11110 breaks the 100xx port sequence used by the
        // other tests — presumably to dodge a port clash; confirm.
        11110,
        Rc::new(RefCell::new(TestRdbHandler { map: HashMap::new() })),
    );
}
// Parses a 500-member sorted set and spot-checks two member scores at
// end-of-RDB.
#[test]
#[serial]
fn test_regular_sorted_set() {
    struct TestRdbHandler {
        map: HashMap<String, f64>,
    }
    impl EventHandler for TestRdbHandler {
        fn handle(&mut self, data: Event) {
            match data {
                Event::RDB(rdb) => match rdb {
                    Object::SortedSet(set) => {
                        let key = String::from_utf8_lossy(set.key).to_string();
                        assert_eq!("force_sorted_set", key);
                        for item in set.items {
                            self.map
                                .insert(String::from_utf8_lossy(&item.member).to_string(), item.score);
                        }
                    }
                    Object::EOR => {
                        assert_eq!(500, self.map.len());
                        assert_eq!(
                            3.19,
                            self.map
                                .get("G72TWVWH0DY782VG0H8VVAR8RNO7BS9QGOHTZFJU67X7L0Z3PR")
                                .unwrap()
                                .clone()
                        );
                        assert_eq!(
                            0.76,
                            self.map
                                .get("N8HKPIK4RC4I2CXVV90LQCWODW1DZYD0DA26R8V5QP7UR511M8")
                                .unwrap()
                                .clone()
                        );
                    }
                    _ => {}
                },
                Event::AOF(_) => {}
            }
        }
    }
    start_redis_test(
        "regular_sorted_set.rdb",
        10011,
        Rc::new(RefCell::new(TestRdbHandler { map: HashMap::new() })),
    );
}
// Parses a zipmap hash whose values straddle the zipmap length-encoding
// boundaries (253/254/255 bytes) plus larger payloads.
#[test]
#[serial]
fn test_zipmap_big_values() {
    struct TestRdbHandler {
        map: HashMap<String, Vec<u8>>,
    }
    impl EventHandler for TestRdbHandler {
        fn handle(&mut self, data: Event) {
            match data {
                Event::RDB(rdb) => match rdb {
                    Object::Hash(hash) => {
                        assert_eq!("zipmap_with_big_values", String::from_utf8_lossy(hash.key));
                        for field in hash.fields {
                            let name = String::from_utf8_lossy(&field.name).to_string();
                            self.map.insert(name, field.value.to_vec());
                        }
                    }
                    Object::EOR => {
                        assert_eq!(253, self.map.get("253bytes").unwrap().len());
                        assert_eq!(254, self.map.get("254bytes").unwrap().len());
                        assert_eq!(255, self.map.get("255bytes").unwrap().len());
                        assert_eq!(300, self.map.get("300bytes").unwrap().len());
                        assert_eq!(20000, self.map.get("20kbytes").unwrap().len());
                    }
                    _ => {}
                },
                Event::AOF(_) => {}
            }
        }
    }
    start_redis_test(
        "zipmap_with_big_values.rdb",
        10012,
        Rc::new(RefCell::new(TestRdbHandler { map: HashMap::new() })),
    );
}
// Parses a compressible zipmap-encoded hash and checks known fields.
#[test]
#[serial]
fn test_zipmap_compress() {
    struct TestRdbHandler {
        map: HashMap<String, String>,
    }
    impl EventHandler for TestRdbHandler {
        fn handle(&mut self, data: Event) {
            match data {
                Event::RDB(rdb) => match rdb {
                    Object::Hash(hash) => {
                        assert_eq!("zipmap_compresses_easily", String::from_utf8_lossy(hash.key));
                        for field in hash.fields {
                            let name = String::from_utf8_lossy(&field.name).to_string();
                            let val = String::from_utf8_lossy(&field.value).to_string();
                            self.map.insert(name, val);
                        }
                    }
                    Object::EOR => {
                        assert_eq!("aa", self.map.get("a").unwrap());
                        assert_eq!("aaaa", self.map.get("aa").unwrap());
                        assert_eq!("aaaaaaaaaaaaaa", self.map.get("aaaaa").unwrap());
                    }
                    _ => {}
                },
                Event::AOF(_) => {}
            }
        }
    }
    start_redis_test(
        "zipmap_that_compresses_easily.rdb",
        10013,
        Rc::new(RefCell::new(TestRdbHandler { map: HashMap::new() })),
    );
}
// Parses a non-compressible zipmap-encoded hash and checks known fields.
#[test]
#[serial]
fn test_zipmap_not_compress() {
    struct TestRdbHandler {
        map: HashMap<String, String>,
    }
    impl EventHandler for TestRdbHandler {
        fn handle(&mut self, data: Event) {
            match data {
                Event::RDB(rdb) => match rdb {
                    Object::Hash(hash) => {
                        assert_eq!("zimap_doesnt_compress", String::from_utf8_lossy(hash.key));
                        for field in hash.fields {
                            let name = String::from_utf8_lossy(&field.name).to_string();
                            let val = String::from_utf8_lossy(&field.value).to_string();
                            self.map.insert(name, val);
                        }
                    }
                    Object::EOR => {
                        assert_eq!("2", self.map.get("MKD1G6").unwrap());
                        assert_eq!("F7TI", self.map.get("YNNXK").unwrap());
                    }
                    _ => {}
                },
                Event::AOF(_) => {}
            }
        }
    }
    start_redis_test(
        "zipmap_that_doesnt_compress.rdb",
        10014,
        Rc::new(RefCell::new(TestRdbHandler { map: HashMap::new() })),
    );
}
// Parses a ziplist-encoded list and verifies all six elements in order.
#[test]
#[serial]
fn test_ziplist() {
    struct TestRdbHandler {
        list: Vec<String>,
    }
    impl EventHandler for TestRdbHandler {
        fn handle(&mut self, data: Event) {
            match data {
                Event::RDB(rdb) => match rdb {
                    Object::List(list) => {
                        assert_eq!("ziplist_compresses_easily", String::from_utf8_lossy(list.key));
                        for val in list.values {
                            let value = String::from_utf8_lossy(val).to_string();
                            self.list.push(value);
                        }
                    }
                    Object::EOR => {
                        assert_eq!(6, self.list.len());
                        assert_eq!("aaaaaa", self.list.get(0).unwrap());
                        assert_eq!("aaaaaaaaaaaa", self.list.get(1).unwrap());
                        assert_eq!("aaaaaaaaaaaaaaaaaa", self.list.get(2).unwrap());
                        assert_eq!("aaaaaaaaaaaaaaaaaaaaaaaa", self.list.get(3).unwrap());
                        assert_eq!("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", self.list.get(4).unwrap());
                        assert_eq!("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", self.list.get(5).unwrap());
                    }
                    _ => {}
                },
                Event::AOF(_) => {}
            }
        }
    }
    start_redis_test(
        "ziplist_that_compresses_easily.rdb",
        10015,
        Rc::new(RefCell::new(TestRdbHandler { list: vec![] })),
    );
}
// End-to-end AOF/replication test: starts a password-protected
// redis-server, attaches a listener in AOF mode on a background thread,
// issues a fixed sequence of commands via the redis client, and asserts on
// the replicated command stream. The FLUSHALL command doubles as the
// shutdown signal (its handler kills the server, which ends the listener).
#[test]
#[serial]
fn test_aof() {
    let port = 10016;
    let pid = Command::new("redis-server")
        .arg("--port")
        .arg(port.to_string())
        .arg("--requirepass")
        .arg("123456")
        .arg("--daemonize")
        .arg("no")
        .arg("--loglevel")
        .arg("warning")
        .arg("--logfile")
        .arg(port.to_string())
        .spawn()
        .expect("failed to start redis-server")
        .id();
    // wait redis to start
    sleep(Duration::from_secs(2));
    struct TestCmdHandler {
        // PID of the spawned redis-server, so the handler can kill it.
        pid: u32,
        // Shared counter of replicated commands, checked at the end.
        count: Arc<Mutex<i32>>,
    }
    impl EventHandler for TestCmdHandler {
        fn handle(&mut self, cmd: Event) {
            match cmd {
                Event::RDB(_) => {}
                Event::AOF(cmd) => {
                    println!("{:?}", cmd);
                    // Roundabout `*count += 1` via BorrowMut/DerefMut on the
                    // MutexGuard; kept as-is.
                    if let Ok(mut count) = self.count.lock() {
                        let c = count.borrow_mut();
                        let c = c.deref_mut();
                        *c += 1;
                    }
                    match cmd {
                        cmd::Command::FLUSHDB(flushdb) => {
                            assert_eq!(true, flushdb._async.expect("no async field"));
                        }
                        cmd::Command::FLUSHALL(_) => {
                            // Last command in the script: stop the server.
                            shutdown_redis(self.pid);
                        }
                        cmd::Command::EXPIRE(expire) => {
                            assert_eq!(b"aa", expire.key);
                            assert_eq!(b"1", expire.seconds);
                        }
                        cmd::Command::LINSERT(linsert) => {
                            assert_eq!(b"list", linsert.key);
                            // The client sent BEFORE, so AFTER is a parse bug.
                            if let cmd::lists::POSITION::AFTER = linsert.position {
                                panic!("wrong position");
                            }
                        }
                        cmd::Command::RPOPLPUSH(rpoplpush) => {
                            assert_eq!(b"list", rpoplpush.source);
                            assert_eq!(b"destlist", rpoplpush.destination);
                        }
                        cmd::Command::RPUSH(rpush) => {
                            assert_eq!(b"list", rpush.key);
                            assert_eq!(1, rpush.elements.len());
                            assert_eq!(b"hello", rpush.elements.get(0).unwrap());
                        }
                        cmd::Command::SELECT(select) => {
                            assert_eq!(0, select.db);
                        }
                        cmd::Command::SET(set) => {
                            assert_eq!(b"aa", set.key);
                            assert_eq!(b"bb", set.value);
                        }
                        _ => {}
                    }
                }
            }
        }
    }
    let cmd_count = Arc::new(Mutex::new(0));
    let rc = cmd_count.clone();
    // Listener runs on its own thread; it returns once the server dies.
    let t = thread::spawn(move || {
        let cmd_handler = TestCmdHandler { pid, count: rc };
        let ip = String::from("127.0.0.1");
        let conf = Config {
            is_discard_rdb: false,
            is_aof: true,
            host: ip,
            port,
            password: String::from("123456"),
            repl_id: String::from("?"),
            repl_offset: -1,
            read_timeout: None,
            write_timeout: None,
            is_tls_enabled: false,
            is_tls_insecure: false,
            identity: None,
            username: "".to_string(),
            identity_passwd: None,
        };
        let running = Arc::new(AtomicBool::new(true));
        let mut builder = listener::Builder::new();
        builder.with_config(conf);
        builder.with_control_flag(running);
        builder.with_event_handler(Rc::new(RefCell::new(cmd_handler)));
        let mut redis_listener = builder.build();
        if let Err(_) = redis_listener.start() {
            println!("redis-server closed");
        }
    });
    // wait thread start
    thread::sleep(Duration::from_secs(2));
    let uri = format!("redis://:123456@127.0.0.1:{}", port);
    if let Ok(client) = redis::Client::open(uri.as_str()) {
        if let Ok(mut conn) = client.get_connection() {
            let _: () = conn.set("aa", "bb").unwrap();
            let _: () = conn.expire("aa", 1).unwrap();
            let _: () = redis::cmd("SET")
                .arg("aa")
                .arg("bb")
                .arg("EX")
                .arg("100")
                .arg("XX")
                .query(&mut conn)
                .unwrap();
            let _: () = conn.rpush("list", "hello").unwrap();
            let _: () = redis::cmd("LINSERT")
                .arg("list")
                .arg("BEFORE")
                .arg("hello")
                .arg("world")
                .query(&mut conn)
                .unwrap();
            let _: () = redis::cmd("SORT")
                .arg(&[
                    "list",
                    "ALPHA",
                    "ASC",
                    "LIMIT",
                    "0",
                    "10",
                    "BY",
                    "weight_*",
                    "GET",
                    "nosort",
                    "STORE",
                    "storelidst",
                ])
                .query(&mut conn)
                .unwrap();
            let _: () = conn.rpoplpush("list", "destlist").unwrap();
            let _: () = conn.rename_nx("destlist", "destlist2").unwrap();
            let _: () = conn.pexpire("destlist2", 1000).unwrap();
            let _: () = conn.expire_at("list", 1000).unwrap();
            // flush all, end the test
            let _: () = redis::cmd("FLUSHDB").arg("ASYNC").query(&mut conn).unwrap();
            let _: () = redis::cmd("FLUSHALL").arg("ASYNC").query(&mut conn).unwrap();
            t.join().expect("thread error");
        } else {
            shutdown_redis(pid);
        }
    }
    // 13 replicated commands expected (SELECT included).
    assert_eq!(13, *cmd_count.lock().unwrap().deref());
}
/// Connects the listener to a TLS-enabled test redis-server (certificate
/// verification disabled) and verifies replication setup completes,
/// discarding the RDB payload via a no-op handler.
#[test]
#[serial]
fn test_tls() {
    env::set_var("REDISRS_SERVER_TYPE", "tcp+tls");
    let mut context = TestContext::new();
    let addr = context.server.get_client_addr();
    let (host, port) = match addr {
        ConnectionAddr::TcpTls { ref host, port, .. } => (host, port),
        _ => panic!("wrong mode"),
    };
    println!("redis-server: {}:{}", host, port);
    let conf = Config {
        is_discard_rdb: true,
        is_aof: false,
        host: host.to_string(),
        port: *port,
        password: String::new(),
        repl_id: String::from("?"),
        repl_offset: -1,
        read_timeout: None,
        write_timeout: None,
        is_tls_enabled: true,
        is_tls_insecure: true,
        identity: None,
        username: "".to_string(),
        identity_passwd: None,
    };
    let running = Arc::new(AtomicBool::new(true));
    let mut builder = listener::Builder::new();
    builder.with_config(conf);
    builder.with_control_flag(running);
    builder.with_event_handler(Rc::new(RefCell::new(NoOpEventHandler {})));
    let mut redis_listener = builder.build();
    println!("connect to redis-server");
    if let Err(err) = redis_listener.start() {
        println!("error: {}", err);
        // Fixed: `panic!(err)` with a non-string payload is deprecated and a
        // hard error in the 2021 edition; use a format string instead.
        panic!("{}", err);
    }
    println!("done");
    context.stop_server();
}
/// Boots a throwaway redis-server preloaded with the given RDB fixture,
/// replays it through a `RedisListener`, and forwards every event to
/// `rdb_handler` (which holds the test assertions).
///
/// The spawned server is killed whether or not the listener succeeds.
///
/// # Panics
///
/// Panics if the listener fails to start/replicate.
fn start_redis_test(rdb: &str, port: u16, rdb_handler: Rc<RefCell<dyn EventHandler>>) {
    let pid = start_redis_server(rdb, port);
    // Give the freshly spawned redis-server time to begin accepting connections.
    sleep(Duration::from_secs(2));
    let ip = String::from("127.0.0.1");
    let conf = Config {
        is_discard_rdb: false,
        is_aof: false,
        host: ip,
        port, // field-init shorthand instead of `port: port`
        username: "".to_string(),
        password: String::new(),
        repl_id: String::from("?"),
        repl_offset: -1,
        read_timeout: None,
        write_timeout: None,
        is_tls_enabled: false,
        is_tls_insecure: false,
        identity: None,
        identity_passwd: None,
    };
    let running = Arc::new(AtomicBool::new(true));
    let mut builder = listener::Builder::new();
    builder.with_config(conf);
    builder.with_control_flag(running);
    builder.with_event_handler(rdb_handler);
    let mut redis_listener = builder.build();
    if let Err(error) = redis_listener.start() {
        eprintln!("error: {}", error);
        // Fixed: kill the server before panicking so a failed run does not
        // leak a redis-server process holding the port for later tests, and
        // use a format string — `panic!(error)` with a non-string payload is
        // a hard error in the 2021 edition.
        shutdown_redis(pid);
        panic!("{}", error)
    }
    shutdown_redis(pid);
}
/// Spawns a redis-server on `port` serving the named RDB fixture from
/// `./tests/rdb` and returns the child process id so the caller can kill it.
///
/// # Panics
///
/// Panics if the `redis-server` binary cannot be spawned.
fn start_redis_server(rdb: &str, port: u16) -> u32 {
    // Equivalent CLI:
    // redis-server --port <port> --daemonize no --dbfilename <rdb> --dir ./tests/rdb
    let child = Command::new("redis-server")
        .arg("--port")
        .arg(port.to_string())
        .arg("--daemonize")
        .arg("no")
        .arg("--dbfilename")
        .arg(rdb)
        .arg("--dir")
        .arg("./tests/rdb")
        .arg("--loglevel")
        .arg("warning")
        .arg("--logfile")
        .arg(port.to_string())
        .spawn()
        .expect("failed to start redis-server");
    // Tail expression instead of an explicit `return` (idiomatic Rust).
    child.id()
}
/// Force-terminates the redis-server process identified by `pid` (SIGKILL).
fn shutdown_redis(pid: u32) {
    let target = pid.to_string();
    Command::new("kill")
        .arg("-9")
        .arg(target)
        .status()
        .expect("kill redis failed");
}
|
use std::sync::Arc;
use datafusion::physical_plan::SendableRecordBatchStream;
use iox_query::{
exec::{Executor, ExecutorType},
frontend::reorg::ReorgPlanner,
QueryChunk,
};
use schema::sort::{adjust_sort_key_columns, compute_sort_key, SortKey};
use crate::{buffer_tree::table::TableName, query_adaptor::QueryAdaptor};
/// Result of calling [`compact_persisting_batch`]
pub(super) struct CompactedStream {
/// A stream of compacted, deduplicated
/// [`RecordBatch`](arrow::record_batch::RecordBatch)es
pub(super) stream: SendableRecordBatchStream,
/// The sort key value the catalog should be updated to, if any.
///
/// If returned, the compaction required extending the partition's
/// [`SortKey`] (typically because new columns were in this parquet file
/// that were not in previous files).
pub(super) catalog_sort_key_update: Option<SortKey>,
/// The sort key to be used for compaction.
///
/// This should be used in the [`IoxMetadata`] for the compacted data, and
/// may be a subset of the full sort key contained in
/// [`Self::catalog_sort_key_update`] (or the existing sort key in the
/// catalog).
///
/// [`IoxMetadata`]: parquet_file::metadata::IoxMetadata
pub(super) data_sort_key: SortKey,
}
impl std::fmt::Debug for CompactedStream {
    /// Hand-written `Debug`: the record-batch stream is not `Debug`, so it
    /// is rendered as a fixed placeholder string.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut builder = f.debug_struct("CompactedStream");
        builder.field("stream", &"<SendableRecordBatchStream>");
        builder.field("data_sort_key", &self.data_sort_key);
        builder.field("catalog_sort_key_update", &self.catalog_sort_key_update);
        builder.finish()
    }
}
/// Compact `batch` into a sorted, deduplicated [`CompactedStream`], returning
/// an updated catalog sort key if the partition's sort key had to change.
///
/// The sort key for the output is taken from `sort_key` (adjusted to this
/// batch's columns) when provided, otherwise computed from column cardinality.
///
/// # Panics
///
/// Panics if `batch` contains no record batches, or if building/executing the
/// compaction plan fails. NOTE(review): the `Err(())` arm of the return type
/// is never produced by this body.
pub(super) async fn compact_persisting_batch(
    executor: &Executor,
    sort_key: Option<SortKey>,
    table_name: TableName,
    batch: QueryAdaptor,
) -> Result<CompactedStream, ()> {
    assert!(!batch.record_batches().is_empty());

    // Resolve the sort key: prefer the catalog-provided key, falling back to
    // one derived from the data itself.
    let (data_sort_key, catalog_sort_key_update) = if let Some(catalog_key) = sort_key {
        // A sort key already exists in the catalog. Restrict it to the
        // columns actually present in this data (that subset goes into this
        // file's metadata), and extend the catalog key with any brand-new
        // columns, returning the extension as the catalog update.
        adjust_sort_key_columns(&catalog_key, &batch.schema().primary_key())
    } else {
        // No catalog sort key yet: derive one from column cardinality and
        // use it both for this file and as the new catalog value.
        let computed = compute_sort_key(batch.schema(), batch.record_batches().iter());
        (computed.clone(), Some(computed))
    };

    let batch = Arc::new(batch);

    // Plan and execute the reorg/compaction, yielding a record batch stream.
    let ctx = executor.new_context(ExecutorType::Reorg);
    let logical_plan = ReorgPlanner::new()
        .compact_plan(
            table_name.into(),
            batch.schema(),
            [Arc::clone(&batch) as Arc<dyn QueryChunk>],
            data_sort_key.clone(),
        )
        .unwrap();
    let physical_plan = ctx.create_physical_plan(&logical_plan).await.unwrap();
    let stream = ctx.execute_stream(physical_plan).await.unwrap();

    Ok(CompactedStream {
        stream,
        catalog_sort_key_update,
        data_sort_key,
    })
}
#[cfg(test)]
mod tests {
use arrow::record_batch::RecordBatch;
use arrow_util::assert_batches_eq;
use iox_query::test::{raw_data, TestChunk};
use mutable_batch_lp::lines_to_batches;
use schema::Projection;
use super::*;
use crate::test_util::ARBITRARY_TRANSITION_PARTITION_ID;
// this test was added to guard against https://github.com/influxdata/influxdb_iox/issues/3782
// where if sending in a single row it would compact into an output of two batches, one of
// which was empty, which would cause this to panic.
#[tokio::test]
async fn test_compact_batch_on_one_record_batch_with_one_row() {
    // create input data: a single line-protocol point for table "cpu"
    let batch = lines_to_batches("cpu bar=2 20", 0)
        .unwrap()
        .get("cpu")
        .unwrap()
        .to_arrow(Projection::All)
        .unwrap();
    let batch = QueryAdaptor::new(ARBITRARY_TRANSITION_PARTITION_ID.clone(), vec![batch]);
    // verify PK: there are no tag columns, so the primary key is just "time"
    let schema = batch.schema();
    let pk = schema.primary_key();
    let expected_pk = vec!["time"];
    assert_eq!(expected_pk, pk);
    // compact
    let exc = Executor::new_testing();
    let CompactedStream { stream, .. } =
        compact_persisting_batch(&exc, Some(SortKey::empty()), "test_table".into(), batch)
            .await
            .unwrap();
    let output_batches = datafusion::physical_plan::common::collect(stream)
        .await
        .expect("should execute plan");
    // verify compacted data
    // should be the same as the input, sorted on the time column
    let expected_data = vec![
        "+-----+--------------------------------+",
        "| bar | time |",
        "+-----+--------------------------------+",
        "| 2.0 | 1970-01-01T00:00:00.000000020Z |",
        "+-----+--------------------------------+",
    ];
    assert_batches_eq!(&expected_data, &output_batches);
}
/// A single record batch with no duplicate rows: compaction should only
/// sort, and the empty catalog sort key should be extended to the full PK.
#[tokio::test]
async fn test_compact_batch_on_one_record_batch_no_duplicates() {
    // (fixed typo in the test name: was `..._no_dupilcates`)
    // create input data
    let batch = QueryAdaptor::new(
        ARBITRARY_TRANSITION_PARTITION_ID.clone(),
        create_one_record_batch_with_influxtype_no_duplicates().await,
    );
    // verify PK
    let schema = batch.schema();
    let pk = schema.primary_key();
    let expected_pk = vec!["tag1", "time"];
    assert_eq!(expected_pk, pk);
    // compact
    let exc = Executor::new_testing();
    let CompactedStream {
        stream,
        data_sort_key,
        catalog_sort_key_update,
    } = compact_persisting_batch(&exc, Some(SortKey::empty()), "test_table".into(), batch)
        .await
        .unwrap();
    let output_batches = datafusion::physical_plan::common::collect(stream)
        .await
        .expect("should execute plan");
    // verify compacted data
    // should be the same as the input but sorted on tag1 & time
    let expected_data = vec![
        "+-----------+------+-----------------------------+",
        "| field_int | tag1 | time |",
        "+-----------+------+-----------------------------+",
        "| 70 | UT | 1970-01-01T00:00:00.000020Z |",
        "| 10 | VT | 1970-01-01T00:00:00.000010Z |",
        "| 1000 | WA | 1970-01-01T00:00:00.000008Z |",
        "+-----------+------+-----------------------------+",
    ];
    assert_batches_eq!(&expected_data, &output_batches);
    assert_eq!(data_sort_key, SortKey::from_columns(["tag1", "time"]));
    assert_eq!(
        catalog_sort_key_update.unwrap(),
        SortKey::from_columns(["tag1", "time"])
    );
}
/// No sort key in the catalog: the key must be computed from the data's
/// column cardinality (tag1 has 3 distinct values, tag3 has 4, so the
/// lower-cardinality tag1 sorts first) and returned as a catalog update.
#[tokio::test]
async fn test_compact_batch_no_sort_key() {
    // create input data
    let batch = QueryAdaptor::new(
        ARBITRARY_TRANSITION_PARTITION_ID.clone(),
        create_batches_with_influxtype_different_cardinality().await,
    );
    // verify PK
    let schema = batch.schema();
    let pk = schema.primary_key();
    let expected_pk = vec!["tag1", "tag3", "time"];
    assert_eq!(expected_pk, pk);
    let exc = Executor::new_testing();
    // NO SORT KEY from the catalog here, first persisting batch.
    // Pass `None` so the compute-from-cardinality branch is actually
    // exercised (this test previously passed `Some(SortKey::empty())`,
    // which took the adjust-existing-key path instead).
    let CompactedStream {
        stream,
        data_sort_key,
        catalog_sort_key_update,
    } = compact_persisting_batch(&exc, None, "test_table".into(), batch)
        .await
        .unwrap();
    let output_batches = datafusion::physical_plan::common::collect(stream)
        .await
        .expect("should execute plan");
    // verify compacted data
    // should be the same as the input but sorted on the computed sort key of tag1, tag3, & time
    let expected_data = vec![
        "+-----------+------+------+-----------------------------+",
        "| field_int | tag1 | tag3 | time |",
        "+-----------+------+------+-----------------------------+",
        "| 70 | UT | OR | 1970-01-01T00:00:00.000220Z |",
        "| 50 | VT | AL | 1970-01-01T00:00:00.000210Z |",
        "| 10 | VT | PR | 1970-01-01T00:00:00.000210Z |",
        "| 1000 | WA | TX | 1970-01-01T00:00:00.000028Z |",
        "+-----------+------+------+-----------------------------+",
    ];
    assert_batches_eq!(&expected_data, &output_batches);
    assert_eq!(
        data_sort_key,
        SortKey::from_columns(["tag1", "tag3", "time"])
    );
    assert_eq!(
        catalog_sort_key_update.unwrap(),
        SortKey::from_columns(["tag1", "tag3", "time"])
    );
}
/// A catalog sort key that already covers every column in the data: the data
/// is sorted by that key as-is, and no catalog update is needed.
#[tokio::test]
async fn test_compact_batch_with_specified_sort_key() {
    // create input data
    let batch = QueryAdaptor::new(
        ARBITRARY_TRANSITION_PARTITION_ID.clone(),
        create_batches_with_influxtype_different_cardinality().await,
    );
    // verify PK
    let schema = batch.schema();
    let pk = schema.primary_key();
    let expected_pk = vec!["tag1", "tag3", "time"];
    assert_eq!(expected_pk, pk);
    let exc = Executor::new_testing();
    // SPECIFY A SORT KEY HERE to simulate a sort key being stored in the catalog
    // this is NOT what the computed sort key would be based on this data's cardinality
    let CompactedStream {
        stream,
        data_sort_key,
        catalog_sort_key_update,
    } = compact_persisting_batch(
        &exc,
        Some(SortKey::from_columns(["tag3", "tag1", "time"])),
        "test_table".into(),
        batch,
    )
    .await
    .unwrap();
    let output_batches = datafusion::physical_plan::common::collect(stream)
        .await
        .expect("should execute plan");
    // verify compacted data
    // should be the same as the input but sorted on the specified sort key of tag3, tag1, &
    // time
    let expected_data = vec![
        "+-----------+------+------+-----------------------------+",
        "| field_int | tag1 | tag3 | time |",
        "+-----------+------+------+-----------------------------+",
        "| 50 | VT | AL | 1970-01-01T00:00:00.000210Z |",
        "| 70 | UT | OR | 1970-01-01T00:00:00.000220Z |",
        "| 10 | VT | PR | 1970-01-01T00:00:00.000210Z |",
        "| 1000 | WA | TX | 1970-01-01T00:00:00.000028Z |",
        "+-----------+------+------+-----------------------------+",
    ];
    assert_batches_eq!(&expected_data, &output_batches);
    assert_eq!(
        data_sort_key,
        SortKey::from_columns(["tag3", "tag1", "time"])
    );
    // The sort key does not need to be updated in the catalog
    assert!(catalog_sort_key_update.is_none());
}
/// A catalog sort key that is missing a column present in the data (tag1):
/// the new column is inserted before `time`, and the extended key is
/// returned as a catalog update.
#[tokio::test]
async fn test_compact_batch_new_column_for_sort_key() {
    // create input data
    let batch = QueryAdaptor::new(
        ARBITRARY_TRANSITION_PARTITION_ID.clone(),
        create_batches_with_influxtype_different_cardinality().await,
    );
    // verify PK
    let schema = batch.schema();
    let pk = schema.primary_key();
    let expected_pk = vec!["tag1", "tag3", "time"];
    assert_eq!(expected_pk, pk);
    let exc = Executor::new_testing();
    // SPECIFY A SORT KEY HERE to simulate a sort key being stored in the catalog
    // this is NOT what the computed sort key would be based on this data's cardinality
    // The new column, tag1, should get added just before the time column
    let CompactedStream {
        stream,
        data_sort_key,
        catalog_sort_key_update,
    } = compact_persisting_batch(
        &exc,
        Some(SortKey::from_columns(["tag3", "time"])),
        "test_table".into(),
        batch,
    )
    .await
    .unwrap();
    let output_batches = datafusion::physical_plan::common::collect(stream)
        .await
        .expect("should execute plan");
    // verify compacted data
    // should be the same as the input but sorted on the specified sort key of tag3, tag1, &
    // time
    let expected_data = vec![
        "+-----------+------+------+-----------------------------+",
        "| field_int | tag1 | tag3 | time |",
        "+-----------+------+------+-----------------------------+",
        "| 50 | VT | AL | 1970-01-01T00:00:00.000210Z |",
        "| 70 | UT | OR | 1970-01-01T00:00:00.000220Z |",
        "| 10 | VT | PR | 1970-01-01T00:00:00.000210Z |",
        "| 1000 | WA | TX | 1970-01-01T00:00:00.000028Z |",
        "+-----------+------+------+-----------------------------+",
    ];
    assert_batches_eq!(&expected_data, &output_batches);
    assert_eq!(
        data_sort_key,
        SortKey::from_columns(["tag3", "tag1", "time"])
    );
    // The sort key in the catalog needs to be updated to include the new column
    assert_eq!(
        catalog_sort_key_update.unwrap(),
        SortKey::from_columns(["tag3", "tag1", "time"])
    );
}
/// A catalog sort key containing a column (tag4) that is absent from the
/// data: the data sort key drops it, but the catalog key is NOT rewritten.
#[tokio::test]
async fn test_compact_batch_missing_column_for_sort_key() {
    // create input data
    let batch = QueryAdaptor::new(
        ARBITRARY_TRANSITION_PARTITION_ID.clone(),
        create_batches_with_influxtype_different_cardinality().await,
    );
    // verify PK
    let schema = batch.schema();
    let pk = schema.primary_key();
    let expected_pk = vec!["tag1", "tag3", "time"];
    assert_eq!(expected_pk, pk);
    let exc = Executor::new_testing();
    // SPECIFY A SORT KEY HERE to simulate a sort key being stored in the catalog
    // this is NOT what the computed sort key would be based on this data's cardinality
    // This contains a sort key, "tag4", that doesn't appear in the data.
    let CompactedStream {
        stream,
        data_sort_key,
        catalog_sort_key_update,
    } = compact_persisting_batch(
        &exc,
        Some(SortKey::from_columns(["tag3", "tag1", "tag4", "time"])),
        "test_table".into(),
        batch,
    )
    .await
    .unwrap();
    let output_batches = datafusion::physical_plan::common::collect(stream)
        .await
        .expect("should execute plan");
    // verify compacted data
    // should be the same as the input but sorted on the specified sort key of tag3, tag1, &
    // time
    let expected_data = vec![
        "+-----------+------+------+-----------------------------+",
        "| field_int | tag1 | tag3 | time |",
        "+-----------+------+------+-----------------------------+",
        "| 50 | VT | AL | 1970-01-01T00:00:00.000210Z |",
        "| 70 | UT | OR | 1970-01-01T00:00:00.000220Z |",
        "| 10 | VT | PR | 1970-01-01T00:00:00.000210Z |",
        "| 1000 | WA | TX | 1970-01-01T00:00:00.000028Z |",
        "+-----------+------+------+-----------------------------+",
    ];
    assert_batches_eq!(&expected_data, &output_batches);
    assert_eq!(
        data_sort_key,
        SortKey::from_columns(["tag3", "tag1", "time"])
    );
    // The sort key in the catalog should NOT get a new value
    assert!(catalog_sort_key_update.is_none());
}
/// Regression test for bug #3782: compacting a one-row batch must produce
/// exactly one output batch (no trailing empty batch).
#[tokio::test]
async fn test_compact_one_row_batch() {
    test_helpers::maybe_start_logging();
    // create input data
    let batch = QueryAdaptor::new(
        ARBITRARY_TRANSITION_PARTITION_ID.clone(),
        create_one_row_record_batch_with_influxtype().await,
    );
    // verify PK
    let schema = batch.schema();
    let pk = schema.primary_key();
    let expected_pk = vec!["tag1", "time"];
    assert_eq!(expected_pk, pk);
    let sort_key = compute_sort_key(schema, batch.record_batches().iter());
    assert_eq!(sort_key, SortKey::from_columns(["tag1", "time"]));
    // compact
    let exc = Executor::new_testing();
    let stream = compact_persisting_batch(&exc, Some(sort_key), "test_table".into(), batch)
        .await
        .unwrap();
    let output_batches = datafusion::physical_plan::common::collect(stream.stream)
        .await
        .unwrap();
    // verify no empty record batches - bug #3782
    assert_eq!(output_batches.len(), 1);
    // verify compacted data
    let expected = vec![
        "+-----------+------+-----------------------------+",
        "| field_int | tag1 | time |",
        "+-----------+------+-----------------------------+",
        "| 1000 | MA | 1970-01-01T00:00:00.000001Z |",
        "+-----------+------+-----------------------------+",
    ];
    assert_batches_eq!(&expected, &output_batches);
}
/// One batch containing duplicate rows: compaction sorts and deduplicates
/// (10 input rows collapse to 7 output rows).
#[tokio::test]
async fn test_compact_one_batch_with_duplicates() {
    // create input data
    let batch = QueryAdaptor::new(
        ARBITRARY_TRANSITION_PARTITION_ID.clone(),
        create_one_record_batch_with_influxtype_duplicates().await,
    );
    // verify PK
    let schema = batch.schema();
    let pk = schema.primary_key();
    let expected_pk = vec!["tag1", "time"];
    assert_eq!(expected_pk, pk);
    let sort_key = compute_sort_key(schema, batch.record_batches().iter());
    assert_eq!(sort_key, SortKey::from_columns(["tag1", "time"]));
    // compact
    let exc = Executor::new_testing();
    let stream = compact_persisting_batch(&exc, Some(sort_key), "test_table".into(), batch)
        .await
        .unwrap();
    let output_batches = datafusion::physical_plan::common::collect(stream.stream)
        .await
        .unwrap();
    // verify no empty record batches - bug #3782
    assert_eq!(output_batches.len(), 2);
    assert_eq!(output_batches[0].num_rows(), 6);
    assert_eq!(output_batches[1].num_rows(), 1);
    // verify compacted data
    // data is sorted and all duplicates are removed
    let expected = vec![
        "+-----------+------+--------------------------------+",
        "| field_int | tag1 | time |",
        "+-----------+------+--------------------------------+",
        "| 10 | AL | 1970-01-01T00:00:00.000000050Z |",
        "| 70 | CT | 1970-01-01T00:00:00.000000100Z |",
        "| 70 | CT | 1970-01-01T00:00:00.000000500Z |",
        "| 30 | MT | 1970-01-01T00:00:00.000000005Z |",
        "| 1000 | MT | 1970-01-01T00:00:00.000001Z |",
        "| 1000 | MT | 1970-01-01T00:00:00.000002Z |",
        "| 20 | MT | 1970-01-01T00:00:00.000007Z |",
        "+-----------+------+--------------------------------+",
    ];
    assert_batches_eq!(&expected, &output_batches);
}
/// Multiple input batches with identical schemas and overlapping rows:
/// compaction merges, sorts, and removes cross-batch duplicates.
#[tokio::test]
async fn test_compact_many_batches_same_columns_with_duplicates() {
    // create many-batches input data
    let batch = QueryAdaptor::new(
        ARBITRARY_TRANSITION_PARTITION_ID.clone(),
        create_batches_with_influxtype().await,
    );
    // verify PK
    let schema = batch.schema();
    let pk = schema.primary_key();
    let expected_pk = vec!["tag1", "time"];
    assert_eq!(expected_pk, pk);
    let sort_key = compute_sort_key(schema, batch.record_batches().iter());
    assert_eq!(sort_key, SortKey::from_columns(["tag1", "time"]));
    // compact
    let exc = Executor::new_testing();
    let stream = compact_persisting_batch(&exc, Some(sort_key), "test_table".into(), batch)
        .await
        .unwrap()
        .stream;
    let output_batches = datafusion::physical_plan::common::collect(stream)
        .await
        .unwrap();
    // verify compacted data
    // data is sorted and all duplicates are removed
    let expected = vec![
        "+-----------+------+--------------------------------+",
        "| field_int | tag1 | time |",
        "+-----------+------+--------------------------------+",
        "| 100 | AL | 1970-01-01T00:00:00.000000050Z |",
        "| 70 | CT | 1970-01-01T00:00:00.000000100Z |",
        "| 70 | CT | 1970-01-01T00:00:00.000000500Z |",
        "| 30 | MT | 1970-01-01T00:00:00.000000005Z |",
        "| 1000 | MT | 1970-01-01T00:00:00.000001Z |",
        "| 1000 | MT | 1970-01-01T00:00:00.000002Z |",
        "| 5 | MT | 1970-01-01T00:00:00.000005Z |",
        "| 10 | MT | 1970-01-01T00:00:00.000007Z |",
        "+-----------+------+--------------------------------+",
    ];
    assert_batches_eq!(&expected, &output_batches);
}
/// Input batches with different column sets (the second adds tag2 and
/// field_int2): the merged schema is the union, missing values are null.
#[tokio::test]
async fn test_compact_many_batches_different_columns_with_duplicates() {
    // create many-batches input data
    let batch = QueryAdaptor::new(
        ARBITRARY_TRANSITION_PARTITION_ID.clone(),
        create_batches_with_influxtype_different_columns().await,
    );
    // verify PK
    let schema = batch.schema();
    let pk = schema.primary_key();
    let expected_pk = vec!["tag1", "tag2", "time"];
    assert_eq!(expected_pk, pk);
    let sort_key = compute_sort_key(schema, batch.record_batches().iter());
    assert_eq!(sort_key, SortKey::from_columns(["tag1", "tag2", "time"]));
    // compact
    let exc = Executor::new_testing();
    let stream = compact_persisting_batch(&exc, Some(sort_key), "test_table".into(), batch)
        .await
        .unwrap()
        .stream;
    let output_batches = datafusion::physical_plan::common::collect(stream)
        .await
        .unwrap();
    // verify compacted data
    // data is sorted and all duplicates are removed
    let expected = vec![
        "+-----------+------------+------+------+--------------------------------+",
        "| field_int | field_int2 | tag1 | tag2 | time |",
        "+-----------+------------+------+------+--------------------------------+",
        "| 10 | | AL | | 1970-01-01T00:00:00.000000050Z |",
        "| 100 | 100 | AL | MA | 1970-01-01T00:00:00.000000050Z |",
        "| 70 | | CT | | 1970-01-01T00:00:00.000000100Z |",
        "| 70 | | CT | | 1970-01-01T00:00:00.000000500Z |",
        "| 70 | 70 | CT | CT | 1970-01-01T00:00:00.000000100Z |",
        "| 30 | | MT | | 1970-01-01T00:00:00.000000005Z |",
        "| 1000 | | MT | | 1970-01-01T00:00:00.000001Z |",
        "| 1000 | | MT | | 1970-01-01T00:00:00.000002Z |",
        "| 20 | | MT | | 1970-01-01T00:00:00.000007Z |",
        "| 5 | 5 | MT | AL | 1970-01-01T00:00:00.000005Z |",
        "| 10 | 10 | MT | AL | 1970-01-01T00:00:00.000007Z |",
        "| 1000 | 1000 | MT | CT | 1970-01-01T00:00:00.000001Z |",
        "+-----------+------------+------+------+--------------------------------+",
    ];
    assert_batches_eq!(&expected, &output_batches);
}
/// Input batches whose columns differ AND appear in different orders:
/// the merge must align columns by name before sorting/deduplicating.
#[tokio::test]
async fn test_compact_many_batches_different_columns_different_order_with_duplicates() {
    // create many-batches input data
    let batch = QueryAdaptor::new(
        ARBITRARY_TRANSITION_PARTITION_ID.clone(),
        create_batches_with_influxtype_different_columns_different_order().await,
    );
    // verify PK
    let schema = batch.schema();
    let pk = schema.primary_key();
    let expected_pk = vec!["tag1", "tag2", "time"];
    assert_eq!(expected_pk, pk);
    let sort_key = compute_sort_key(schema, batch.record_batches().iter());
    assert_eq!(sort_key, SortKey::from_columns(["tag1", "tag2", "time"]));
    // compact
    let exc = Executor::new_testing();
    let stream = compact_persisting_batch(&exc, Some(sort_key), "test_table".into(), batch)
        .await
        .unwrap()
        .stream;
    let output_batches = datafusion::physical_plan::common::collect(stream)
        .await
        .unwrap();
    // verify compacted data
    // data is sorted and all duplicates are removed
    // (this is the expected, fully correct result of the merge)
    let expected = vec![
        "+-----------+------+------+--------------------------------+",
        "| field_int | tag1 | tag2 | time |",
        "+-----------+------+------+--------------------------------+",
        "| 5 | | AL | 1970-01-01T00:00:00.000005Z |",
        "| 10 | | AL | 1970-01-01T00:00:00.000007Z |",
        "| 70 | | CT | 1970-01-01T00:00:00.000000100Z |",
        "| 1000 | | CT | 1970-01-01T00:00:00.000001Z |",
        "| 100 | | MA | 1970-01-01T00:00:00.000000050Z |",
        "| 10 | AL | MA | 1970-01-01T00:00:00.000000050Z |",
        "| 70 | CT | CT | 1970-01-01T00:00:00.000000100Z |",
        "| 70 | CT | CT | 1970-01-01T00:00:00.000000500Z |",
        "| 30 | MT | AL | 1970-01-01T00:00:00.000000005Z |",
        "| 20 | MT | AL | 1970-01-01T00:00:00.000007Z |",
        "| 1000 | MT | CT | 1970-01-01T00:00:00.000001Z |",
        "| 1000 | MT | CT | 1970-01-01T00:00:00.000002Z |",
        "+-----------+------+------+--------------------------------+",
    ];
    assert_batches_eq!(&expected, &output_batches);
}
/// Batches that reuse a column name with a conflicting type (i64 vs u64):
/// merging the schemas is expected to panic.
#[tokio::test]
#[should_panic(expected = "Schemas compatible")]
async fn test_compact_many_batches_same_columns_different_types() {
    // create many-batches input data
    let batch = QueryAdaptor::new(
        ARBITRARY_TRANSITION_PARTITION_ID.clone(),
        create_batches_with_influxtype_same_columns_different_type().await,
    );
    // the schema merge should throw a panic
    batch.schema();
}
/// Build a single `RecordBatch` containing exactly one row
/// (columns: time, tag1, field_int).
async fn create_one_row_record_batch_with_influxtype() -> Vec<RecordBatch> {
    let chunk1 = Arc::new(
        TestChunk::new("t")
            .with_id(1)
            .with_time_column()
            .with_tag_column("tag1")
            .with_i64_field_column("field_int")
            .with_one_row_of_data(),
    );
    let batches = raw_data(&[chunk1]).await;
    // Make sure all data in one record batch
    assert_eq!(batches.len(), 1);
    // verify data
    let expected = vec![
        "+-----------+------+-----------------------------+",
        "| field_int | tag1 | time |",
        "+-----------+------+-----------------------------+",
        "| 1000 | MA | 1970-01-01T00:00:00.000001Z |",
        "+-----------+------+-----------------------------+",
    ];
    assert_batches_eq!(&expected, &batches);
    batches
}
/// Build a single `RecordBatch` with three distinct rows (no duplicate
/// primary keys); rows are deliberately NOT in sorted order.
async fn create_one_record_batch_with_influxtype_no_duplicates() -> Vec<RecordBatch> {
    let chunk1 = Arc::new(
        TestChunk::new("t")
            .with_id(1)
            .with_time_column()
            .with_tag_column("tag1")
            .with_i64_field_column("field_int")
            .with_three_rows_of_data(),
    );
    let batches = raw_data(&[chunk1]).await;
    // Make sure all data in one record batch
    assert_eq!(batches.len(), 1);
    // verify data
    let expected = vec![
        "+-----------+------+-----------------------------+",
        "| field_int | tag1 | time |",
        "+-----------+------+-----------------------------+",
        "| 1000 | WA | 1970-01-01T00:00:00.000008Z |",
        "| 10 | VT | 1970-01-01T00:00:00.000010Z |",
        "| 70 | UT | 1970-01-01T00:00:00.000020Z |",
        "+-----------+------+-----------------------------+",
    ];
    assert_batches_eq!(&expected, &batches);
    batches
}
/// Build a single `RecordBatch` with ten rows, several of which share the
/// same (tag1, time) primary key, to exercise deduplication.
async fn create_one_record_batch_with_influxtype_duplicates() -> Vec<RecordBatch> {
    let chunk1 = Arc::new(
        TestChunk::new("t")
            .with_id(1)
            .with_time_column()
            .with_tag_column("tag1")
            .with_i64_field_column("field_int")
            .with_ten_rows_of_data_some_duplicates(),
    );
    let batches = raw_data(&[chunk1]).await;
    // Make sure all data in one record batch
    assert_eq!(batches.len(), 1);
    // verify data
    let expected = vec![
        "+-----------+------+--------------------------------+",
        "| field_int | tag1 | time |",
        "+-----------+------+--------------------------------+",
        "| 1000 | MT | 1970-01-01T00:00:00.000001Z |",
        "| 10 | MT | 1970-01-01T00:00:00.000007Z |",
        "| 70 | CT | 1970-01-01T00:00:00.000000100Z |",
        "| 100 | AL | 1970-01-01T00:00:00.000000050Z |",
        "| 5 | MT | 1970-01-01T00:00:00.000000005Z |",
        "| 1000 | MT | 1970-01-01T00:00:00.000002Z |",
        "| 20 | MT | 1970-01-01T00:00:00.000007Z |",
        "| 70 | CT | 1970-01-01T00:00:00.000000500Z |",
        "| 10 | AL | 1970-01-01T00:00:00.000000050Z |",
        "| 30 | MT | 1970-01-01T00:00:00.000000005Z |",
        "+-----------+------+--------------------------------+",
    ];
    assert_batches_eq!(&expected, &batches);
    batches
}
/// RecordBatches with knowledge of influx metadata: two batches with the
/// same schema whose rows partially duplicate each other.
async fn create_batches_with_influxtype() -> Vec<RecordBatch> {
    // Use the available TestChunk to create chunks and then convert them to raw RecordBatches
    let mut batches = vec![];
    // chunk1 with 10 rows and 3 columns
    let chunk1 = Arc::new(
        TestChunk::new("t")
            .with_id(1)
            .with_time_column()
            .with_tag_column("tag1")
            .with_i64_field_column("field_int")
            .with_ten_rows_of_data_some_duplicates(),
    );
    let batch1 = raw_data(&[chunk1]).await[0].clone();
    let expected = vec![
        "+-----------+------+--------------------------------+",
        "| field_int | tag1 | time |",
        "+-----------+------+--------------------------------+",
        "| 1000 | MT | 1970-01-01T00:00:00.000001Z |",
        "| 10 | MT | 1970-01-01T00:00:00.000007Z |",
        "| 70 | CT | 1970-01-01T00:00:00.000000100Z |",
        "| 100 | AL | 1970-01-01T00:00:00.000000050Z |",
        "| 5 | MT | 1970-01-01T00:00:00.000000005Z |",
        "| 1000 | MT | 1970-01-01T00:00:00.000002Z |",
        "| 20 | MT | 1970-01-01T00:00:00.000007Z |",
        "| 70 | CT | 1970-01-01T00:00:00.000000500Z |",
        "| 10 | AL | 1970-01-01T00:00:00.000000050Z |",
        "| 30 | MT | 1970-01-01T00:00:00.000000005Z |",
        "+-----------+------+--------------------------------+",
    ];
    assert_batches_eq!(&expected, &[batch1.clone()]);
    batches.push(batch1);
    // chunk2 having duplicate data with chunk 1
    let chunk2 = Arc::new(
        TestChunk::new("t")
            .with_id(2)
            .with_time_column()
            .with_tag_column("tag1")
            .with_i64_field_column("field_int")
            .with_five_rows_of_data(),
    );
    let batch2 = raw_data(&[chunk2]).await[0].clone();
    let expected = vec![
        "+-----------+------+--------------------------------+",
        "| field_int | tag1 | time |",
        "+-----------+------+--------------------------------+",
        "| 1000 | MT | 1970-01-01T00:00:00.000001Z |",
        "| 10 | MT | 1970-01-01T00:00:00.000007Z |",
        "| 70 | CT | 1970-01-01T00:00:00.000000100Z |",
        "| 100 | AL | 1970-01-01T00:00:00.000000050Z |",
        "| 5 | MT | 1970-01-01T00:00:00.000005Z |",
        "+-----------+------+--------------------------------+",
    ];
    assert_batches_eq!(&expected, &[batch2.clone()]);
    batches.push(batch2);
    // verify data from both batches
    let expected = vec![
        "+-----------+------+--------------------------------+",
        "| field_int | tag1 | time |",
        "+-----------+------+--------------------------------+",
        "| 1000 | MT | 1970-01-01T00:00:00.000001Z |",
        "| 10 | MT | 1970-01-01T00:00:00.000007Z |",
        "| 70 | CT | 1970-01-01T00:00:00.000000100Z |",
        "| 100 | AL | 1970-01-01T00:00:00.000000050Z |",
        "| 5 | MT | 1970-01-01T00:00:00.000000005Z |",
        "| 1000 | MT | 1970-01-01T00:00:00.000002Z |",
        "| 20 | MT | 1970-01-01T00:00:00.000007Z |",
        "| 70 | CT | 1970-01-01T00:00:00.000000500Z |",
        "| 10 | AL | 1970-01-01T00:00:00.000000050Z |",
        "| 30 | MT | 1970-01-01T00:00:00.000000005Z |",
        "| 1000 | MT | 1970-01-01T00:00:00.000001Z |",
        "| 10 | MT | 1970-01-01T00:00:00.000007Z |",
        "| 70 | CT | 1970-01-01T00:00:00.000000100Z |",
        "| 100 | AL | 1970-01-01T00:00:00.000000050Z |",
        "| 5 | MT | 1970-01-01T00:00:00.000005Z |",
        "+-----------+------+--------------------------------+",
    ];
    assert_batches_eq!(&expected, &batches);
    batches
}
/// RecordBatches with knowledge of influx metadata: the second batch has a
/// superset of the first batch's columns (extra tag2 and field_int2).
async fn create_batches_with_influxtype_different_columns() -> Vec<RecordBatch> {
    // Use the available TestChunk to create chunks and then convert them to raw RecordBatches
    let mut batches = vec![];
    // chunk1 with 10 rows and 3 columns
    let chunk1 = Arc::new(
        TestChunk::new("t")
            .with_id(1)
            .with_time_column()
            .with_tag_column("tag1")
            .with_i64_field_column("field_int")
            .with_ten_rows_of_data_some_duplicates(),
    );
    let batch1 = raw_data(&[chunk1]).await[0].clone();
    let expected = vec![
        "+-----------+------+--------------------------------+",
        "| field_int | tag1 | time |",
        "+-----------+------+--------------------------------+",
        "| 1000 | MT | 1970-01-01T00:00:00.000001Z |",
        "| 10 | MT | 1970-01-01T00:00:00.000007Z |",
        "| 70 | CT | 1970-01-01T00:00:00.000000100Z |",
        "| 100 | AL | 1970-01-01T00:00:00.000000050Z |",
        "| 5 | MT | 1970-01-01T00:00:00.000000005Z |",
        "| 1000 | MT | 1970-01-01T00:00:00.000002Z |",
        "| 20 | MT | 1970-01-01T00:00:00.000007Z |",
        "| 70 | CT | 1970-01-01T00:00:00.000000500Z |",
        "| 10 | AL | 1970-01-01T00:00:00.000000050Z |",
        "| 30 | MT | 1970-01-01T00:00:00.000000005Z |",
        "+-----------+------+--------------------------------+",
    ];
    assert_batches_eq!(&expected, &[batch1.clone()]);
    batches.push(batch1);
    // chunk2 having duplicate data with chunk 1
    // more columns
    let chunk2 = Arc::new(
        TestChunk::new("t")
            .with_id(2)
            .with_time_column()
            .with_tag_column("tag1")
            .with_i64_field_column("field_int")
            .with_tag_column("tag2")
            .with_i64_field_column("field_int2")
            .with_five_rows_of_data(),
    );
    let batch2 = raw_data(&[chunk2]).await[0].clone();
    let expected = vec![
        "+-----------+------------+------+------+--------------------------------+",
        "| field_int | field_int2 | tag1 | tag2 | time |",
        "+-----------+------------+------+------+--------------------------------+",
        "| 1000 | 1000 | MT | CT | 1970-01-01T00:00:00.000001Z |",
        "| 10 | 10 | MT | AL | 1970-01-01T00:00:00.000007Z |",
        "| 70 | 70 | CT | CT | 1970-01-01T00:00:00.000000100Z |",
        "| 100 | 100 | AL | MA | 1970-01-01T00:00:00.000000050Z |",
        "| 5 | 5 | MT | AL | 1970-01-01T00:00:00.000005Z |",
        "+-----------+------------+------+------+--------------------------------+",
    ];
    assert_batches_eq!(&expected, &[batch2.clone()]);
    batches.push(batch2);
    batches
}
/// RecordBatches with knowledge of influx metadata: the batches have
/// different column sets AND list their tag columns in different orders.
async fn create_batches_with_influxtype_different_columns_different_order() -> Vec<RecordBatch>
{
    // Use the available TestChunk to create chunks and then convert them to raw RecordBatches
    let mut batches = vec![];
    // chunk1 with 10 rows and 3 columns
    let chunk1 = Arc::new(
        TestChunk::new("t")
            .with_id(1)
            .with_time_column()
            .with_tag_column("tag1")
            .with_i64_field_column("field_int")
            .with_tag_column("tag2")
            .with_ten_rows_of_data_some_duplicates(),
    );
    let batch1 = raw_data(&[chunk1]).await[0].clone();
    let expected = vec![
        "+-----------+------+------+--------------------------------+",
        "| field_int | tag1 | tag2 | time |",
        "+-----------+------+------+--------------------------------+",
        "| 1000 | MT | CT | 1970-01-01T00:00:00.000001Z |",
        "| 10 | MT | AL | 1970-01-01T00:00:00.000007Z |",
        "| 70 | CT | CT | 1970-01-01T00:00:00.000000100Z |",
        "| 100 | AL | MA | 1970-01-01T00:00:00.000000050Z |",
        "| 5 | MT | AL | 1970-01-01T00:00:00.000000005Z |",
        "| 1000 | MT | CT | 1970-01-01T00:00:00.000002Z |",
        "| 20 | MT | AL | 1970-01-01T00:00:00.000007Z |",
        "| 70 | CT | CT | 1970-01-01T00:00:00.000000500Z |",
        "| 10 | AL | MA | 1970-01-01T00:00:00.000000050Z |",
        "| 30 | MT | AL | 1970-01-01T00:00:00.000000005Z |",
        "+-----------+------+------+--------------------------------+",
    ];
    assert_batches_eq!(&expected, &[batch1.clone()]);
    batches.push(batch1.clone());
    // chunk2 having duplicate data with chunk 1
    // fewer columns, and tag2 declared where chunk1 declared tag1
    let chunk2 = Arc::new(
        TestChunk::new("t")
            .with_id(2)
            .with_time_column()
            .with_tag_column("tag2")
            .with_i64_field_column("field_int")
            .with_five_rows_of_data(),
    );
    let batch2 = raw_data(&[chunk2]).await[0].clone();
    let expected = vec![
        "+-----------+------+--------------------------------+",
        "| field_int | tag2 | time |",
        "+-----------+------+--------------------------------+",
        "| 1000 | CT | 1970-01-01T00:00:00.000001Z |",
        "| 10 | AL | 1970-01-01T00:00:00.000007Z |",
        "| 70 | CT | 1970-01-01T00:00:00.000000100Z |",
        "| 100 | MA | 1970-01-01T00:00:00.000000050Z |",
        "| 5 | AL | 1970-01-01T00:00:00.000005Z |",
        "+-----------+------+--------------------------------+",
    ];
    assert_batches_eq!(&expected, &[batch2.clone()]);
    batches.push(batch2);
    batches
}
/// Has 2 tag columns; tag1 has a lower cardinality (3) than tag3 (4), so a
/// computed sort key should place tag1 before tag3. Returns two identical
/// four-row batches.
async fn create_batches_with_influxtype_different_cardinality() -> Vec<RecordBatch> {
    // Use the available TestChunk to create chunks and then convert them to raw RecordBatches
    let mut batches = vec![];
    let chunk1 = Arc::new(
        TestChunk::new("t")
            .with_id(1)
            .with_time_column()
            .with_tag_column("tag1")
            .with_i64_field_column("field_int")
            .with_tag_column("tag3")
            .with_four_rows_of_data(),
    );
    let batch1 = raw_data(&[chunk1]).await[0].clone();
    let expected = vec![
        "+-----------+------+------+-----------------------------+",
        "| field_int | tag1 | tag3 | time |",
        "+-----------+------+------+-----------------------------+",
        "| 1000 | WA | TX | 1970-01-01T00:00:00.000028Z |",
        "| 10 | VT | PR | 1970-01-01T00:00:00.000210Z |",
        "| 70 | UT | OR | 1970-01-01T00:00:00.000220Z |",
        "| 50 | VT | AL | 1970-01-01T00:00:00.000210Z |",
        "+-----------+------+------+-----------------------------+",
    ];
    assert_batches_eq!(&expected, &[batch1.clone()]);
    batches.push(batch1.clone());
    // chunk2: same rows as chunk1 (tag columns declared in a different order)
    let chunk2 = Arc::new(
        TestChunk::new("t")
            .with_id(2)
            .with_time_column()
            .with_tag_column("tag1")
            .with_tag_column("tag3")
            .with_i64_field_column("field_int")
            .with_four_rows_of_data(),
    );
    let batch2 = raw_data(&[chunk2]).await[0].clone();
    let expected = vec![
        "+-----------+------+------+-----------------------------+",
        "| field_int | tag1 | tag3 | time |",
        "+-----------+------+------+-----------------------------+",
        "| 1000 | WA | TX | 1970-01-01T00:00:00.000028Z |",
        "| 10 | VT | PR | 1970-01-01T00:00:00.000210Z |",
        "| 70 | UT | OR | 1970-01-01T00:00:00.000220Z |",
        "| 50 | VT | AL | 1970-01-01T00:00:00.000210Z |",
        "+-----------+------+------+-----------------------------+",
    ];
    assert_batches_eq!(&expected, &[batch2.clone()]);
    batches.push(batch2);
    batches
}
/// RecordBatches with knowledge of influx metadata
///
/// Produces two chunks that reuse the column name "field_int" with different
/// types (i64 in chunk 1, u64 in chunk 2) to exercise type-conflict handling.
async fn create_batches_with_influxtype_same_columns_different_type() -> Vec<RecordBatch> {
    // Use the available TestChunk to create chunks and then convert them to raw RecordBatches
    let mut batches = vec![];
    // chunk1
    let chunk1 = Arc::new(
        TestChunk::new("t")
            .with_id(1)
            .with_time_column()
            .with_tag_column("tag1")
            .with_i64_field_column("field_int")
            .with_three_rows_of_data(),
    );
    let batch1 = raw_data(&[chunk1]).await[0].clone();
    let expected = vec![
        "+-----------+------+-----------------------------+",
        "| field_int | tag1 | time |",
        "+-----------+------+-----------------------------+",
        "| 1000 | WA | 1970-01-01T00:00:00.000008Z |",
        "| 10 | VT | 1970-01-01T00:00:00.000010Z |",
        "| 70 | UT | 1970-01-01T00:00:00.000020Z |",
        "+-----------+------+-----------------------------+",
    ];
    assert_batches_eq!(&expected, &[batch1.clone()]);
    batches.push(batch1);
    // chunk2 having duplicate data with chunk 1
    // more columns
    let chunk2 = Arc::new(
        TestChunk::new("t")
            .with_id(2)
            .with_time_column()
            .with_u64_column("field_int") // u64 type but on existing name "field_int" used for i64 in chunk 1
            .with_tag_column("tag2")
            .with_three_rows_of_data(),
    );
    let batch2 = raw_data(&[chunk2]).await[0].clone();
    let expected = vec![
        "+-----------+------+-----------------------------+",
        "| field_int | tag2 | time |",
        "+-----------+------+-----------------------------+",
        "| 1000 | SC | 1970-01-01T00:00:00.000008Z |",
        "| 10 | NC | 1970-01-01T00:00:00.000010Z |",
        "| 70 | RI | 1970-01-01T00:00:00.000020Z |",
        "+-----------+------+-----------------------------+",
    ];
    assert_batches_eq!(&expected, &[batch2.clone()]);
    batches.push(batch2);
    batches
}
}
|
// svd2rust-style generated register accessor for BUS_ERRORS0 — presumably
// machine-generated; keep edits to comments only.
#[doc = "Reader of register BUS_ERRORS0"]
pub type R = crate::R<u8, super::BUS_ERRORS0>;
#[doc = "Reader of field `bus_errors`"]
pub type BUS_ERRORS_R = crate::R<u8, u8>;
impl R {
    #[doc = "Bits 0:7"]
    #[inline(always)]
    pub fn bus_errors(&self) -> BUS_ERRORS_R {
        // The field spans the whole 8-bit register; the `& 0xff` mask is a
        // no-op kept for generator symmetry with partial-width fields.
        BUS_ERRORS_R::new((self.bits & 0xff) as u8)
    }
}
|
pub struct Solution;
impl Solution {
    /// LeetCode 97: returns `true` iff `s3` can be formed by interleaving
    /// `s1` and `s2`, preserving the relative order of characters within each.
    ///
    /// Uses a single rolling DP row over the shorter string: after processing
    /// outer index `i2`, `ok[i1]` is true iff `s3[..i1 + i2]` is an
    /// interleaving of `s1[..i1]` and `s2[..i2]`.
    /// O(l1 * l2) time, O(min(l1, l2)) space.
    pub fn is_interleave(s1: String, s2: String, s3: String) -> bool {
        let s1 = s1.into_bytes();
        let s2 = s2.into_bytes();
        let s3 = s3.into_bytes();
        // Lengths must add up exactly, otherwise no interleaving exists.
        if s1.len() + s2.len() != s3.len() {
            return false;
        }
        // Keep the DP row over the shorter string to minimize memory.
        let (s1, s2) = if s1.len() < s2.len() {
            (s1, s2)
        } else {
            (s2, s1)
        };
        let l1 = s1.len();
        let l2 = s2.len();
        let mut ok = vec![false; l1 + 1];
        ok[0] = true;
        // Base row (i2 == 0): s3 must start with a prefix of s1.
        for i1 in 1..=l1 {
            ok[i1] = ok[i1 - 1] && s1[i1 - 1] == s3[i1 - 1];
        }
        for i2 in 1..=l2 {
            // First column (i1 == 0): the s3 prefix must come entirely from s2.
            ok[0] = ok[0] && s2[i2 - 1] == s3[i2 - 1];
            for i1 in 1..=l1 {
                // The next byte of s3 may be taken from either s1 or s2.
                ok[i1] = (ok[i1 - 1] && s1[i1 - 1] == s3[i1 + i2 - 1])
                    || (ok[i1] && s2[i2 - 1] == s3[i1 + i2 - 1]);
            }
        }
        ok[l1]
    }
}
#[test]
fn test0097() {
    // "aadbbcbcac" interleaves "aabcc" and "dbbca".
    assert!(Solution::is_interleave(
        "aabcc".to_string(),
        "dbbca".to_string(),
        "aadbbcbcac".to_string()
    ));
    // "aadbbbaccc" cannot be formed from the same two strings.
    assert!(!Solution::is_interleave(
        "aabcc".to_string(),
        "dbbca".to_string(),
        "aadbbbaccc".to_string()
    ));
}
|
// So far the implementations we have derived for our NewStruct object will require us to return the value of the fields
// themselves. When the fields implement Copy this will not be a particularly expensive operation, but when working with
// more complex fields we may not want to Clone the objects. This next implementation will show how we can return references
// to the fields, rather than the fields themselves.
// NOTE: To be comfortable with the implementation you will need to have a basic understanding of the Rust lifetime system.
use std::iter::IntoIterator;
use std::iter::Iterator;
// Since we are returning references to the field values we don't need to constrain the type T.
// Since we are returning references to the field values we don't need to constrain the type T.
/// Demo struct with five homogeneous fields, iterated over by reference below.
pub struct NewStruct<T> {
    field1: T,
    field2: T,
    field3: T,
    field4: T,
    field5: T,
}
// The syntax T: 'a is read as T must outlive 'a
// This means that any reference contained in T must outlive 'a
// Note: I changed the name to NewStructIterRef since we are returning references, we could have used NewStructIntoIter just as before
// The syntax T: 'a is read as "T must outlive 'a": any reference contained
// in T must be valid for at least 'a.
/// Borrowing iterator over a `NewStruct<T>`'s fields.
pub struct NewStructIterRef<'a,T>
where T: 'a {
    // Index of the next field to yield (0..=5).
    count: usize,
    // Borrow of the whole struct; field references are produced from it.
    new_struct: &'a NewStruct<T>,
}
/* Here is the IntoIterator trait definition for reference
* pub trait IntoIterator
* where Self::IntoIter::Item == Self::Item {
*
* type Item;
* type IntoIter: Iterator;
*
* fn into_iter( self ) -> Self::IntoIter;
* }
*/
/// Iterate a `NewStruct` by reference: `for x in &my_struct { ... }`.
///
/// `Item` is `&'a T`, so the fields are borrowed rather than moved/cloned.
impl<'a, T> IntoIterator for &'a NewStruct<T>
where
    T: 'a,
{
    type Item = &'a T;
    type IntoIter = NewStructIterRef<'a, T>;
    // Idiom cleanup vs. the original: `self` instead of `self: Self`,
    // plain `0` instead of `0 as usize`, and the associated-type alias
    // as the return type.
    fn into_iter(self) -> Self::IntoIter {
        NewStructIterRef {
            count: 0,
            // `self` is `&'a NewStruct<T>`: the iterator borrows the whole struct.
            new_struct: self,
        }
    }
}
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Procedural macro to generate finite field types
//!
//! This is just the procedural macro, for more information look at [g2p](https://docs.rs/g2p).
#![recursion_limit = "128"]
extern crate proc_macro;
use proc_macro::TokenStream as P1TokenStream;
use proc_macro2::{TokenStream as P2TokenStream, Ident, Span};
use g2poly::{G2Poly, extended_gcd};
use quote::quote;
use syn::{
parse::{
Parse,
ParseStream,
},
parse_macro_input,
Token,
};
/// Generate a newtype of the given name and implement finite field arithmetic on it.
///
/// The generated type have implementations for [`Add`](::core::ops::Add),
/// [`Sub`](::core::ops::Sub), [`Mul`](::core::ops::Mul) and [`Div`](::core::ops::Div).
///
/// There are also implementations for equality, copy and debug. Conversion from and to the base
/// type are implemented via the From trait.
/// Depending on the size of `p` the underlying type is u8, u16 or u32.
///
/// # Example
/// ```ignore
/// g2gen::g2p!(
/// GF256, // Name of the newtype
/// 8, // The power of 2 specifying the field size 2^8 = 256 in this
/// // case.
/// modulus: 0b1_0001_1101, // The reduction polynomial to use, each bit is a coefficient.
/// // Can be left out in case it is not needed.
/// );
///
/// # fn main() {
/// let a: GF256 = 255.into(); // Conversion from the base type
/// assert_eq!(a - a, a + a); // Finite field arithmetic.
/// assert_eq!(format!("{}", a), "255_GF256");
/// # }
/// ```
#[proc_macro]
pub fn g2p(input: P1TokenStream) -> P1TokenStream {
    // Parse the macro arguments and validate/derive modulus and generator.
    let args = parse_macro_input!(input as ParsedInput);
    let settings = Settings::from_input(args).unwrap();
    let ident = settings.ident;
    let ident_name = settings.ident_name;
    let modulus = settings.modulus;
    let generator = settings.generator;
    let p = settings.p_val;
    // The field has 2^p elements; `mask` clears bits above the field width.
    let field_size = 1_usize << p;
    let mask = (1_u64 << p).wrapping_sub(1);
    // Pick the smallest unsigned integer type that can hold one field element.
    let ty = match p {
        0 => panic!("p must be > 0"),
        1..=8 => quote!(u8),
        9..=16 => quote!(u16),
        17..=32 => quote!(u32),
        _ => unimplemented!("p > 32 is not implemented right now"),
    };
    // All generated impls and lookup tables live in a companion module.
    let mod_name = Ident::new(&format!("{}_mod", ident_name), Span::call_site());
    let struct_def = quote![
        pub struct #ident(pub #ty);
    ];
    let struct_impl = quote![
        impl #ident {
            pub const MASK: #ty = #mask as #ty;
        }
    ];
    // Conversion out of the field type is lossless ...
    let from = quote![
        impl ::core::convert::From<#ident> for #ty {
            fn from(v: #ident) -> #ty {
                v.0
            }
        }
    ];
    // ... while conversion in masks out-of-range bits instead of failing.
    let into = quote![
        impl ::core::convert::From<#ty> for #ident {
            fn from(v: #ty) -> #ident {
                #ident(v & #ident::MASK)
            }
        }
    ];
    let eq = quote![
        impl ::core::cmp::PartialEq<#ident> for #ident {
            fn eq(&self, other: &#ident) -> bool {
                self.0 == other.0
            }
        }
        impl ::core::cmp::Eq for #ident {}
    ];
    // Debug and Display both render as "<value>_<TypeName>".
    let tmpl = format!("{{}}_{}", ident_name);
    let debug = quote![
        impl ::core::fmt::Debug for #ident {
            fn fmt<'a>(&self, f: &mut ::core::fmt::Formatter<'a>) -> ::core::fmt::Result {
                write!(f, #tmpl, self.0)
            }
        }
    ];
    let display = quote![
        impl ::core::fmt::Display for #ident {
            fn fmt<'a>(&self, f: &mut ::core::fmt::Formatter<'a>) -> ::core::fmt::Result {
                write!(f, #tmpl, self.0)
            }
        }
    ];
    let clone = quote![
        impl ::core::clone::Clone for #ident {
            fn clone(&self) -> Self {
                *self
            }
        }
    ];
    let copy = quote![
        impl ::core::marker::Copy for #ident {}
    ];
    // In GF(2^p) addition and subtraction are both carry-less XOR.
    let add = quote![
        impl ::core::ops::Add for #ident {
            type Output = #ident;
            #[allow(clippy::suspicious_arithmetic_impl)]
            fn add(self, rhs: #ident) -> #ident {
                #ident(self.0 ^ rhs.0)
            }
        }
        impl ::core::ops::AddAssign for #ident {
            fn add_assign(&mut self, rhs: #ident) {
                *self = *self + rhs;
            }
        }
    ];
    let sub = quote![
        impl ::core::ops::Sub for #ident {
            type Output = #ident;
            #[allow(clippy::suspicious_arithmetic_impl)]
            fn sub(self, rhs: #ident) -> #ident {
                #ident(self.0 ^ rhs.0)
            }
        }
        impl ::core::ops::SubAssign for #ident {
            fn sub_assign(&mut self, rhs: #ident) {
                *self = *self - rhs;
            }
        }
    ];
    let gen = generator.0;
    let modulus_val = modulus.0;
    let galois_trait_impl = quote![
        impl ::g2p::GaloisField for #ident {
            const SIZE: usize = #field_size;
            const MODULUS: ::g2p::G2Poly = ::g2p::G2Poly(#modulus_val);
            const ZERO: #ident = #ident(0);
            const ONE: #ident = #ident(1);
            const GENERATOR: #ident = #ident(#gen as #ty);
        }
    ];
    // Mul/Div are table-driven; the tables are emitted into the module below.
    let (tables, mul, div) = generate_mul_impl(
        ident.clone(),
        &ident_name,
        modulus,
        ty,
        field_size,
        mask,
    );
    // Assemble: public struct definition plus a module with all impls/tables.
    P1TokenStream::from(quote![
        #struct_def
        mod #mod_name {
            use super::#ident;
            #struct_impl
            #tables
            #from
            #into
            #eq
            #debug
            #display
            #clone
            #copy
            #add
            #sub
            #mul
            #div
            #galois_trait_impl
        }
    ])
}
/// Raw, unvalidated macro arguments: `Ident, p [, modulus: <int>]`.
struct ParsedInput {
    ident: syn::Ident,
    p: syn::LitInt,
    // Optional reduction polynomial; when absent a default is searched for.
    modulus: Option<syn::LitInt>,
}
impl Parse for ParsedInput {
    /// Parses `Name, p` followed by optional `key: value` settings
    /// (currently only `modulus`). Tolerates a trailing comma.
    fn parse(input: ParseStream) -> syn::Result<Self> {
        let ident = input.parse()?;
        let _sep: Token![,] = input.parse()?;
        let p = input.parse()?;
        let mut modulus = None;
        loop {
            // A missing comma or end-of-input terminates the settings list.
            let sep: Option<Token![,]> = input.parse()?;
            if sep.is_none() || input.is_empty() {
                break;
            }
            let ident: syn::Ident = input.parse()?;
            let ident_name = ident.to_string();
            let _sep: Token![:] = input.parse()?;
            match ident_name.as_str() {
                "modulus" => {
                    // Reject duplicate `modulus:` settings with a spanned error.
                    if modulus.is_some() {
                        Err(syn::parse::Error::new(ident.span(), "Double declaration of 'modulus'"))?
                    }
                    modulus = Some(input.parse()?);
                }
                _ => {
                    Err(syn::parse::Error::new(ident.span(), "Expected 'modulus'"))?
                }
            }
        }
        Ok(ParsedInput {
            ident,
            p,
            modulus,
        })
    }
}
/// Validated macro configuration derived from [`ParsedInput`].
#[derive(Debug, Clone, Eq, PartialEq)]
struct Settings {
    ident: syn::Ident,
    // Cached string form of `ident`, used in generated format templates.
    ident_name: String,
    p_val: u64,
    // Irreducible reduction polynomial defining GF(2^p_val).
    modulus: G2Poly,
    // Generator of the field's multiplicative group.
    generator: G2Poly,
}
fn find_modulus_poly(p: u64) -> G2Poly {
assert!(p < 64);
let start = (1 << p) + 1;
let end = (1_u64 << (p + 1)).wrapping_sub(1);
for m in start..=end {
let p = G2Poly(m);
if p.is_irreducible() {
return p;
}
}
unreachable!("There are irreducible polynomial for any degree!")
}
fn find_generator(m: G2Poly) -> G2Poly {
let max = m.degree().expect("Modulus must have positive degree");
for g in 1..(2 << max) {
let g = G2Poly(g);
if g.is_generator(m) {
return g;
}
}
unreachable!("There must be a generator element")
}
/// Calculate the log base 256, rounded up.
///
/// Given a number n, calculate the log base 256, rounded up. This can be
/// thought of as the number of bytes needed to index a table of n entries
/// (0 maps to 0; 1..=256 map to 1; 257..=65536 map to 2; and so on).
fn ceil_log256(n: usize) -> usize {
    if n == 0 {
        return 0;
    }
    let mut bytes = 1;
    let mut remaining = n;
    // Each step peels off one 8-bit "digit"; adding 255 first rounds the
    // quotient up so that a leftover remainder still counts as a full byte.
    while remaining > 256 {
        remaining = (remaining + 255) >> 8;
        bytes += 1;
    }
    bytes
}
/// Generate multiplication array
///
/// Generate a string representing a 5d multiplication array. This array uses the distributivity
/// of multiplication `(a + b) * (c + d) == a*c + a*d + b*c + b*d` to reduce table size.
///
/// The input is split into bit chunks e.g. for a GF_1024 number we take the lower 8 bit and the
/// remaining 2 and calculate the multiplications for each separately. Then we can cheaply add them
/// together to get the result without requiring a full 1024 * 1024 input.
fn generate_mul_table_string(modulus: G2Poly) -> String {
    assert!(modulus.is_irreducible());
    let field_size = 1 << modulus.degree().expect("Irreducible polynomial has positive degree");
    // One byte-chunk per 8 bits of a field element; table is nparts x nparts
    // sub-tables of 256 x 256 products.
    let nparts = ceil_log256(field_size as usize);
    let mut mul_table = Vec::with_capacity(nparts);
    for left in 0..nparts {
        let mut left_parts = Vec::with_capacity(nparts);
        for right in 0..nparts {
            let mut right_parts = Vec::with_capacity(256);
            for i in 0..256 {
                // Shift the byte into its chunk position before multiplying.
                let i = i << (8 * left);
                let mut row = Vec::with_capacity(256);
                for j in 0..256 {
                    let j = j << (8 * right);
                    // Entries beyond the field size are padding and stay zero.
                    let v = if i < field_size && j < field_size {
                        G2Poly(i as u64) * G2Poly(j as u64) % modulus
                    } else {
                        G2Poly(0)
                    };
                    row.push(format!("{}", v.0));
                }
                right_parts.push(format!("[{}]", row.join(",")));
            }
            left_parts.push(format!("[{}]", right_parts.join(",")));
        }
        mul_table.push(format!("[{}]", left_parts.join(",")));
    }
    // The result is later parsed back as a Rust array-literal token stream.
    format!("[{}]", mul_table.join(","))
}
/// Build the source text of an inverse lookup table for the field defined by `modulus`.
///
/// Entry `i` holds the multiplicative inverse of `i`; entry 0 stays 0
/// (zero has no inverse).
fn generate_inv_table_string(modulus: G2Poly) -> String {
    assert!(modulus.is_irreducible());
    let field_size = 1 << modulus.degree().expect("Irreducible polynomial has positive degree");
    let mut inv_table = vec![0; field_size as usize];
    // Inverse table is small enough to compute directly
    for i in 1..field_size {
        if inv_table[i as usize] != 0 {
            // Already computed inverse
            continue;
        }
        let a = G2Poly(i);
        // Returns (gcd, x, y) such that gcd(a, m) == a * x + y * m
        // Since we know that gcd(a, m) == 1 and that we operate modulo m, y * m === 0 mod m
        // So we have 1 === a * x mod m
        let (_gcd, x, _y) = extended_gcd(a, modulus);
        // Inverses come in pairs, so fill both directions in one step.
        inv_table[i as usize] = x.0;
        inv_table[x.0 as usize] = i;
    }
    use std::fmt::Write;
    let mut res = String::with_capacity(3 * field_size as usize);
    write!(&mut res, "[").unwrap();
    for v in inv_table {
        // A trailing comma is fine: the string is reparsed as an array literal.
        write!(&mut res, "{},", v).unwrap();
    }
    write!(&mut res, "]").unwrap();
    res
}
/// Generate the lookup tables and the `Mul`/`Div` impls for the field type.
///
/// Returns `(tables, mul_impl, div_impl)` as separate token streams to be
/// spliced into the generated module.
fn generate_mul_impl(ident: syn::Ident, ident_name: &str, modulus: G2Poly, ty: P2TokenStream, field_size: usize, mask: u64) -> (P2TokenStream, P2TokenStream, P2TokenStream) {
    let mul_table = generate_mul_table_string(modulus);
    let inv_table = generate_inv_table_string(modulus);
    // Faster generation than using quote
    let mul_table_string: proc_macro2::TokenStream = mul_table.parse().unwrap();
    let inv_table_string: proc_macro2::TokenStream = inv_table.parse().unwrap();
    let nparts = ceil_log256(field_size);
    // NB: We generate static arrays, as they are guaranteed to have a fixed location in memory.
    // Using const would mean the compiler is free to create copies on the stack etc. Since
    // the arrays are quite large, this could lead to stack overflows.
    let tables = quote! {
        pub static MUL_TABLE: [[[[#ty; 256]; 256]; #nparts]; #nparts] = #mul_table_string;
        pub static INV_TABLE: [#ty; #field_size] = #inv_table_string;
    };
    // One partial product per (left byte-chunk, right byte-chunk) pair; the
    // generated `mul` body joins them with `+` (field addition, i.e. XOR).
    let mut mul_ops = Vec::with_capacity(nparts * nparts);
    for left in 0..nparts {
        for right in 0..nparts {
            mul_ops.push(quote![
                #ident(MUL_TABLE[#left][#right][(((self.0 & #mask as #ty) >> (8*#left)) & 255) as usize][(((rhs.0 & #mask as #ty) >> (8*#right)) & 255) as usize])
            ]);
        }
    }
    let mul = quote![
        impl ::core::ops::Mul for #ident {
            type Output = #ident;
            fn mul(self, rhs: #ident) -> #ident {
                #(#mul_ops)+*
            }
        }
        impl ::core::ops::MulAssign for #ident {
            fn mul_assign(&mut self, rhs: #ident) {
                *self = *self * rhs;
            }
        }
    ];
    let err_msg = format!("Division by 0 in {}", ident_name);
    // Division multiplies by the precomputed inverse; a zero divisor panics.
    let div = quote![
        impl ::core::ops::Div for #ident {
            type Output = #ident;
            fn div(self, rhs: #ident) -> #ident {
                if (rhs.0 & #mask as #ty) == 0 {
                    panic!(#err_msg);
                }
                self * #ident(INV_TABLE[(rhs.0 & #mask as #ty) as usize])
            }
        }
        impl ::core::ops::DivAssign for #ident {
            fn div_assign(&mut self, rhs: #ident) {
                *self = *self / rhs;
            }
        }
    ];
    (tables, mul, div)
}
impl Settings {
    /// Validates the parsed macro input: parses `p`, resolves the modulus
    /// (user-supplied or searched), and finds a group generator.
    pub fn from_input(input: ParsedInput) -> syn::Result<Self> {
        let ident = input.ident;
        let ident_name = ident.to_string();
        let p_val = input.p.base10_parse()?;
        let modulus = match input.modulus {
            Some(lit) => G2Poly(lit.base10_parse()?),
            None => find_modulus_poly(p_val),
        };
        // A reducible modulus would not define a field; user-supplied values
        // must be rejected here (searched ones are irreducible by construction).
        if !modulus.is_irreducible() {
            Err(syn::Error::new(Span::call_site(), format!("Modulus {} is not irreducible", modulus)))?;
        }
        let generator = find_generator(modulus);
        // Defensive double-check; `find_generator` should already guarantee this.
        if !generator.is_generator(modulus) {
            Err(syn::Error::new(Span::call_site(), format!("{} is not a generator", generator)))?;
        }
        Ok(Settings {
            ident,
            ident_name,
            p_val,
            modulus,
            generator,
        })
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_settings_parser() {
        let span = Span::call_site();
        let input = ParsedInput {
            ident: Ident::new("foo", span),
            p: syn::LitInt::new("3", span),
            modulus: None,
        };
        let r = Settings::from_input(input);
        assert!(r.is_ok());
        // For p = 3 the first irreducible polynomial is x^3 + x + 1 (0b1011)
        // and x (0b10) generates the multiplicative group.
        assert_eq!(r.unwrap(), Settings {
            ident: syn::Ident::new("foo", span),
            ident_name: "foo".to_string(),
            p_val: 3,
            modulus: G2Poly(0b1011),
            generator: G2Poly(0b10),
        });
    }
    #[test]
    fn test_generate_mul_table() {
        // Golden-file comparison for the field defined by x^2 + x + 1.
        let m = G2Poly(0b111);
        assert_eq!(include_str!("../tests/mul_table.txt").trim(), generate_mul_table_string(m));
    }
    #[test]
    fn test_generate_inv_table_string() {
        // Golden-file comparison using the AES modulus x^8 + x^4 + x^3 + x + 1.
        let m = G2Poly(0b1_0001_1011);
        assert_eq!(include_str!("../tests/inv_table.txt").trim(), generate_inv_table_string(m));
    }
    #[test]
    fn test_ceil_log256() {
        // Boundary values around the 1-, 2- and 3-byte thresholds.
        assert_eq!(0, ceil_log256(0));
        assert_eq!(1, ceil_log256(1));
        assert_eq!(1, ceil_log256(256));
        assert_eq!(2, ceil_log256(257));
        assert_eq!(2, ceil_log256(65536));
        assert_eq!(3, ceil_log256(65537));
        assert_eq!(3, ceil_log256(131072));
        assert_eq!(3, ceil_log256(16777216));
        assert_eq!(4, ceil_log256(16777217));
    }
}
|
extern crate crypto;
extern crate rusqlite;
extern crate chrono;
mod database;
mod shared;
mod budget;
mod transaction;
mod can_access_budget;
mod budget_period;
mod api;
mod util;
mod config;
use actix_web::{web, App, HttpResponse, HttpServer, Responder};
use openssl::ssl::{SslAcceptor, SslFiletype, SslMethod};
use database::*;
use config::Config;
use std::sync::{Mutex};
// Constants
const DB_PATH: &str = "budget.db";
// Shares database connection with all web server workers
struct AppState {
    // Mutex serializes access to the single connection from concurrent
    // actix worker threads.
    database: Mutex<Database>
}
/// Entry point: loads config and database, sets up TLS, and serves the API
/// plus static files over HTTPS until the server is shut down.
fn main() {
    println!("Budget Tracker server starting...");
    // Load config
    println!("Loading config...");
    let config = Config::load();
    println!("Loading database...");
    // A broken database is unrecoverable at startup, so panic with the cause.
    let database = match Database::new(config.secret.clone(), DB_PATH) {
        Ok(database) => database,
        Err(err) => {
            panic!("Error occurred while loading database: {:?}", err);
        }
    };
    // Shared application state handed to every worker.
    let state = web::Data::new(AppState {
        database: Mutex::new(database)
    });
    println!("Loading SSL keys...");
    let mut builder =
        SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
    builder
        .set_private_key_file(&config.ssl_key_path, SslFiletype::PEM)
        .unwrap();
    builder.set_certificate_chain_file(&config.ssl_cert_path).unwrap();
    println!("Starting HTTPS server using address \"{}\"...", &config.binding);
    // The API service is registered before the static-file fallback so that
    // `/` requests not matching the API are served from `public/`.
    HttpServer::new(move || {
        App::new()
            .service(api::get_service())
            .service(
                actix_files::Files::new("/", "public/.")
                    .index_file("index.html")
            )
            .register_data(state.clone())
    })
    .bind_ssl(&config.binding, builder)
    .unwrap()
    .run()
    .unwrap();
}
|
#![allow(clippy::comparison_chain)]
#![allow(clippy::collapsible_if)]
use std::cmp::Reverse;
use std::cmp::{max, min};
use std::collections::{BTreeSet, HashMap, HashSet};
use std::fmt::Debug;
use itertools::Itertools;
use whiteread::parse_line;
// NOTE(review): neither constant appears to be used in `main` below — confirm
// before removing (lowercase const names also trigger a compiler warning).
const ten97: usize = 1000_000_007;
/// Modular inverse of 2 mod `ten97`; multiply by it when a division by 2 is needed.
const inv2ten97: u128 = 500_000_004;
/// Reads `n` intervals `(a, b)` — presumably "user logs in from day `a` for
/// `b` days" — and prints, for k = 1..=n, the number of days on which exactly
/// k users were logged in (difference-array sweep over the event days).
fn main() {
    let n: usize = parse_line().unwrap();
    // Difference map over days: +1 at the first login day, -1 at the first
    // logged-out day (a + b).
    let mut nichis: HashMap<usize, isize> = HashMap::new();
    for _ in 0..n {
        let (a, b): (usize, usize) = parse_line().unwrap();
        let e = nichis.entry(a).or_default();
        *e += 1;
        let e = nichis.entry(a + b).or_default();
        *e += -1;
    }
    // dbg!(&nichis);
    // Running login count after each event day, in sorted day order.
    let mut logins: Vec<isize> = vec![];
    let mut hi = nichis.keys().collect_vec();
    hi.sort();
    // dbg!(&hi);
    let mut now = 0;
    // `nichis.iter().sorted()` visits keys in the same order as `hi`.
    for ni in nichis.iter().sorted() {
        now += ni.1;
        logins.push(now);
    }
    // dbg!(&logins);
    // ans[k] accumulates the total number of days with exactly k active users:
    // each sweep segment [hi[i], hi[i+1]) contributes its length.
    let mut ans = vec![0; n + 1];
    for i in 0..logins.len() - 1 {
        let l = logins[i];
        ans[l as usize] += hi[i + 1] - hi[i];
    }
    // Print ans[1..] space-separated, with the last value newline-terminated.
    for i in 1..ans.len() - 1 {
        print!("{} ", ans[i]);
    }
    println!("{}", ans.last().unwrap());
}
|
use bulletproofs::r1cs::Variable;
use curve25519_dalek::scalar::Scalar;
/// Cleartext (q, a, t) triple describing an asset value.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct Value {
    pub q: u64, // quantity
    pub a: Scalar, // issuer
    pub t: Scalar, // tag
}
/// Helper struct for ease of working with
/// 3-tuples of variables and assignments
#[derive(Copy, Clone, Debug)]
pub struct AllocatedValue {
    pub q: Variable, // quantity
    pub a: Variable, // issuer
    pub t: Variable, // tag
    // The prover's secret assignment, if known (None on the verifier side).
    pub assignment: Option<Value>,
}
/// Represents a variable for quantity, along with its assignment.
#[derive(Copy, Clone, Debug)]
pub struct AllocatedQuantity {
    pub variable: Variable,
    // Known only to the prover; None on the verifier side.
    pub assignment: Option<u64>,
}
impl Value {
    /// The all-zero value: zero quantity with zero issuer and tag scalars.
    pub fn zero() -> Value {
        let zero_scalar = Scalar::zero();
        Value {
            q: 0,
            a: zero_scalar,
            t: zero_scalar,
        }
    }
}
impl AllocatedValue {
    /// Projects this value onto its quantity variable, carrying along the
    /// quantity portion of the assignment when present.
    pub fn quantity(&self) -> AllocatedQuantity {
        let assignment = self.assignment.as_ref().map(|value| value.q);
        AllocatedQuantity {
            variable: self.q,
            assignment,
        }
    }
}
|
use crate::types::*;
use neo4rs_macros::BoltStruct;
/// Bolt protocol BEGIN message (struct signature 0x11), opening an explicit
/// transaction.
#[derive(Debug, PartialEq, Clone, BoltStruct)]
#[signature(0xB1, 0x11)]
pub struct Begin {
    // Transaction metadata map (e.g. a "tx_timeout" entry).
    extra: BoltMap,
}
impl Begin {
    /// Wraps the given metadata map in a BEGIN message.
    pub fn new(extra: BoltMap) -> Begin {
        Begin { extra }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::version::Version;
    use bytes::*;
    #[test]
    fn should_serialize_begin() {
        let begin = Begin::new(
            vec![("tx_timeout".into(), 2000.into())]
                .into_iter()
                .collect(),
        );
        let bytes: Bytes = begin.into_bytes(Version::V4_1).unwrap();
        // Expected wire format: struct marker + signature, then a tiny map
        // with one tiny-string key ("tx_timeout", 10 bytes) and an INT_16
        // value (2000 == 0x07D0, big-endian).
        assert_eq!(
            bytes,
            Bytes::from_static(&[
                0xB1,
                0x11,
                map::TINY | 1,
                string::TINY | 10,
                b't',
                b'x',
                b'_',
                b't',
                b'i',
                b'm',
                b'e',
                b'o',
                b'u',
                b't',
                integer::INT_16,
                0x07,
                0xD0
            ])
        );
    }
}
|
#![deny(warnings)]
#[macro_use]
extern crate futures;
#[macro_use]
extern crate loom;
#[path = "../src/semaphore.rs"]
#[allow(warnings)]
mod semaphore;
use semaphore::*;
use futures::{future, Async, Future, Poll};
use loom::futures::block_on;
use loom::thread;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::SeqCst;
use std::sync::Arc;
#[test]
fn basic_usage() {
    // Number of permits; the same number of actors is spawned, plus one inline.
    const NUM: usize = 2;
    // Future that acquires a permit, checks the concurrency bound, releases.
    struct Actor {
        waiter: Permit,
        shared: Arc<Shared>,
    }
    struct Shared {
        semaphore: Semaphore,
        // Count of actors currently between fetch_add and fetch_sub below.
        active: AtomicUsize,
    }
    impl Future for Actor {
        type Item = ();
        type Error = ();
        fn poll(&mut self) -> Poll<(), ()> {
            // Parks (returns NotReady) until a semaphore permit is available.
            try_ready!(self
                .waiter
                .poll_acquire(&self.shared.semaphore)
                .map_err(|_| ()));
            // With NUM permits, at most NUM actors may be inside concurrently.
            let actual = self.shared.active.fetch_add(1, SeqCst);
            assert!(actual <= NUM - 1);
            let actual = self.shared.active.fetch_sub(1, SeqCst);
            assert!(actual <= NUM);
            self.waiter.release(&self.shared.semaphore);
            Ok(Async::Ready(()))
        }
    }
    // loom explores the relevant thread interleavings of this closure.
    loom::fuzz(|| {
        let shared = Arc::new(Shared {
            semaphore: Semaphore::new(NUM),
            active: AtomicUsize::new(0),
        });
        for _ in 0..NUM {
            let shared = shared.clone();
            thread::spawn(move || {
                block_on(Actor {
                    waiter: Permit::new(),
                    shared,
                })
                .unwrap();
            });
        }
        // One more actor runs on the main thread to contend for permits.
        block_on(Actor {
            waiter: Permit::new(),
            shared,
        })
        .unwrap();
    });
}
#[test]
fn release() {
    // A released permit must become observable by a concurrent waiter.
    loom::fuzz(|| {
        let semaphore = Arc::new(Semaphore::new(1));
        {
            let semaphore = semaphore.clone();
            thread::spawn(move || {
                let mut permit = Permit::new();
                // `lazy` defers the acquire until polled inside block_on.
                block_on(future::lazy(|| {
                    permit.poll_acquire(&semaphore).unwrap();
                    Ok::<_, ()>(())
                }))
                .unwrap();
                permit.release(&semaphore);
            });
        }
        let mut permit = Permit::new();
        // Blocks until the spawned thread releases the single permit.
        block_on(future::poll_fn(|| permit.poll_acquire(&semaphore))).unwrap();
        permit.release(&semaphore);
    });
}
#[test]
fn basic_closing() {
    const NUM: usize = 2;
    // Closing the semaphore must cleanly error out concurrent acquire attempts.
    loom::fuzz(|| {
        let semaphore = Arc::new(Semaphore::new(1));
        for _ in 0..NUM {
            let semaphore = semaphore.clone();
            thread::spawn(move || {
                let mut permit = Permit::new();
                for _ in 0..2 {
                    // Once the semaphore is closed, acquisition errors and
                    // `?` ends this thread's closure early.
                    block_on(future::poll_fn(|| {
                        permit.poll_acquire(&semaphore).map_err(|_| ())
                    }))?;
                    permit.release(&semaphore);
                }
                Ok::<(), ()>(())
            });
        }
        semaphore.close();
    });
}
#[test]
fn concurrent_close() {
    const NUM: usize = 3;
    // Several threads acquire, release, and then race each other to close.
    loom::fuzz(|| {
        let semaphore = Arc::new(Semaphore::new(1));
        for _ in 0..NUM {
            let semaphore = semaphore.clone();
            thread::spawn(move || {
                let mut permit = Permit::new();
                // Acquisition may fail once another thread has closed the
                // semaphore; `?` then ends this closure early.
                block_on(future::poll_fn(|| {
                    permit.poll_acquire(&semaphore).map_err(|_| ())
                }))?;
                permit.release(&semaphore);
                semaphore.close();
                Ok::<(), ()>(())
            });
        }
    });
}
|
use sgx_types::{
sgx_status_t,
sgx_enclave_id_t,
};
extern {
    /// FFI entry point into the SGX enclave sample (generated ECALL wrapper).
    ///
    /// NOTE(review): `scratch_pad_size` is typed `*const u8` although its name
    /// suggests a length — confirm against the enclave's EDL definition.
    pub fn run_sample(
        eid: sgx_enclave_id_t,
        retval: *mut sgx_status_t,
        scratch_pad_pointer: *mut u8,
        scratch_pad_size: *const u8,
    ) -> sgx_status_t;
}
|
/// Simple RGB color with 8-bit channels.
struct Color {
    red: u8,
    green: u8,
    blue: u8
}
/// Tutorial entry point demonstrating Vec construction, capacity growth,
/// the three iteration modes, and struct destructuring.
fn main() {
    // Empty vector; an annotation is required since nothing is ever pushed.
    let _a: Vec<i32> = Vec::new();
    let mut b: Vec<i32> = vec![5, 4, 3, 2, 1, 0];
    b.push(523);
    println!("{:?}", b);
    // Pre-allocation: the first 10 pushes fit without reallocating.
    let mut e: Vec<i32> = Vec::with_capacity(10);
    println!("length: {}, capacity: {}", e.len(), e.capacity());
    for i in 0..10 {
        e.push(i);
    }
    println!("length: {}, capacity: {}", e.len(), e.capacity());
    // This push exceeds the initial capacity and forces the vector to grow.
    e.push(11);
    println!("length: {}, capacity: {}", e.len(), e.capacity());
    // pop() returns Option<i32>; capacity is retained after removal.
    let x = e.pop();
    println!("x = {}, length: {}, capacity: {}", x.unwrap(), e.len(), e.capacity());
    let mut v = vec![1,2 ,23, 43,243,423];
    // Iterate by shared reference, by mutable reference, then by value
    // (the last form consumes the vector).
    for i in &v {
        println!("A reference to {}", i);
    }
    for i in &mut v {
        println!("A mutable reference to {}", i);
    }
    for i in v {
        println!("Take ownership of the vector and its element {}", i);
    }
    let black = Color { red: 0, green: 0, blue: 0 };
    println!("Black = rgb({}, {}, {})", black.red, black.green, black.blue);
    // Destructure the returned struct directly into locals r/g/b.
    let Color { red: r, green: g, blue: b } = get_midnightblue_color();
    println!("Midnight blue = rgb({}, {}, {})", r, g, b);
}
/// Returns the CSS "midnightblue" color, rgb(25, 25, 112).
fn get_midnightblue_color() -> Color {
    Color {red: 25, green: 25, blue: 112 }
}
#![feature(
plugin,
test,
const_slice_len,
never_type,
alloc_layout_extra,
try_from,
try_trait,
bind_by_move_pattern_guards,
fnbox,
copysign
)]
#![plugin(dynasm)]
extern crate test;
#[macro_use]
extern crate smallvec;
extern crate capstone;
extern crate either;
extern crate failure;
pub extern crate wasmparser;
#[macro_use]
extern crate failure_derive;
#[macro_use]
extern crate memoffset;
extern crate dynasmrt;
extern crate itertools;
#[cfg(test)]
#[macro_use]
extern crate lazy_static;
#[cfg(test)]
#[macro_use]
extern crate quickcheck;
extern crate wabt;
// Just so we can implement `Signature` for `cranelift_codegen::ir::Signature`
extern crate cranelift_codegen;
extern crate multi_mut;
mod backend;
mod disassemble;
mod error;
mod function_body;
mod microwasm;
mod module;
mod translate_sections;
#[cfg(test)]
mod tests;
pub use crate::backend::CodeGenSession;
pub use crate::function_body::translate_wasm as translate_function;
pub use crate::module::{translate, ExecutableModule, ModuleContext, Signature, TranslatedModule};
|
use url::Url;
use vostok_core::request::Request;
/// A parsed Gemini protocol request, which consists solely of a URL.
#[derive(Debug)]
pub struct GeminiRequest {
    url: Url,
}
impl Request for GeminiRequest {
    /// Returns the URL path, substituting "/" when the path is empty.
    fn path(&self) -> &str {
        let raw = self.url.path();
        if raw.is_empty() {
            "/"
        } else {
            raw
        }
    }
}
impl GeminiRequest {
    /// Parses a request line into a `GeminiRequest`; `None` on invalid URLs.
    pub fn from_line(line: &str) -> Option<Self> {
        Url::parse(line).ok().map(|url| GeminiRequest { url })
    }
}
|
#[path = "spawn_3/with_atom_module.rs"]
mod with_atom_module;
// `without_atom_module_errors_badarg` in unit tests
|
#![cfg_attr(
all(not(debug_assertions), target_os = "windows"),
windows_subsystem = "windows"
)]
/// Tauri command invoked from the frontend: greets `name` and logs the reply.
#[tauri::command]
fn say_hello(name: String) -> String {
    let response = format!("Hello, {}!", name);
    // Log the outgoing reply so frontend/backend traffic is visible on the console.
    println!("{:?}", response);
    // `response` is already a `String`; the previous `.into()` was a no-op conversion.
    response
}
use tauri::Manager;
/// Payload serialized to the frontend with emitted events.
#[derive(Clone, serde::Serialize)]
struct Payload {
    message: String,
}
/// Builds and runs the Tauri app: demonstrates global event
/// listen/unlisten/emit in `setup` and registers the `say_hello` command.
fn main() {
    println!("** Tauri app about to start");
    tauri::Builder::default()
        .setup(|app| {
            // listen to the `event-name` (emitted on any window)
            let id = app.listen_global("user-click", |event| {
                println!("got event with payload {:?}", event.payload());
            });
            // unlisten to the event using the `id` returned on the `listen_global` function
            // an `once_global` API is also exposed on the `App` struct
            app.unlisten(id);
            // emit the `event-name` event to all webview windows on the frontend
            app.emit_all(
                "user-click",
                Payload {
                    message: "Tauri is awesome! (coming from back-end)".into(),
                },
            )
            .unwrap();
            Ok(())
        })
        .invoke_handler(tauri::generate_handler![say_hello,])
        .run(tauri::generate_context!())
        .expect("error while running tauri application");
}
|
// Copyright 2020-2021, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![cfg(not(tarpaulin_include))]
use crate::pipeline;
use crate::sink::prelude::*;
use crate::url::TremorUrl;
use async_channel::Sender;
use halfbrown::HashMap;
pub(crate) mod amqp;
pub(crate) mod blackhole;
pub(crate) mod cb;
pub(crate) mod debug;
pub(crate) mod dns;
pub(crate) mod elastic;
pub(crate) mod exit;
pub(crate) mod file;
pub(crate) mod gcs;
pub(crate) mod gpub;
pub(crate) mod kafka;
pub(crate) mod kv;
pub(crate) mod nats;
pub(crate) mod newrelic;
pub(crate) mod otel;
pub(crate) mod postgres;
pub(crate) mod prelude;
pub(crate) mod rest;
pub(crate) mod stderr;
pub(crate) mod stdout;
pub(crate) mod tcp;
pub(crate) mod udp;
pub(crate) mod ws;
/// Replies a sink can send back towards the runtime/pipelines.
#[derive(Debug)]
pub enum Reply {
    /// Contraflow insight event (circuit-breaker / delivery control information).
    Insight(Event),
    // out port and the event
    // TODO better name for this?
    Response(Cow<'static, str>, Event),
}
/// Result for a sink function that may provide insights or responses.
///
/// It can return None or Some(vec![]) if no insights/responses were generated.
///
/// An insight is a contraflow event containing control information for the runtime like
/// circuit breaker events, guaranteed delivery events, etc.
///
/// A response is an event generated from the sink delivery.
pub(crate) type ResultVec = Result<Option<Vec<Reply>>>;
/// Common interface implemented by every offramp sink in this module.
#[async_trait::async_trait]
pub(crate) trait Sink {
    /// Handles an incoming event.
    ///
    /// ## Error handling
    ///
    /// The circuit-breaker (CB) and guaranteed delivery (GD) mechanics require this function to exhibit certain behaviour:
    /// if `auto_ack()` returns `false`:
    /// * This function should catch __ALL__ errors and send appropriate insights in the returned `ResultVec` (or via the `reply_channel` it received in `init`).
    /// * For returned `Err()`s, no insight will be sent, this violates the GD requirements for some upstream onramps/operators and will lead to sneaky bugs. Do not do that!
    ///
    /// if `auto_ack()` returns `true`:
    /// * Errors can be bubbled up from this function using `?`, if the event requires GD events (`event.transactional == true`) the `OfframpManager` will take care of this.
    /// * CB events like `trigger` or `restore` need to be sent via the `ResultVec` or `reply_channel`.
    async fn on_event(
        &mut self,
        input: &str,
        codec: &mut dyn Codec,
        codec_map: &HashMap<String, Box<dyn Codec>>,
        event: Event,
    ) -> ResultVec;
    /// Handles a signal event from the runtime.
    async fn on_signal(&mut self, signal: Event) -> ResultVec;
    /// This function should be implemented to be idempotent
    ///
    /// The passed reply_channel is for fast-tracking sink-replies going back to the connected pipelines.
    /// It is an additional way to return them besides the ResultVec of on_event / on_signal.
    #[allow(clippy::too_many_arguments)]
    async fn init(
        &mut self,
        sink_uid: u64,
        sink_url: &TremorUrl,
        codec: &dyn Codec,
        codec_map: &HashMap<String, Box<dyn Codec>>,
        processors: Processors<'_>,
        is_linked: bool,
        reply_channel: Sender<Reply>,
    ) -> Result<()>;
    // Excluded from coverage: the default implementation is an intentional no-op.
    #[cfg(not(tarpaulin_include))]
    /// Callback for graceful shutdown (default behaviour: do nothing)
    async fn terminate(&mut self) {}
    /// Is the sink active and ready to process events
    fn is_active(&self) -> bool;
    /// Is the sink automatically acknowledging events or engaged in some form of delivery
    /// guarantee
    ///
    /// If this sink returns `false` here, it needs to adhere to the following protocol for CB ack/fail insights:
    /// send one `ack` or `fail` CB insight for each incoming event if `event.transaction == true`.
    /// Otherwise don't send any.
    /// For `ack` insights include a `time` field in the insight metadata with the duration it took for handling the event in milliseconds, if it makes sense.
    fn auto_ack(&self) -> bool;
    /// Name of the codec used when the offramp config does not specify one.
    fn default_codec(&self) -> &str;
}
/// Wraps a [`Sink`] implementation and tracks the pipelines connected to it,
/// bridging between the `Offramp` protocol and the sink itself.
pub(crate) struct SinkManager<T>
where
    T: Sink,
{
    // URL of the managed sink; `None` until `start` has been called.
    sink_url: Option<TremorUrl>,
    // the wrapped sink implementation all offramp calls are forwarded to
    sink: T,
    // upstream pipelines that receive insight events from this sink
    pipelines: HashMap<TremorUrl, pipeline::Addr>,
    // for linked offramps
    dest_pipelines: HashMap<Cow<'static, str>, Vec<(TremorUrl, pipeline::Addr)>>,
}
impl<T> SinkManager<T>
where
T: Sink + Send,
{
fn new(sink: T) -> Self {
Self {
sink_url: None,
sink,
pipelines: HashMap::new(),
dest_pipelines: HashMap::new(),
}
}
fn new_box(sink: T) -> Box<Self> {
Box::new(Self::new(sink))
}
fn has_dest_pipelines(&self) -> bool {
self.dest_pipelines.values().any(|xs| !xs.is_empty())
}
}
#[async_trait::async_trait]
impl<T> Offramp for SinkManager<T>
where
    T: Sink + Send,
{
    /// Forward graceful shutdown to the wrapped sink.
    async fn terminate(&mut self) {
        self.sink.terminate().await;
    }
    #[allow(clippy::too_many_arguments)]
    async fn start(
        &mut self,
        offramp_uid: u64,
        offramp_url: &TremorUrl,
        codec: &dyn Codec,
        codec_map: &HashMap<String, Box<dyn Codec>>,
        processors: Processors<'_>,
        is_linked: bool,
        reply_channel: Sender<Reply>,
    ) -> Result<()> {
        // remember the url first so later log messages can reference it
        self.sink_url = Some(offramp_url.clone());
        self.sink
            .init(
                offramp_uid, // we treat offramp_uid and sink_uid as the same thing
                offramp_url,
                codec,
                codec_map,
                processors,
                is_linked,
                reply_channel,
            )
            .await
    }
    /// Forward an event to the sink and dispatch any replies it returned:
    /// insights go to all connected upstream pipelines, responses go to the
    /// destination pipelines registered for the reply's port.
    async fn on_event(
        &mut self,
        codec: &mut dyn Codec,
        codec_map: &HashMap<String, Box<dyn Codec>>,
        input: &str,
        event: Event,
    ) -> Result<()> {
        if let Some(mut replies) = self.sink.on_event(input, codec, codec_map, event).await? {
            for reply in replies.drain(..) {
                match reply {
                    Reply::Insight(e) => handle_insight(e, self.pipelines.values()).await?,
                    Reply::Response(port, event) => {
                        // responses for ports without registered destination
                        // pipelines are silently dropped
                        if let Some(pipelines) = self.dest_pipelines.get_mut(&port) {
                            handle_response(event, pipelines.iter()).await?;
                        }
                    }
                }
            }
        }
        Ok(())
    }
    fn default_codec(&self) -> &str {
        self.sink.default_codec()
    }
    /// Register an upstream pipeline that should receive insights.
    fn add_pipeline(&mut self, id: TremorUrl, addr: pipeline::Addr) {
        self.pipelines.insert(id, addr);
    }
    /// Register a destination pipeline for responses on the given port
    /// (linked-offramp case).
    fn add_dest_pipeline(&mut self, port: Cow<'static, str>, id: TremorUrl, addr: pipeline::Addr) {
        let p = (id, addr);
        if let Some(port_ps) = self.dest_pipelines.get_mut(&port) {
            port_ps.push(p);
        } else {
            self.dest_pipelines.insert(port, vec![p]);
        }
    }
    /// Remove an upstream pipeline; returns `true` when no pipelines
    /// (upstream or destination) remain connected at all.
    fn remove_pipeline(&mut self, id: TremorUrl) -> bool {
        self.pipelines.remove(&id);
        self.pipelines.is_empty() && !self.has_dest_pipelines()
    }
    /// Remove a destination pipeline from the given port; returns `true`
    /// when no pipelines (upstream or destination) remain connected at all.
    fn remove_dest_pipeline(&mut self, port: Cow<'static, str>, id: TremorUrl) -> bool {
        if let Some(port_ps) = self.dest_pipelines.get_mut(&port) {
            port_ps.retain(|(url, _)| url != &id);
        }
        self.pipelines.is_empty() && !self.has_dest_pipelines()
    }
    /// Forward a signal to the sink and dispatch resulting replies.
    ///
    /// Unlike `on_event`, errors from the sink or from dispatching replies
    /// are logged and swallowed so signal handling never fails the offramp.
    async fn on_signal(&mut self, signal: Event) -> Option<Event> {
        let replies = match self.sink.on_signal(signal).await {
            // the `?` returns `None` early when the sink produced no replies
            Ok(results) => results?,
            Err(e) => {
                if let Some(sink_url) = &self.sink_url {
                    error!("[Sink::{}] Error processing signal: {}", sink_url, e);
                }
                return None;
            }
        };
        for reply in replies {
            match reply {
                Reply::Insight(e) => {
                    if let Err(e) = handle_insight(e, self.pipelines.values()).await {
                        if let Some(sink_url) = &self.sink_url {
                            error!("[Sink::{}] Error handling insight in sink: {}", sink_url, e);
                        }
                    }
                }
                Reply::Response(port, event) => {
                    if let Some(pipelines) = self.dest_pipelines.get_mut(&port) {
                        if let Err(e) = handle_response(event, pipelines.iter()).await {
                            if let Some(sink_url) = &self.sink_url {
                                error!(
                                    "[Sink::{}] Error handling response in sink: {}",
                                    sink_url, e
                                );
                            }
                        }
                    }
                }
            }
        }
        None
    }
    fn is_active(&self) -> bool {
        self.sink.is_active()
    }
    fn auto_ack(&self) -> bool {
        self.sink.auto_ack()
    }
}
/// Broadcast an insight event to all given pipelines.
///
/// Send failures are only logged, never propagated: a dropped insight must
/// not fail the sink.
pub(crate) async fn handle_insight<'iter, T>(insight: Event, mut pipelines: T) -> Result<()>
where
    T: Iterator<Item = &'iter pipeline::Addr>,
{
    let first = match pipelines.next() {
        Some(addr) => addr,
        // no pipelines connected - nothing to do
        None => return Ok(()),
    };
    // every pipeline after the first gets a clone ...
    for addr in pipelines {
        if let Err(e) = addr.send_insight(insight.clone()).await {
            // TODO: is this wanted to not raise the error here?
            error!("Error: {}", e);
        }
    }
    // ... so the first one can receive the original without an extra clone
    if let Err(e) = first.send_insight(insight).await {
        error!("Error: {}", e);
    }
    Ok(())
}
/// handle response back from sink e.g. in linked transport case
///
/// Send failures are only logged, never propagated; a missing instance port
/// on a pipeline url, however, is a hard error.
pub(crate) async fn handle_response<'iter, T>(response: Event, mut pipelines: T) -> Result<()>
where
    T: Iterator<Item = &'iter (TremorUrl, pipeline::Addr)>,
{
    let first = match pipelines.next() {
        Some(f) => f,
        // nothing to deliver to
        None => return Ok(()),
    };
    // every pipeline after the first gets a clone of the response ...
    for (id, addr) in pipelines {
        // TODO alt way here?
        // pre-save this already in dest_pipelines?
        let port = id.instance_port_required()?.to_owned();
        let msg = pipeline::Msg::Event {
            event: response.clone(),
            input: port.into(),
        };
        if let Err(e) = addr.send(msg).await {
            error!("Error: {}", e);
        }
    }
    // ... so the first one can receive the original without an extra clone
    let (first_id, first_addr) = first;
    let first_port = first_id.instance_port_required()?.to_owned();
    let msg = pipeline::Msg::Event {
        event: response,
        input: first_port.into(),
    };
    if let Err(e) = first_addr.send(msg).await {
        error!("Error: {}", e);
    }
    Ok(())
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::pipeline::Msg;
    // Verify that `handle_response` delivers the event to every pipeline,
    // addressed at each pipeline's own instance port, and that nothing is
    // sent on the other channels of the pipeline addresses.
    #[async_std::test]
    async fn test_send() -> Result<()> {
        let e = Event::default();
        // a pipeline::Addr is built from three channels; only the first one
        // (r11 / r21) is expected to receive the event
        let (t11, r11) = async_channel::unbounded();
        let (t12, r12) = async_channel::unbounded();
        let (t13, r13) = async_channel::unbounded();
        let p1 = pipeline::Addr::new(
            t11,
            t12,
            t13,
            TremorUrl::parse("tremor://host/pipeline/name1/instance1/port1")?,
        );
        let (t21, r21) = async_channel::unbounded();
        let (t22, r22) = async_channel::unbounded();
        let (t23, r23) = async_channel::unbounded();
        let p2 = pipeline::Addr::new(
            t21,
            t22,
            t23,
            TremorUrl::parse("tremor://host/pipeline/name2/instance2/port2")?,
        );
        let p = vec![(p1.id().clone(), p1), (p2.id().clone(), p2)];
        handle_response(e.clone(), p.iter()).await?;
        // first pipeline: event arrives on its own instance port "port1"
        if let Msg::Event { event, input } = r11.recv().await? {
            assert_eq!(event, e);
            assert_eq!(input, "port1");
        } else {
            panic!("not an event");
        }
        assert!(r11.is_empty());
        assert!(r12.is_empty());
        assert!(r13.is_empty());
        // second pipeline: same event, addressed at "port2"
        if let Msg::Event { event, input } = r21.recv().await? {
            assert_eq!(event, e);
            assert_eq!(input, "port2");
        } else {
            panic!("not an event");
        }
        assert!(r21.is_empty());
        assert!(r22.is_empty());
        assert!(r23.is_empty());
        Ok(())
    }
}
|
// Machine-generated `windows-sys` bindings: COM interface pointers of the
// Win32 Task Scheduler API, all represented as opaque raw pointers.
pub type IAction = *mut ::core::ffi::c_void;
pub type IActionCollection = *mut ::core::ffi::c_void;
pub type IBootTrigger = *mut ::core::ffi::c_void;
pub type IComHandlerAction = *mut ::core::ffi::c_void;
pub type IDailyTrigger = *mut ::core::ffi::c_void;
pub type IEmailAction = *mut ::core::ffi::c_void;
pub type IEnumWorkItems = *mut ::core::ffi::c_void;
pub type IEventTrigger = *mut ::core::ffi::c_void;
pub type IExecAction = *mut ::core::ffi::c_void;
pub type IExecAction2 = *mut ::core::ffi::c_void;
pub type IIdleSettings = *mut ::core::ffi::c_void;
pub type IIdleTrigger = *mut ::core::ffi::c_void;
pub type ILogonTrigger = *mut ::core::ffi::c_void;
pub type IMaintenanceSettings = *mut ::core::ffi::c_void;
pub type IMonthlyDOWTrigger = *mut ::core::ffi::c_void;
pub type IMonthlyTrigger = *mut ::core::ffi::c_void;
pub type INetworkSettings = *mut ::core::ffi::c_void;
pub type IPrincipal = *mut ::core::ffi::c_void;
pub type IPrincipal2 = *mut ::core::ffi::c_void;
pub type IProvideTaskPage = *mut ::core::ffi::c_void;
pub type IRegisteredTask = *mut ::core::ffi::c_void;
pub type IRegisteredTaskCollection = *mut ::core::ffi::c_void;
pub type IRegistrationInfo = *mut ::core::ffi::c_void;
pub type IRegistrationTrigger = *mut ::core::ffi::c_void;
pub type IRepetitionPattern = *mut ::core::ffi::c_void;
pub type IRunningTask = *mut ::core::ffi::c_void;
pub type IRunningTaskCollection = *mut ::core::ffi::c_void;
pub type IScheduledWorkItem = *mut ::core::ffi::c_void;
pub type ISessionStateChangeTrigger = *mut ::core::ffi::c_void;
pub type IShowMessageAction = *mut ::core::ffi::c_void;
pub type ITask = *mut ::core::ffi::c_void;
pub type ITaskDefinition = *mut ::core::ffi::c_void;
pub type ITaskFolder = *mut ::core::ffi::c_void;
pub type ITaskFolderCollection = *mut ::core::ffi::c_void;
pub type ITaskHandler = *mut ::core::ffi::c_void;
pub type ITaskHandlerStatus = *mut ::core::ffi::c_void;
pub type ITaskNamedValueCollection = *mut ::core::ffi::c_void;
pub type ITaskNamedValuePair = *mut ::core::ffi::c_void;
pub type ITaskScheduler = *mut ::core::ffi::c_void;
pub type ITaskService = *mut ::core::ffi::c_void;
pub type ITaskSettings = *mut ::core::ffi::c_void;
pub type ITaskSettings2 = *mut ::core::ffi::c_void;
pub type ITaskSettings3 = *mut ::core::ffi::c_void;
pub type ITaskTrigger = *mut ::core::ffi::c_void;
pub type ITaskVariables = *mut ::core::ffi::c_void;
pub type ITimeTrigger = *mut ::core::ffi::c_void;
pub type ITrigger = *mut ::core::ffi::c_void;
pub type ITriggerCollection = *mut ::core::ffi::c_void;
pub type IWeeklyTrigger = *mut ::core::ffi::c_void;
// Machine-generated constants: class ids, month/weekday/week-of-month
// bitmasks and legacy (v1) TASK_FLAG_* / TASK_TRIGGER_FLAG_* bits.
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const CLSID_CTask: ::windows_sys::core::GUID = ::windows_sys::core::GUID::from_u128(0x148bd520_a2ab_11ce_b11f_00aa00530503);
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const CLSID_CTaskScheduler: ::windows_sys::core::GUID = ::windows_sys::core::GUID::from_u128(0x148bd52a_a2ab_11ce_b11f_00aa00530503);
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_APRIL: u32 = 8u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_AUGUST: u32 = 128u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_DECEMBER: u32 = 2048u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_FEBRUARY: u32 = 2u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_FIRST_WEEK: u32 = 1u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_FLAG_DELETE_WHEN_DONE: u32 = 2u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_FLAG_DISABLED: u32 = 4u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_FLAG_DONT_START_IF_ON_BATTERIES: u32 = 64u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_FLAG_HIDDEN: u32 = 512u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_FLAG_INTERACTIVE: u32 = 1u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_FLAG_KILL_IF_GOING_ON_BATTERIES: u32 = 128u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_FLAG_KILL_ON_IDLE_END: u32 = 32u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_FLAG_RESTART_ON_IDLE_RESUME: u32 = 2048u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_FLAG_RUN_IF_CONNECTED_TO_INTERNET: u32 = 1024u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_FLAG_RUN_ONLY_IF_DOCKED: u32 = 256u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_FLAG_RUN_ONLY_IF_LOGGED_ON: u32 = 8192u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_FLAG_START_ONLY_IF_IDLE: u32 = 16u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_FLAG_SYSTEM_REQUIRED: u32 = 4096u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_FOURTH_WEEK: u32 = 4u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_FRIDAY: u32 = 32u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_JANUARY: u32 = 1u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_JULY: u32 = 64u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_JUNE: u32 = 32u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_LAST_WEEK: u32 = 5u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_MARCH: u32 = 4u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_MAX_RUN_TIMES: u32 = 1440u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_MAY: u32 = 16u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_MONDAY: u32 = 2u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_NOVEMBER: u32 = 1024u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_OCTOBER: u32 = 512u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_SATURDAY: u32 = 64u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_SECOND_WEEK: u32 = 2u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_SEPTEMBER: u32 = 256u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_SUNDAY: u32 = 1u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_THIRD_WEEK: u32 = 3u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_THURSDAY: u32 = 16u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_TRIGGER_FLAG_DISABLED: u32 = 4u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_TRIGGER_FLAG_HAS_END_DATE: u32 = 1u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_TRIGGER_FLAG_KILL_AT_DURATION_END: u32 = 2u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_TUESDAY: u32 = 4u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_WEDNESDAY: u32 = 8u32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TaskHandlerPS: ::windows_sys::core::GUID = ::windows_sys::core::GUID::from_u128(0xf2a69db7_da2c_4352_9066_86fee6dacac9);
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TaskHandlerStatusPS: ::windows_sys::core::GUID = ::windows_sys::core::GUID::from_u128(0x9f15266d_d7ba_48f0_93c1_e6895f6fe5ac);
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TaskScheduler: ::windows_sys::core::GUID = ::windows_sys::core::GUID::from_u128(0x0f87369f_a4e5_4cfc_bd3e_73e6154572dd);
// Machine-generated C-style enums: each enum is an `i32` type alias followed
// by its variant constants (matching the Windows SDK definitions).
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub type TASKPAGE = i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASKPAGE_TASK: TASKPAGE = 0i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASKPAGE_SCHEDULE: TASKPAGE = 1i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASKPAGE_SETTINGS: TASKPAGE = 2i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub type TASK_ACTION_TYPE = i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_ACTION_EXEC: TASK_ACTION_TYPE = 0i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_ACTION_COM_HANDLER: TASK_ACTION_TYPE = 5i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_ACTION_SEND_EMAIL: TASK_ACTION_TYPE = 6i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_ACTION_SHOW_MESSAGE: TASK_ACTION_TYPE = 7i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub type TASK_COMPATIBILITY = i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_COMPATIBILITY_AT: TASK_COMPATIBILITY = 0i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_COMPATIBILITY_V1: TASK_COMPATIBILITY = 1i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_COMPATIBILITY_V2: TASK_COMPATIBILITY = 2i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_COMPATIBILITY_V2_1: TASK_COMPATIBILITY = 3i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_COMPATIBILITY_V2_2: TASK_COMPATIBILITY = 4i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_COMPATIBILITY_V2_3: TASK_COMPATIBILITY = 5i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_COMPATIBILITY_V2_4: TASK_COMPATIBILITY = 6i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub type TASK_CREATION = i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_VALIDATE_ONLY: TASK_CREATION = 1i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_CREATE: TASK_CREATION = 2i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_UPDATE: TASK_CREATION = 4i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_CREATE_OR_UPDATE: TASK_CREATION = 6i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_DISABLE: TASK_CREATION = 8i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_DONT_ADD_PRINCIPAL_ACE: TASK_CREATION = 16i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_IGNORE_REGISTRATION_TRIGGERS: TASK_CREATION = 32i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub type TASK_ENUM_FLAGS = i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_ENUM_HIDDEN: TASK_ENUM_FLAGS = 1i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub type TASK_INSTANCES_POLICY = i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_INSTANCES_PARALLEL: TASK_INSTANCES_POLICY = 0i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_INSTANCES_QUEUE: TASK_INSTANCES_POLICY = 1i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_INSTANCES_IGNORE_NEW: TASK_INSTANCES_POLICY = 2i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_INSTANCES_STOP_EXISTING: TASK_INSTANCES_POLICY = 3i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub type TASK_LOGON_TYPE = i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_LOGON_NONE: TASK_LOGON_TYPE = 0i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_LOGON_PASSWORD: TASK_LOGON_TYPE = 1i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_LOGON_S4U: TASK_LOGON_TYPE = 2i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_LOGON_INTERACTIVE_TOKEN: TASK_LOGON_TYPE = 3i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_LOGON_GROUP: TASK_LOGON_TYPE = 4i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_LOGON_SERVICE_ACCOUNT: TASK_LOGON_TYPE = 5i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_LOGON_INTERACTIVE_TOKEN_OR_PASSWORD: TASK_LOGON_TYPE = 6i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub type TASK_PROCESSTOKENSID_TYPE = i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_PROCESSTOKENSID_NONE: TASK_PROCESSTOKENSID_TYPE = 0i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_PROCESSTOKENSID_UNRESTRICTED: TASK_PROCESSTOKENSID_TYPE = 1i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_PROCESSTOKENSID_DEFAULT: TASK_PROCESSTOKENSID_TYPE = 2i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub type TASK_RUNLEVEL_TYPE = i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_RUNLEVEL_LUA: TASK_RUNLEVEL_TYPE = 0i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_RUNLEVEL_HIGHEST: TASK_RUNLEVEL_TYPE = 1i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub type TASK_RUN_FLAGS = i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_RUN_NO_FLAGS: TASK_RUN_FLAGS = 0i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_RUN_AS_SELF: TASK_RUN_FLAGS = 1i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_RUN_IGNORE_CONSTRAINTS: TASK_RUN_FLAGS = 2i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_RUN_USE_SESSION_ID: TASK_RUN_FLAGS = 4i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_RUN_USER_SID: TASK_RUN_FLAGS = 8i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub type TASK_SESSION_STATE_CHANGE_TYPE = i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_CONSOLE_CONNECT: TASK_SESSION_STATE_CHANGE_TYPE = 1i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_CONSOLE_DISCONNECT: TASK_SESSION_STATE_CHANGE_TYPE = 2i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_REMOTE_CONNECT: TASK_SESSION_STATE_CHANGE_TYPE = 3i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_REMOTE_DISCONNECT: TASK_SESSION_STATE_CHANGE_TYPE = 4i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_SESSION_LOCK: TASK_SESSION_STATE_CHANGE_TYPE = 7i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_SESSION_UNLOCK: TASK_SESSION_STATE_CHANGE_TYPE = 8i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub type TASK_STATE = i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_STATE_UNKNOWN: TASK_STATE = 0i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_STATE_DISABLED: TASK_STATE = 1i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_STATE_QUEUED: TASK_STATE = 2i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_STATE_READY: TASK_STATE = 3i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_STATE_RUNNING: TASK_STATE = 4i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub type TASK_TRIGGER_TYPE = i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_TIME_TRIGGER_ONCE: TASK_TRIGGER_TYPE = 0i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_TIME_TRIGGER_DAILY: TASK_TRIGGER_TYPE = 1i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_TIME_TRIGGER_WEEKLY: TASK_TRIGGER_TYPE = 2i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_TIME_TRIGGER_MONTHLYDATE: TASK_TRIGGER_TYPE = 3i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_TIME_TRIGGER_MONTHLYDOW: TASK_TRIGGER_TYPE = 4i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_EVENT_TRIGGER_ON_IDLE: TASK_TRIGGER_TYPE = 5i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_EVENT_TRIGGER_AT_SYSTEMSTART: TASK_TRIGGER_TYPE = 6i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_EVENT_TRIGGER_AT_LOGON: TASK_TRIGGER_TYPE = 7i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub type TASK_TRIGGER_TYPE2 = i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_TRIGGER_EVENT: TASK_TRIGGER_TYPE2 = 0i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_TRIGGER_TIME: TASK_TRIGGER_TYPE2 = 1i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_TRIGGER_DAILY: TASK_TRIGGER_TYPE2 = 2i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_TRIGGER_WEEKLY: TASK_TRIGGER_TYPE2 = 3i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_TRIGGER_MONTHLY: TASK_TRIGGER_TYPE2 = 4i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_TRIGGER_MONTHLYDOW: TASK_TRIGGER_TYPE2 = 5i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_TRIGGER_IDLE: TASK_TRIGGER_TYPE2 = 6i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_TRIGGER_REGISTRATION: TASK_TRIGGER_TYPE2 = 7i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_TRIGGER_BOOT: TASK_TRIGGER_TYPE2 = 8i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_TRIGGER_LOGON: TASK_TRIGGER_TYPE2 = 9i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_TRIGGER_SESSION_STATE_CHANGE: TASK_TRIGGER_TYPE2 = 11i32;
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
pub const TASK_TRIGGER_CUSTOM_TRIGGER_01: TASK_TRIGGER_TYPE2 = 12i32;
#[repr(C)]
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
#[derive(Copy, Clone)]
pub struct DAILY {
    pub DaysInterval: u16,
}
#[repr(C)]
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
#[derive(Copy, Clone)]
pub struct MONTHLYDATE {
    pub rgfDays: u32,
    pub rgfMonths: u16,
}
#[repr(C)]
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
#[derive(Copy, Clone)]
pub struct MONTHLYDOW {
    pub wWhichWeek: u16,
    pub rgfDaysOfTheWeek: u16,
    pub rgfMonths: u16,
}
#[repr(C)]
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
#[derive(Copy, Clone)]
pub struct TASK_TRIGGER {
    pub cbTriggerSize: u16,
    pub Reserved1: u16,
    pub wBeginYear: u16,
    pub wBeginMonth: u16,
    pub wBeginDay: u16,
    pub wEndYear: u16,
    pub wEndMonth: u16,
    pub wEndDay: u16,
    pub wStartHour: u16,
    pub wStartMinute: u16,
    pub MinutesDuration: u32,
    pub MinutesInterval: u32,
    pub rgFlags: u32,
    pub TriggerType: TASK_TRIGGER_TYPE,
    pub Type: TRIGGER_TYPE_UNION,
    pub Reserved2: u16,
    pub wRandomMinutesInterval: u16,
}
#[repr(C)]
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
#[derive(Copy, Clone)]
pub union TRIGGER_TYPE_UNION {
    pub Daily: DAILY,
    pub Weekly: WEEKLY,
    pub MonthlyDate: MONTHLYDATE,
    pub MonthlyDOW: MONTHLYDOW,
}
#[repr(C)]
#[doc = "*Required features: `\"Win32_System_TaskScheduler\"`*"]
#[derive(Copy, Clone)]
pub struct WEEKLY {
    pub WeeksInterval: u16,
    pub rgfDaysOfTheWeek: u16,
}
|
// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
crate::common_operations::allowed_ops,
crate::target::{AvailableTargets, TargetOps},
clap::{App, Arg},
failure::Fail,
log::error,
std::ops::RangeInclusive,
};
/// Errors surfaced while validating the parsed command line arguments.
#[derive(Debug, Fail, PartialEq)]
pub enum Error {
    /// The requested operation set is not supported by the chosen target
    /// type (see `target_operations_validator`).
    #[fail(display = "Operation not supported for the target.")]
    OperationNotSupported,
}
/// Validated command line arguments controlling how odu generates IO load.
#[derive(Debug)]
pub struct ParseArgs {
    /// Limit on number of outstanding IOs - IOs that are generated but are not
    /// complete.
    pub queue_depth: usize,
    /// odu writes a header at the beginning of each block boundary. When not
    /// passed as a parameter an attempt may be made to guess the block size. On
    /// failure to get block size, a default block size is chosen. This helps
    /// odu to verify the success of the operation. See also `align` and
    /// `max_io_size`.
    pub block_size: u64,
    /// Maximum size of the IO issued. This parameter specifies the number of
    /// bytes read/written from/to the `target` in one operation.
    pub max_io_size: u64,
    /// If set to true, IOs are aligned to `block_size`. If set to false, a
    /// random offset is chosen to issue IOs.
    pub align: bool,
    /// Number of IO operations to generate. This number does not include IOs
    /// issued to verify.
    pub max_io_count: u64,
    /// IOs are issued on the `target` for a range of offset between
    /// [0, `target_length`). The bytes from offset `target_length` till the
    /// end of the target are not operated on.
    pub target_length: u64,
    /// Configures number of IO issuing threads. IO issuing threads are
    /// generally not cpu bound. There may be more threads in the process to
    /// generate load and to verify the IO.
    pub thread_count: usize,
    /// Specifies how a `target` is opened, what type of IO functions are called
    /// and how completions of those IOs will be delivered. For example fdio
    /// might call posix-like pwrite, pread, etc. but blob target may have
    /// completely different restrictions on issuing IOs.
    pub target_type: AvailableTargets,
    /// These are the set of operations for which generator will generate
    /// io packets. These operation performance is what user is interested
    /// in.
    pub operations: TargetOps,
    /// When true, the `target` access (read/write) are sequential with respect
    /// to offsets within the `target`.
    pub sequential: bool,
    /// Parameters passed to odu gets written to `output_config_file`.
    pub output_config_file: String,
    /// A `target` can be a path in filesystem, a hash in blobfs, a path in
    /// device tree, or a named pipe. These are pre-existing targets that have
    /// a non-zero length. IOs are performed only on these targets. All threads
    /// get exclusive access to certain parts of the `target`.
    pub target: String,
}
// Size units used by the defaults and ranges below.
const KIB: u64 = 1024;
const MIB: u64 = KIB * KIB;
// TODO(auradkar): Some of the default values/ranges are intentionally set low
// so that the tool runs for shorter duration.
// And some of the defaults and ranges have arbitrarily values.
// We need to come up with better ones.
const QUEUE_DEPTH_RANGE: RangeInclusive<usize> = 1..=256;
const QUEUE_DEPTH_DEFAULT: usize = 40;
const BLOCK_SIZE_RANGE: RangeInclusive<u64> = 1..=MIB;
const BLOCK_SIZE_DEFAULT: u64 = 8 * KIB;
const MAX_IO_SIZE_RANGE: RangeInclusive<u64> = 1..=MIB;
const MAX_IO_SIZE_DEFAULT: u64 = MIB;
const ALIGN_DEFAULT: bool = true;
const MAX_IO_COUNT_RANGE: RangeInclusive<u64> = 1..=2_000_000;
const MAX_IO_COUNT_DEFAULT: u64 = 1000;
const TARGET_SIZE_DEFAULT: u64 = 20 * MIB;
const THREAD_COUNT_RANGE: RangeInclusive<usize> = 1..=16;
const THREAD_COUNT_DEFAULT: usize = 3;
const TARGET_OPERATIONS_DEFAULT: &str = "write";
const TARGET_TYPE_DEFAULT: AvailableTargets = AvailableTargets::FileTarget;
const SEQUENTIAL_DEFAULT: bool = true;
const OUTPUT_CONFIG_FILE_DEFAULT: &str = "/tmp/output.config";
/// Render an inclusive range as a human readable "Min:.. Max:.." string for
/// use in clap value names.
fn to_string_min_max<T: std::fmt::Debug>(val: RangeInclusive<T>) -> String {
    let (min, max) = val.into_inner();
    format!("Min:{:?} Max:{:?}", min, max)
}
/// Shared clap-validator helper: parse `val` as a `T` and check it falls
/// inside `range`, describing the failure in the returned `Err(String)`.
fn validate_range<T: std::str::FromStr + std::cmp::PartialOrd>(
    key: &str,
    val: String,
    range: RangeInclusive<T>,
) -> Result<(), String> {
    match val.parse::<T>() {
        Err(_) => Err(format!("{} expects a number. Found \"{}\"", key, val)),
        Ok(parsed) if !range.contains(&parsed) => {
            Err(format!(" {} value {} out of range.", key, val))
        }
        Ok(_) => Ok(()),
    }
}
// Thin clap validator wrappers: clap's `validator` expects a
// `Fn(String) -> Result<(), String>`, so each one just binds its range.
fn queue_depth_validator(val: String) -> Result<(), String> {
    validate_range("queue_depth", val, QUEUE_DEPTH_RANGE)
}
fn block_size_validator(val: String) -> Result<(), String> {
    validate_range("block_size", val, BLOCK_SIZE_RANGE)
}
fn max_io_size_validator(val: String) -> Result<(), String> {
    validate_range("max_io_size", val, MAX_IO_SIZE_RANGE)
}
fn max_io_count_validator(val: String) -> Result<(), String> {
    validate_range("max_io_count", val, MAX_IO_COUNT_RANGE)
}
fn thread_count_validator(val: String) -> Result<(), String> {
    validate_range("thread_count", val, THREAD_COUNT_RANGE)
}
/// Check that every requested operation name is supported by `target_type`
/// and build the resulting [`TargetOps`] set.
///
/// Takes a slice instead of `&Vec` (clippy `ptr_arg`); existing callers
/// passing `&Vec<&str>` still work via deref coercion.
///
/// Returns [`Error::OperationNotSupported`] (after logging the supported
/// operations) on the first operation the target does not allow.
fn target_operations_validator(
    target_type: AvailableTargets,
    operations: &[&str],
) -> Result<TargetOps, Error> {
    // Get the operations allowed by the target and see if the operations requested
    // is subset of the operations allowed.
    let allowed_ops = allowed_ops(target_type);
    let mut ops = TargetOps { write: false, open: false };
    for value in operations {
        if !allowed_ops.enabled(value) {
            error!(
                "{:?} is not allowed for target: {}",
                value,
                AvailableTargets::value_to_friendly_name(target_type)
            );
            error!(
                "For target: {}, supported operations are {:?}",
                AvailableTargets::value_to_friendly_name(target_type),
                allowed_ops.enabled_operation_names()
            );
            return Err(Error::OperationNotSupported);
        }
        // `enabled` above accepted the name, so `enable` cannot fail here.
        ops.enable(value, true).unwrap();
    }
    Ok(ops)
}
pub fn parse() -> Result<ParseArgs, Error> {
let queue_depth_default_str = &format!("{}", QUEUE_DEPTH_DEFAULT);
let block_size_default_str = &format!("{}", BLOCK_SIZE_DEFAULT);
let max_io_size_default_str = &format!("{}", MAX_IO_SIZE_DEFAULT);
let align_default_str = &format!("{}", ALIGN_DEFAULT);
let max_io_count_default_str = &format!("{}", MAX_IO_COUNT_DEFAULT);
let target_size_default_str = &format!("{}", TARGET_SIZE_DEFAULT);
let thread_count_default_str = &format!("{}", THREAD_COUNT_DEFAULT);
let target_operations_default_str = &format!("{}", TARGET_OPERATIONS_DEFAULT);
let target_type_default_str =
&format!("{}", AvailableTargets::value_to_friendly_name(TARGET_TYPE_DEFAULT));
let sequential_default_str = &format!("{}", SEQUENTIAL_DEFAULT);
let output_config_file_default_str = &format!("{}", OUTPUT_CONFIG_FILE_DEFAULT);
let matches = App::new("odu")
// TODO: We cannot get package version through `CARGO_PKG_VERSION`.
// Find out a way.
.version("0.1.0")
.about("IO benchmarking library and utility")
.arg(
Arg::with_name("queue_depth")
.short("q")
.long("queue_depth")
.value_name(&to_string_min_max(QUEUE_DEPTH_RANGE))
.default_value(queue_depth_default_str)
.validator(queue_depth_validator)
.help("Maximum number of outstanding IOs per thread.")
.takes_value(true),
)
.arg(
Arg::with_name("block_size")
.short("b")
.long("block_size")
.value_name(&to_string_min_max(BLOCK_SIZE_RANGE))
.default_value(&block_size_default_str)
.validator(block_size_validator)
.help("Maximum number of outstanding IOs per thread.")
.takes_value(true),
)
.arg(
Arg::with_name("max_io_size")
.short("i")
.long("max_io_size")
.value_name(&to_string_min_max(MAX_IO_SIZE_RANGE))
.default_value(&max_io_size_default_str)
.validator(max_io_size_validator)
.help("Maximum number of outstanding IOs per thread.")
.takes_value(true),
)
.arg(
Arg::with_name("align")
.short("a")
.long("align")
.possible_values(&["true", "false"])
.default_value(&align_default_str)
.help("Maximum number of outstanding IOs per thread.")
.takes_value(true),
)
.arg(
Arg::with_name("max_io_count")
.short("c")
.long("max_io_count")
.value_name(&to_string_min_max(MAX_IO_COUNT_RANGE))
.default_value(&max_io_count_default_str)
.validator(max_io_count_validator)
.help("Maximum number of outstanding IOs per thread.")
.takes_value(true),
)
.arg(
Arg::with_name("target_length")
.short("l")
.long("target_length")
.value_name(&to_string_min_max(0..=std::u64::MAX))
.default_value(&target_size_default_str)
.help("Maximum number of outstanding IOs per thread.")
.takes_value(true),
)
.arg(
Arg::with_name("thread_count")
.short("d")
.long("thread_count")
.value_name(&to_string_min_max(THREAD_COUNT_RANGE))
.default_value(&thread_count_default_str)
.validator(thread_count_validator)
.help("Maximum number of outstanding IOs per thread.")
.takes_value(true),
)
.arg(
Arg::with_name("operations")
.short("n")
.long("operations")
.possible_values(&TargetOps::friendly_names())
.default_value(&target_operations_default_str)
.help(
"Types of operations to generate load for. Not all operations \
are allowed for all targets",
)
.takes_value(true)
.use_delimiter(true)
.multiple(true),
)
.arg(
Arg::with_name("target_type")
.short("p")
.long("target_type")
.possible_values(&AvailableTargets::friendly_names()[..])
.default_value(&target_type_default_str)
.help("Maximum number of outstanding IOs per thread.")
.takes_value(true),
)
.arg(
Arg::with_name("sequential")
.short("s")
.long("sequential")
.possible_values(&["true", "false"])
.default_value(&sequential_default_str)
.help("Maximum number of outstanding IOs per thread.")
.takes_value(true),
)
.arg(
Arg::with_name("output_config_file")
.short("o")
.long("output_config_file")
.value_name("FILE")
.default_value(&output_config_file_default_str)
.help("Maximum number of outstanding IOs per thread.")
.takes_value(true),
)
.arg(
Arg::with_name("target")
.short("t")
.long("target")
.value_name("FILE")
.required(true)
.help("Maximum number of outstanding IOs per thread.")
.takes_value(true),
)
.get_matches();
let mut args = ParseArgs {
queue_depth: matches.value_of("queue_depth").unwrap().parse::<usize>().unwrap(),
block_size: matches.value_of("block_size").unwrap().parse::<u64>().unwrap(),
max_io_size: matches.value_of("max_io_size").unwrap().parse::<u64>().unwrap(),
align: matches.value_of("align").unwrap().parse::<bool>().unwrap(),
max_io_count: matches.value_of("max_io_count").unwrap().parse::<u64>().unwrap(),
target_length: matches.value_of("target_length").unwrap().parse::<u64>().unwrap(),
thread_count: matches.value_of("thread_count").unwrap().parse::<usize>().unwrap(),
target_type: AvailableTargets::friendly_name_to_value(
matches.value_of("target_type").unwrap(),
)
.unwrap(),
operations: Default::default(),
sequential: matches.value_of("sequential").unwrap().parse::<bool>().unwrap(),
output_config_file: matches.value_of("output_config_file").unwrap().to_string(),
target: matches.value_of("target").unwrap().to_string(),
};
args.operations = target_operations_validator(
args.target_type,
&matches.values_of("operations").unwrap().collect::<Vec<_>>(),
)?;
Ok(args)
}
// Unit tests for the argument validators above.
#[cfg(test)]
mod tests {
    use {crate::args, crate::common_operations::allowed_ops, crate::target::AvailableTargets};
    #[test]
    fn queue_depth_validator_test_default() {
        assert!(args::queue_depth_validator(args::QUEUE_DEPTH_DEFAULT.to_string()).is_ok());
    }
    #[test]
    fn queue_depth_validator_test_out_of_range() {
        assert!(
            args::queue_depth_validator((args::QUEUE_DEPTH_RANGE.end() + 1).to_string()).is_err()
        );
    }
    #[test]
    fn block_size_validator_test_default() {
        assert!(args::block_size_validator(args::BLOCK_SIZE_DEFAULT.to_string()).is_ok());
    }
    #[test]
    fn block_size_validator_test_out_of_range() {
        assert!(args::block_size_validator((args::BLOCK_SIZE_RANGE.end() + 1).to_string()).is_err());
    }
    #[test]
    fn max_io_size_validator_test_default() {
        assert!(args::max_io_size_validator(args::MAX_IO_SIZE_DEFAULT.to_string()).is_ok());
    }
    #[test]
    fn max_io_size_validator_test_out_of_range() {
        assert!(
            args::max_io_size_validator((args::MAX_IO_SIZE_RANGE.end() + 1).to_string()).is_err()
        );
    }
    #[test]
    fn max_io_count_validator_test_default() {
        assert!(args::max_io_count_validator(args::MAX_IO_COUNT_DEFAULT.to_string()).is_ok());
    }
    #[test]
    fn max_io_count_validator_test_out_of_range() {
        assert!(
            args::max_io_count_validator((args::MAX_IO_COUNT_RANGE.end() + 1).to_string()).is_err()
        );
    }
    #[test]
    fn thread_count_validator_test_default() {
        assert!(args::thread_count_validator(args::THREAD_COUNT_DEFAULT.to_string()).is_ok());
    }
    #[test]
    fn thread_count_validator_test_out_of_range() {
        assert!(
            args::thread_count_validator((args::THREAD_COUNT_RANGE.end() + 1).to_string()).is_err()
        );
    }
    #[test]
    fn target_operations_validator_test_valid_inputs() {
        let allowed_ops = allowed_ops(AvailableTargets::FileTarget);
        // We know that "write" is allowed for files. Input "write" to the
        // function and expect success.
        assert_eq!(allowed_ops.enabled("write"), true);
        assert!(
            args::target_operations_validator(AvailableTargets::FileTarget, &vec!["write"]).is_ok()
        );
    }
    #[test]
    fn target_operations_validator_test_invalid_input_nonexistant_operation() {
        let ret = args::target_operations_validator(AvailableTargets::FileTarget, &vec!["hello"]);
        assert!(ret.is_err());
        assert_eq!(ret.err(), Some(args::Error::OperationNotSupported));
    }
    #[test]
    fn target_operations_validator_test_invalid_input_disallowed_operation() {
        let allowed_ops = allowed_ops(AvailableTargets::FileTarget);
        // We know that "open" is not *yet* allowed for files. Input "open" to the
        // function and expect an error.
        assert_eq!(allowed_ops.enabled("open"), false);
        assert!(
            args::target_operations_validator(AvailableTargets::FileTarget, &vec!["open"]).is_err()
        );
    }
}
|
use std::time::Instant;
use hardware::peripherals::time::Time;
/// Wall-clock-backed implementation of the `Time` peripheral for simulation:
/// reports time elapsed since construction.
pub struct SimulatedTime {
    // Instant captured at construction; `now()` reports time relative to this.
    start: Instant,
}
impl SimulatedTime {
    /// Creates a simulated clock whose epoch is the moment of construction.
    pub fn new() -> SimulatedTime {
        let start = Instant::now();
        SimulatedTime { start }
    }
}
impl Time for SimulatedTime {
    /// Microseconds elapsed since construction.
    ///
    /// NOTE(review): `as_secs() as u32 * 1_000_000` overflows once ~71.6
    /// minutes (u32 microseconds) have elapsed — panics in debug builds,
    /// wraps in release. Confirm the `Time` contract tolerates this.
    fn now(&self) -> u32{
        let now = self.start.elapsed();
        // Whole seconds converted to µs, plus the sub-second remainder in µs.
        let now_micros = now.as_secs() as u32 * 1000_u32 * 1000_u32 + now.subsec_nanos() / 1000_u32;
        //println!("Now: {}", now_micros);
        now_micros
    }
    /// Not implemented for the simulated clock; panics if called.
    fn delay(&self, delay: u32){
        unimplemented!();
    }
}
|
use crate::d6_39_id_generator::GenData;
// We always need generation data; e.g. a drop of an old generation of an object must not drop it!
//
/// Generic component storage for an entity-component system, keyed by
/// generational handles (`GenData`) so stale handles cannot touch newer data.
pub trait EcsStore<T> {
    /// Stores `t` under the handle `g`.
    fn add(&mut self, g: GenData, t: T);
    /// Removes the entry for `g` — only if the stored generation matches.
    fn drop(&mut self, g: GenData);
    /// Borrows the entry for `g`, or `None` if absent / generation mismatch.
    fn get(&self, g: GenData) -> Option<&T>;
    /// Mutably borrows the entry for `g`, or `None` if absent / mismatched.
    fn get_mut(&mut self, g: GenData) -> Option<&mut T>;
    /// Calls `f` for every live entry with its handle.
    fn for_each<F: FnMut(GenData, &T)>(&self, f: F);
    /// Calls `f` for every live entry with mutable access.
    fn for_each_mut<F: FnMut(GenData, &mut T)>(&mut self, f: F);
}
/// `EcsStore` backed by a `Vec` indexed by `GenData::pos`; each slot holds
/// the generation it was written with so stale handles are detected.
pub struct VecStore<T> {
    // slot i: None if empty, Some((generation, data)) otherwise.
    items: Vec<Option<(u64, T)>>,
}
impl<T> VecStore<T> {
pub fn new() -> Self {
Self { items: vec![] }
}
}
impl<T> EcsStore<T> for VecStore<T> {
    /// Stores `t` at `g.pos`, growing the backing vector on demand and
    /// overwriting whatever generation previously occupied the slot.
    fn add(&mut self, g: GenData, t: T) {
        if self.items.len() <= g.pos {
            self.items.resize_with(g.pos + 1, || None);
        }
        self.items[g.pos] = Some((g.gen, t));
    }
    /// Clears the slot only when the stored generation matches `g.gen`, so a
    /// stale handle cannot drop a newer object.
    fn drop(&mut self, g: GenData) {
        if let Some(slot) = self.items.get_mut(g.pos) {
            match slot {
                Some((gen, _)) if *gen == g.gen => *slot = None,
                _ => {}
            }
        }
    }
    /// Borrows the entry at `g.pos` when its generation matches.
    fn get(&self, g: GenData) -> Option<&T> {
        match self.items.get(g.pos) {
            Some(Some((gen, data))) if *gen == g.gen => Some(data),
            _ => None,
        }
    }
    /// Mutably borrows the entry at `g.pos` when its generation matches.
    fn get_mut(&mut self, g: GenData) -> Option<&mut T> {
        match self.items.get_mut(g.pos) {
            Some(Some((gen, data))) if *gen == g.gen => Some(data),
            _ => None,
        }
    }
    /// Visits every occupied slot, rebuilding its handle from position and
    /// stored generation.
    fn for_each<F: FnMut(GenData, &T)>(&self, mut f: F) {
        for (pos, slot) in self.items.iter().enumerate() {
            if let Some((gen, data)) = slot {
                f(GenData { pos, gen: *gen }, data);
            }
        }
    }
    /// Mutable variant of `for_each`.
    fn for_each_mut<F: FnMut(GenData, &mut T)>(&mut self, mut f: F) {
        for (pos, slot) in self.items.iter_mut().enumerate() {
            if let Some((gen, data)) = slot {
                f(GenData { pos, gen: *gen }, data);
            }
        }
    }
}
|
mod mock;
mod request;
mod responder;
mod response;
pub mod socks4;
pub use mock::Mock;
pub use request::Request;
pub use responder::Responder;
pub use response::Response;
/// Macro to define a mock endpoint using a more concise DSL.
///
/// Recognized keys (processed left to right, each mutating the response):
/// `status:` (sets `status_code`), `body:` / `body_reader:` (set the body),
/// `transfer_encoding:` (a truthy value clears `body_len`), `delay:` (a
/// humantime duration literal; the responder sleeps before replying), and a
/// `headers { "name": value, ... }` block appended to `response.headers`.
#[macro_export]
macro_rules! mock {
    // status: — set the numeric status code.
    (@response($response:expr) status: $status:expr, $($tail:tt)*) => {{
        let mut response = $response;
        response.status_code = $status as u16;
        $crate::mock!(@response(response) $($tail)*)
    }};
    // body: — buffered body.
    (@response($response:expr) body: $body:expr, $($tail:tt)*) => {{
        let mut response = $response;
        response = response.with_body_buf($body);
        $crate::mock!(@response(response) $($tail)*)
    }};
    // body_reader: — streamed body from a reader.
    (@response($response:expr) body_reader: $body:expr, $($tail:tt)*) => {{
        let mut response = $response;
        response = response.with_body_reader($body);
        $crate::mock!(@response(response) $($tail)*)
    }};
    // transfer_encoding: — truthy value drops the known body length,
    // forcing chunked transfer encoding.
    (@response($response:expr) transfer_encoding: $value:expr, $($tail:tt)*) => {{
        let mut response = $response;
        if $value {
            response.body_len = None;
        }
        $crate::mock!(@response(response) $($tail)*)
    }};
    // delay: — sleep inside the responder before producing the response.
    (@response($response:expr) delay: $delay:tt, $($tail:tt)*) => {{
        let duration = $crate::helpers::parse_duration(stringify!($delay));
        ::std::thread::sleep(duration);
        $crate::mock!(@response($response) $($tail)*)
    }};
    // headers { "name": value, ... } — append each pair to the header list.
    (@response($response:expr) headers {
        $(
            $name:literal: $value:expr,
        )*
    } $($tail:tt)*) => {{
        let mut response = $response;
        $(
            response.headers.push(($name.to_string(), $value.to_string()));
        )*
        $crate::mock!(@response(response) $($tail)*)
    }};
    // Base case: no keys left, yield the built response.
    (@response($response:expr)) => {{
        $response
    }};
    // Entry point: wrap the key list in a Responder built from a closure.
    ($($inner:tt)*) => {{
        struct Responder<F>(F);
        impl<F> $crate::Responder for Responder<F>
        where
            F: Send + Sync + 'static + Fn($crate::Request) -> Option<$crate::Response>,
        {
            fn respond(&self, request: $crate::Request) -> Option<$crate::Response> {
                (self.0)(request)
            }
        }
        $crate::Mock::new(Responder(move |request| {
            let mut response = $crate::Response::default();
            let response = $crate::mock!(@response(response) $($inner)*);
            Some(response)
        }))
    }};
}
#[doc(hidden)]
pub mod helpers {
    use std::time::Duration;
    /// Parses a humantime duration string (e.g. "100ms", "2s").
    ///
    /// Panics with a message naming the offending input on failure; this is
    /// only invoked from `mock!` with test-authored literals, so a panic is
    /// the right failure mode — but the bare `unwrap` previously hid which
    /// string was invalid.
    pub fn parse_duration(s: &str) -> Duration {
        humantime::parse_duration(s)
            .unwrap_or_else(|e| panic!("invalid duration {:?}: {}", s, e))
    }
}
|
use map::{RoomNum, MAP};
use rand::{thread_rng, Rng};
use std::collections::HashSet;
use std::io;
use std::io::Write;
/// Reads a line from stdin and returns it trimmed and upper-cased.
pub fn read_sanitized_line() -> String {
    let raw = read_line();
    raw.trim().to_uppercase()
}
/// Reads one raw line from stdin (including the trailing newline).
/// Panics if stdin cannot be read.
pub fn read_line() -> String {
    let mut buffer = String::new();
    io::stdin()
        .read_line(&mut buffer)
        .expect("Failed to read line.");
    buffer
}
// Print without new line and flush to force it to show up.
pub fn print(s: &str) {
    print!("{}", s);
    // stdout is line-buffered; without an explicit flush the text would not
    // appear until the next newline is written.
    io::stdout().flush().unwrap();
}
/// Picks six distinct random rooms for the player, wumpus, two pits and two
/// bats, returned in that order.
pub fn gen_unique_rooms() -> (RoomNum, RoomNum, RoomNum, RoomNum, RoomNum, RoomNum) {
    let mut taken_rooms = HashSet::new();
    // Draw one fresh room and reserve it so later draws exclude it.
    let draw = |taken: &mut HashSet<RoomNum>| {
        let room = gen_unique_rand_room(taken);
        taken.insert(room);
        room
    };
    let player = draw(&mut taken_rooms);
    let pit1 = draw(&mut taken_rooms);
    let pit2 = draw(&mut taken_rooms);
    let bat1 = draw(&mut taken_rooms);
    let bat2 = draw(&mut taken_rooms);
    let wumpus = draw(&mut taken_rooms);
    (player, wumpus, pit1, pit2, bat1, bat2)
}
/// Returns a random room number in `1..=MAP.len()` that is not present in
/// `taken_rooms`.
///
/// NOTE(review): spins forever if `taken_rooms` covers every room; callers
/// here reserve at most six rooms, so this assumes MAP has more than six.
pub fn gen_unique_rand_room(taken_rooms: &HashSet<RoomNum>) -> RoomNum {
    let mut rng = thread_rng();
    loop {
        // gen_range is half-open: [1, MAP.len() + 1) == 1..=MAP.len().
        let room: RoomNum = rng.gen_range(1, MAP.len() + 1);
        if !taken_rooms.contains(&room) {
            return room;
        }
    }
}
|
mod results;
pub use self::results::Results;
use self::results::*;
use super::Display;
use std::cmp::max;
use item::Equipment;
/// Combat state, ie. information retained between combat rounds.
pub struct Combat {
    // Number of rounds resolved so far (incremented by `apply_round`).
    pub duration: i32,
    // Results of the most recent round, or the begin/end summary.
    pub results: Results,
}
/// Interface every participant in a combat must provide.
pub trait Combatant: Display {
    /// Current life total.
    fn life(&self) -> i32;
    /// Sets the life total. (The i32 return is unused by `Combat`;
    /// presumably the resulting life — TODO confirm.)
    fn set_life(&mut self, amount: i32) -> i32;
    /// Whether this combatant is still able to fight.
    fn can_combat(&self) -> bool;
    /// Actions this combatant has queued for the coming round.
    fn action_buffer(&self) -> ActionBuffer;
    /// Damage dealt per landed hit.
    fn damage(&self) -> i32;
    // FIXME: temporary, used to find the item that the combatant most likely uses for hitting stuff
    fn best_weapon(&self) -> &Equipment;
    // TODO: do something about stamina
}
/// Buffer of actions a combatant has queued for a round, bounded by
/// `max_actions`.
#[derive(Clone)]
pub struct ActionBuffer {
    // Queued actions; `push` refuses to exceed `max_actions`.
    actions: Vec<Action>,
    // Capacity of the buffer.
    max_actions: usize,
}
impl ActionBuffer {
    /// Creates an empty buffer that can hold up to `max_actions` actions.
    pub fn new(max_actions: usize) -> ActionBuffer {
        ActionBuffer {
            actions: vec![],
            max_actions,
        }
    }
    /// Number of action slots currently in use.
    pub fn duration_reserved(&self) -> usize {
        // `len()` is O(1); the previous `iter().count()` walked the vector.
        self.actions.len()
    }
    /// Number of action slots still available.
    pub fn duration_free(&self) -> usize {
        self.max_actions - self.duration_reserved()
    }
    /// Appends `act`; returns `false` (without adding) when the buffer is
    /// already at `max_actions`.
    pub fn push(&mut self, act: &Action) -> bool {
        // Early out if cannot add
        if self.actions.len() == self.max_actions {
            return false;
        }
        // Add the action to buffer
        self.actions.push(act.clone());
        true
    }
    /// Removes all buffered actions.
    pub fn clear(&mut self) {
        self.actions.clear();
    }
    /// Number of buffered actions equal to `action`.
    pub fn count(&self, action: &Action) -> usize {
        self.actions
            .iter()
            .filter(|&act| act == action)
            .count()
    }
    /// Total round duration consumed by `action`. Every action currently
    /// costs 1, so this delegates to `count` (the bodies were duplicated).
    pub fn duration_of(&self, action: &Action) -> usize {
        self.count(action)
    }
}
impl Default for ActionBuffer {
    /// Default behavior: capacity for exactly one action, pre-filled with a
    /// single `Attack`.
    fn default() -> ActionBuffer {
        ActionBuffer {
            actions: vec![Action::Attack],
            max_actions: 1,
        }
    }
}
/// Things that the combatants may do in the combat.
#[derive(Clone, Eq, PartialEq)]
pub enum Action {
    //Evade,
    //Block,
    /// A plain attack; always hits in the current implementation.
    Attack,
}
impl<'a> From<&'a Action> for String {
fn from(action: &'a Action) -> String {
use Action::*;
match *action {
Attack => "Attack".to_owned(),
}
}
}
/// All that actually happened (to a target).
#[derive(Clone, Eq, PartialEq)]
enum Outcome {
    /// An attack that did not connect (not yet produced by `apply_round`).
    Miss,
    //Block,
    /// A landed attack carrying the damage dealt.
    Hit(i32),
    //Crit(i32),
    /// The target was reduced to a state where it can no longer fight.
    Killed,
}
impl Combat {
    /// Starts a new combat between two combatants and records the begin
    /// results.
    pub fn new<T: Combatant, U: Combatant>(combatant_a: &T, combatant_b: &U) -> Combat {
        Combat {
            duration: 0,
            results: ResultsBuilder::new(combatant_a, combatant_b).build_begin(),
        }
    }
    /// True once the results have reached the `End` state.
    pub fn has_ended(&self) -> bool {
        matches!(self.results, Results::End { .. })
    }
    /// Runs all remaining combat rounds and returns the end result.
    pub fn quick_combat<T: Combatant, U: Combatant>(
        &mut self,
        combatant_a: &mut T,
        // BUG FIX: was `&mut T`, which forced both combatants to be the same
        // type and left the `U` parameter unused/uninferable.
        combatant_b: &mut U,
    ) -> &Results {
        // Combat has already ended, return latest results
        if let Results::End { .. } = self.results {
            return &self.results;
        }
        // Fight until either party is unable to combat
        while Combat::can_combat(combatant_a, combatant_b) {
            // Apply rounds and discard intermediate results
            self.apply_round(combatant_a, combatant_b);
        }
        // Return last results only (ie. end results)
        &self.results
    }
    /// Resolves one combat round, mutating both combatants' life totals and
    /// recording the outcome in `self.results`.
    pub fn apply_round<'a, T: Combatant, U: Combatant>(
        &'a mut self,
        a: &mut T,
        b: &mut U,
    ) -> &'a Results {
        // Combat has already ended, return latest results
        if let Results::End { .. } = self.results {
            return &self.results;
        }
        // Do combat calculations
        let results = {
            use Action::*;
            let a_buffer = &a.action_buffer();
            // BUG FIX: previously read `a.action_buffer()` again here, so
            // b's chosen actions were silently ignored.
            let b_buffer = &b.action_buffer();
            // Count number of different actions
            let num_atks_by_a = a_buffer.count(&Attack) as i32;
            let num_atks_by_b = b_buffer.count(&Attack) as i32;
            // Resolve outcomes: every attack currently lands (no miss logic yet).
            let num_hits_to_a = num_atks_by_b;
            let num_hits_to_b = num_atks_by_a;
            let num_misses_to_a = 0;
            let num_misses_to_b = 0;
            use self::Outcome::*;
            let mut outcomes_a = vec![Miss; num_misses_to_a as usize];
            for _ in 0..num_hits_to_a {
                outcomes_a.push(Hit(b.damage()));
            }
            let mut outcomes_b = vec![Miss; num_misses_to_b as usize];
            for _ in 0..num_hits_to_b {
                outcomes_b.push(Hit(a.damage()));
            }
            // TODO: make combat cooler by taking into account hits with each item used as a weapon.
            // TODO: use outcomes to do the calculation
            // Resolve a -> b
            let a_life = a.life();
            a.set_life(a_life - num_hits_to_a * b.damage());
            // Resolve b -> a
            let b_life = b.life();
            b.set_life(b_life - num_hits_to_b * a.damage());
            if !a.can_combat() {
                outcomes_a.push(Killed);
            }
            if !b.can_combat() {
                outcomes_b.push(Killed);
            }
            let builder = ResultsBuilder::new(a, b).write_round(&outcomes_a, &outcomes_b);
            match (a.can_combat(), b.can_combat()) {
                (true, true) => builder.build_round(),
                (true, false) => builder.build_end(CombatantId::A, self.duration),
                (false, true) => builder.build_end(CombatantId::B, self.duration),
                // TODO: improve handling of ties
                (false, false) => builder.build_end(CombatantId::B, self.duration),
            }
        };
        self.duration += 1;
        self.results = results;
        &self.results
    }
    /// True while both combatants are still able to fight.
    pub fn can_combat<T: Combatant, U: Combatant>(a: &T, b: &U) -> bool {
        a.can_combat() && b.can_combat()
    }
}
// TODO: this shouldn't be a part of the public interface
/// Identifies one of the two sides of a combat (e.g. passed to `build_end`
/// to record which side prevailed).
#[derive(Clone, Copy)]
pub enum CombatantId {
    A,
    B,
}
impl CombatantId {
    /// Resolves the id to the corresponding combatant as a trait object.
    pub fn to_combatant<'a, T: Combatant, U: Combatant>(
        &self,
        a: &'a T,
        b: &'a U,
    ) -> &'a dyn Combatant {
        // `dyn` makes the trait-object return explicit; the bare `&'a
        // Combatant` form is deprecated since Rust 2018.
        match self {
            CombatantId::A => a,
            CombatantId::B => b,
        }
    }
}
|
mod process;
use std::fs;
use std::io::Read;
use process::Process;
/// Builds a `Process` from a `/proc/<pid>` directory entry by reading its
/// `cmdline` file. Returns `None` (after logging) when the file is missing,
/// unreadable, empty, or the PID cannot be represented.
fn get_process_info(entry: &fs::DirEntry) -> Option<Process> {
    let cmdfile = entry.path().join("cmdline");
    let name = String::from(cmdfile.to_str().unwrap_or("no-name"));
    if !cmdfile.exists() {
        println!("Could not find cmdfile - {}", name);
        return None;
    }
    let mut cmdfile = match fs::File::open(cmdfile) {
        Err(e) => {
            println!("Failed to open file {} with error {}", name, e);
            return None;
        }
        Ok(f) => f,
    };
    let mut s = String::new();
    // A read failure is logged but not fatal; the empty-string check below
    // then yields None (kernel threads also legitimately have empty cmdline).
    if let Err(e) = cmdfile.read_to_string(&mut s) {
        println!("Failed to read file {}: error: {}", name, e);
    }
    // /proc/<pid>/cmdline separates arguments with NUL bytes.
    let args: Vec<&str> = s.split('\0').collect();
    let s = args.join(" ");
    if s.is_empty() {
        return None;
    }
    // BUG FIX: previously `unwrap()`ed the parse, panicking for PIDs above
    // u16::MAX (possible when the kernel's pid_max is raised). Skip such
    // entries instead, since `Process` stores the PID as u16.
    let pid = entry.file_name().into_string().ok()?.parse::<u16>().ok()?;
    let process = Process::new(pid, &s);
    Some(process)
}
/// A `/proc` subdirectory names a process when its name parses as a numeric
/// PID (u16, matching the representation used by `Process`).
fn is_process_dir(d: &str) -> bool {
    matches!(d.parse::<u16>(), Ok(_))
}
/// Walks `/proc` and prints one line per process directory for which
/// `get_process_info` succeeds. Errors from the directory walk itself are
/// returned as strings.
fn read_proc() -> Result<(), String> {
    // `?` replaces the long-deprecated `try!` macro.
    let entries = fs::read_dir("/proc").map_err(|e| e.to_string())?;
    for entry in entries {
        let entry = entry.map_err(|e| e.to_string())?;
        let metadata = entry.metadata().map_err(|e| e.to_string())?;
        if !metadata.is_dir() {
            continue;
        }
        let filename = entry
            .file_name()
            .into_string()
            // unwrap_or_else avoids allocating the fallback on every entry.
            .unwrap_or_else(|_| "unstringable".to_string());
        if is_process_dir(&filename) {
            if let Some(p) = get_process_info(&entry) {
                println!("{}", p);
            }
        }
    }
    Ok(())
}
fn main() {
    // Surface traversal errors; per-process failures are already logged
    // inside read_proc / get_process_info.
    if let Err(e) = read_proc() {
        println!("Failed to read proc with error {}", e);
    }
}
|
// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::switchboard::base::{SettingRequest, SettingRequestResponder, SettingType};
use failure::Error;
use futures::channel::mpsc::UnboundedSender;
/// Sender over which a changed `SettingType` is reported while listening.
pub type Notifier = UnboundedSender<SettingType>;
/// A command represents messaging from the registry to take a
/// particular action.
pub enum Command {
    /// Move into the given lifecycle state.
    ChangeState(State),
    /// Service a setting request, answering through the responder.
    HandleRequest(SettingRequest, SettingRequestResponder),
}
/// A given state the registry entity can move into.
pub enum State {
    /// Actively listening; changes are reported through the `Notifier`.
    Listen(Notifier),
    /// Stop listening for changes.
    EndListen,
}
/// The conductor of lifecycle and activity over a number of registered
/// entities.
pub trait Registry {
    /// Registers an entity to handle `setting_type`; `Command`s for the
    /// entity are delivered over `command_sender`.
    fn register(
        &mut self,
        setting_type: SettingType,
        command_sender: UnboundedSender<Command>,
    ) -> Result<(), Error>;
}
|
use super::*;
/// Win32 handle types implement this trait to simplify error handling.
/// # Safety
/// Implementors must be types for which the all-zero bit pattern is a valid
/// value representing the "invalid handle" state, since `is_invalid`
/// compares against `core::mem::zeroed()`.
pub unsafe trait Handle: Sized + PartialEq {
    /// True when the handle equals the all-zero (invalid) value.
    fn is_invalid(&self) -> bool {
        // SAFETY: the trait contract (above) requires that zeroed bytes form
        // a valid value of Self.
        *self == unsafe { core::mem::zeroed() }
    }
    /// Converts the handle into a `Result`, producing the thread's last
    /// Win32 error when the handle is invalid.
    fn ok(self) -> Result<Self> {
        if !self.is_invalid() {
            Ok(self)
        } else {
            Err(Error::from_win32())
        }
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.