text stringlengths 8 4.13M |
|---|
use chainerror::*;
use std::io;
/// Error kinds produced by the varlink transport and protocol layers.
///
/// Wrapped in a `chainerror::ChainError` (see the `Error`/`Result` aliases
/// below) so every error carries the file/line it was raised at plus an
/// optional boxed cause.
#[derive(Clone, PartialEq, Debug)]
pub enum ErrorKind {
    /// Low-level I/O failure; carries the originating `io::ErrorKind`.
    Io(::std::io::ErrorKind),
    /// serde_json error, classified by `serde_json::error::Category`.
    SerdeJsonSer(::serde_json::error::Category),
    /// Deserialization failure; the `String` is the offending input.
    SerdeJsonDe(String),
    /// The requested varlink interface does not exist.
    InterfaceNotFound(String),
    /// A call parameter was rejected; the `String` names it.
    InvalidParameter(String),
    /// The requested method does not exist on the interface.
    MethodNotFound(String),
    /// The method exists but has no implementation yet.
    MethodNotImplemented(String),
    /// The peer answered with a varlink-level error reply.
    VarlinkErrorReply(crate::Reply),
    /// `Call::reply()` used `continues` although the request did not set `more`.
    CallContinuesMismatch,
    /// A method was invoked twice on the same call object.
    MethodCalledAlready,
    /// The connection is already busy with another method call.
    ConnectionBusy,
    /// An iterator was advanced on a stale reply.
    IteratorOldReply,
    /// Unspecified server-side failure.
    Server,
    /// An operation timed out.
    Timeout,
    /// The peer closed the connection.
    ConnectionClosed,
    /// The varlink address URI could not be parsed.
    InvalidAddress,
    /// Catch-all kind; renders as an empty message (see `Display`).
    Generic,
}
// Marker impl: `Display` + `Debug` below satisfy the `Error` trait's defaults.
impl ::std::error::Error for ErrorKind {}
impl ::std::fmt::Display for ErrorKind {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
match self {
ErrorKind::Io(_) => write!(f, "IO error"),
ErrorKind::SerdeJsonSer(_) => write!(f, "JSON Serialization Error"),
ErrorKind::SerdeJsonDe(v) => write!(f, "JSON Deserialization Error of '{}'", v),
ErrorKind::InterfaceNotFound(v) => write!(f, "Interface not found: '{}'", v),
ErrorKind::InvalidParameter(v) => write!(f, "Invalid parameter: '{}'", v),
ErrorKind::MethodNotFound(v) => write!(f, "Method not found: '{}'", v),
ErrorKind::MethodNotImplemented(v) => write!(f, "Method not implemented: '{}'", v),
ErrorKind::VarlinkErrorReply(v) => write!(f, "Varlink error reply: '{:#?}'", v),
ErrorKind::CallContinuesMismatch => write!(
f,
"Call::reply() called with continues, but without more in the request"
),
ErrorKind::MethodCalledAlready => write!(f, "Varlink: method called already"),
ErrorKind::ConnectionBusy => write!(f, "Varlink: connection busy with other method"),
ErrorKind::IteratorOldReply => write!(f, "Varlink: Iterator called on old reply"),
ErrorKind::Server => write!(f, "Server Error"),
ErrorKind::Timeout => write!(f, "Timeout Error"),
ErrorKind::ConnectionClosed => write!(f, "Connection Closed"),
ErrorKind::InvalidAddress => write!(f, "Invalid varlink address URI"),
ErrorKind::Generic => Ok(()),
}
}
}
impl ChainErrorFrom<std::io::Error> for ErrorKind {
    /// Converts an `io::Error` into a chained `ErrorKind`, collapsing the
    /// disconnect-style kinds into `ConnectionClosed`.
    fn chain_error_from(
        e: io::Error,
        line_filename: Option<(u32, &'static str)>,
    ) -> ChainError<Self> {
        // Classify first; `kind()` yields a `Copy` value, so `e` can still be
        // moved into the boxed cause afterwards.
        let kind = match e.kind() {
            io::ErrorKind::BrokenPipe
            | io::ErrorKind::ConnectionAborted
            | io::ErrorKind::ConnectionReset => ErrorKind::ConnectionClosed,
            other => ErrorKind::Io(other),
        };
        ChainError::<_>::new(kind, Some(Box::from(e)), line_filename)
    }
}
impl ChainErrorFrom<serde_json::error::Error> for ErrorKind {
    /// Wraps a serde_json error, recording its `Category` via `classify()`
    /// and boxing the original error as the chained cause.
    ///
    /// NOTE(review): every serde_json error is mapped to `SerdeJsonSer`,
    /// including deserialization failures; `SerdeJsonDe` is presumably
    /// constructed elsewhere with the offending input — confirm.
    fn chain_error_from(
        e: serde_json::error::Error,
        line_filename: Option<(u32, &'static str)>,
    ) -> ChainError<Self> {
        ChainError::<_>::new(
            ErrorKind::SerdeJsonSer(e.classify()),
            Some(Box::from(e)),
            line_filename,
        )
    }
}
/// Convenience alias: a `Result` whose error is a location-chained `ErrorKind`.
pub type Result<T> = ChainResult<T, ErrorKind>;
/// The crate-wide error type: `ErrorKind` wrapped with chain/location context.
pub type Error = ChainError<ErrorKind>;
|
#[doc = r"Value read from the register"]
pub struct R {
    // Raw 32-bit snapshot of the ISC register at read time.
    bits: u32,
}
#[doc = r"Value to write to the register"]
pub struct W {
    // Raw 32-bit value accumulated by the field write proxies below.
    bits: u32,
}
impl super::ISC {
    #[doc = r"Modifies the contents of the register"]
    #[inline(always)]
    pub fn modify<F>(&self, f: F)
    where
        for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
    {
        // Read-modify-write: seed both proxies with the current hardware value.
        let current = self.register.get();
        let reader = R { bits: current };
        let mut writer = W { bits: current };
        self.register.set(f(&reader, &mut writer).bits);
    }
    #[doc = r"Reads the contents of the register"]
    #[inline(always)]
    pub fn read(&self) -> R {
        let bits = self.register.get();
        R { bits }
    }
    #[doc = r"Writes to the register"]
    #[inline(always)]
    pub fn write<F>(&self, f: F)
    where
        F: FnOnce(&mut W) -> &mut W,
    {
        // Plain writes start from the reset value, not the current contents.
        let mut writer = W {
            bits: Self::reset_value(),
        };
        self.register.set(f(&mut writer).bits);
    }
    #[doc = r"Reset value of the register"]
    #[inline(always)]
    pub const fn reset_value() -> u32 {
        0
    }
    #[doc = r"Writes the reset value to the register"]
    #[inline(always)]
    pub fn reset(&self) {
        self.register.set(Self::reset_value())
    }
}
#[doc = r"Value of the field"]
pub struct QEI_ISC_INDEXR {
    // `true` when the Index Pulse Interrupt flag was set at read time.
    bits: bool,
}
impl QEI_ISC_INDEXR {
    #[doc = r"Value of the field as raw bits"]
    #[inline(always)]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r"Returns `true` if the bit is set (1)"]
    #[inline(always)]
    pub fn bit_is_set(&self) -> bool {
        self.bits
    }
    #[doc = r"Returns `true` if the bit is clear (0)"]
    #[inline(always)]
    pub fn bit_is_clear(&self) -> bool {
        !self.bits
    }
}
#[doc = r"Proxy"]
pub struct _QEI_ISC_INDEXW<'a> {
    w: &'a mut W,
}
impl<'a> _QEI_ISC_INDEXW<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Field occupies bit 0.
        let mask: u32 = 1 << 0;
        if value {
            self.w.bits |= mask;
        } else {
            self.w.bits &= !mask;
        }
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = r"Value of the field"]
pub struct QEI_ISC_TIMERR {
    // `true` when the Velocity Timer Expired flag was set at read time.
    bits: bool,
}
impl QEI_ISC_TIMERR {
    #[doc = r"Value of the field as raw bits"]
    #[inline(always)]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r"Returns `true` if the bit is set (1)"]
    #[inline(always)]
    pub fn bit_is_set(&self) -> bool {
        self.bits
    }
    #[doc = r"Returns `true` if the bit is clear (0)"]
    #[inline(always)]
    pub fn bit_is_clear(&self) -> bool {
        !self.bits
    }
}
#[doc = r"Proxy"]
pub struct _QEI_ISC_TIMERW<'a> {
    w: &'a mut W,
}
impl<'a> _QEI_ISC_TIMERW<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Field occupies bit 1.
        let mask: u32 = 1 << 1;
        if value {
            self.w.bits |= mask;
        } else {
            self.w.bits &= !mask;
        }
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = r"Value of the field"]
pub struct QEI_ISC_DIRR {
    // `true` when the Direction Change flag was set at read time.
    bits: bool,
}
impl QEI_ISC_DIRR {
    #[doc = r"Value of the field as raw bits"]
    #[inline(always)]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r"Returns `true` if the bit is set (1)"]
    #[inline(always)]
    pub fn bit_is_set(&self) -> bool {
        self.bits
    }
    #[doc = r"Returns `true` if the bit is clear (0)"]
    #[inline(always)]
    pub fn bit_is_clear(&self) -> bool {
        !self.bits
    }
}
#[doc = r"Proxy"]
pub struct _QEI_ISC_DIRW<'a> {
    w: &'a mut W,
}
impl<'a> _QEI_ISC_DIRW<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Field occupies bit 2.
        let mask: u32 = 1 << 2;
        if value {
            self.w.bits |= mask;
        } else {
            self.w.bits &= !mask;
        }
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = r"Value of the field"]
pub struct QEI_ISC_ERRORR {
    // `true` when the Phase Error flag was set at read time.
    bits: bool,
}
impl QEI_ISC_ERRORR {
    #[doc = r"Value of the field as raw bits"]
    #[inline(always)]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r"Returns `true` if the bit is set (1)"]
    #[inline(always)]
    pub fn bit_is_set(&self) -> bool {
        self.bits
    }
    #[doc = r"Returns `true` if the bit is clear (0)"]
    #[inline(always)]
    pub fn bit_is_clear(&self) -> bool {
        !self.bits
    }
}
#[doc = r"Proxy"]
pub struct _QEI_ISC_ERRORW<'a> {
    w: &'a mut W,
}
impl<'a> _QEI_ISC_ERRORW<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Field occupies bit 3.
        let mask: u32 = 1 << 3;
        if value {
            self.w.bits |= mask;
        } else {
            self.w.bits &= !mask;
        }
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
impl R {
    #[doc = r"Value of the register as raw bits"]
    #[inline(always)]
    pub fn bits(&self) -> u32 {
        self.bits
    }
    // Extracts bit `offset` of the register snapshot as a bool.
    #[inline(always)]
    fn bit_at(&self, offset: u32) -> bool {
        (self.bits >> offset) & 1 != 0
    }
    #[doc = "Bit 0 - Index Pulse Interrupt"]
    #[inline(always)]
    pub fn qei_isc_index(&self) -> QEI_ISC_INDEXR {
        QEI_ISC_INDEXR {
            bits: self.bit_at(0),
        }
    }
    #[doc = "Bit 1 - Velocity Timer Expired Interrupt"]
    #[inline(always)]
    pub fn qei_isc_timer(&self) -> QEI_ISC_TIMERR {
        QEI_ISC_TIMERR {
            bits: self.bit_at(1),
        }
    }
    #[doc = "Bit 2 - Direction Change Interrupt"]
    #[inline(always)]
    pub fn qei_isc_dir(&self) -> QEI_ISC_DIRR {
        QEI_ISC_DIRR {
            bits: self.bit_at(2),
        }
    }
    #[doc = "Bit 3 - Phase Error Interrupt"]
    #[inline(always)]
    pub fn qei_isc_error(&self) -> QEI_ISC_ERRORR {
        QEI_ISC_ERRORR {
            bits: self.bit_at(3),
        }
    }
}
impl W {
    #[doc = r"Writes raw bits to the register"]
    // `unsafe`: callers may write reserved bit patterns the hardware rejects.
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
    // Each method below hands out a write proxy for one field; the proxy
    // mutates `self.bits` and returns `&mut W` so calls can be chained.
    #[doc = "Bit 0 - Index Pulse Interrupt"]
    #[inline(always)]
    pub fn qei_isc_index(&mut self) -> _QEI_ISC_INDEXW {
        _QEI_ISC_INDEXW { w: self }
    }
    #[doc = "Bit 1 - Velocity Timer Expired Interrupt"]
    #[inline(always)]
    pub fn qei_isc_timer(&mut self) -> _QEI_ISC_TIMERW {
        _QEI_ISC_TIMERW { w: self }
    }
    #[doc = "Bit 2 - Direction Change Interrupt"]
    #[inline(always)]
    pub fn qei_isc_dir(&mut self) -> _QEI_ISC_DIRW {
        _QEI_ISC_DIRW { w: self }
    }
    #[doc = "Bit 3 - Phase Error Interrupt"]
    #[inline(always)]
    pub fn qei_isc_error(&mut self) -> _QEI_ISC_ERRORW {
        _QEI_ISC_ERRORW { w: self }
    }
}
|
use crate::disassembler::Instruction;
/// CHIP-8-style virtual machine state (4 KiB memory, 16 registers,
/// 16-level call stack, delay/sound timers).
pub struct Cpu {
    /// Main memory; programs are loaded starting at 0x200 (see `load_program`).
    pub memory: [u8; 4096],
    /// General-purpose registers V0..VF.
    pub registers: [u8; 16],
    /// Call stack of return addresses.
    pub stack: [u16; 16],
    /// Index register (I).
    pub i: u16,
    // NOTE(review): VF is conventionally registers[15]; this separate field's
    // role vs. `registers` is unclear from here — confirm.
    pub vf: u8,
    /// Delay timer, decremented at the machine's timer rate.
    pub delay_timer: u8,
    /// Sound timer, decremented at the machine's timer rate.
    pub sound_timer: u8,
    /// Program counter.
    pub pc: u16,
    /// Stack pointer into `stack`.
    pub sp: u8,
}
impl Cpu {
    /// Serializes `program` to big-endian opcode bytes and copies them into
    /// memory starting at 0x200, the conventional CHIP-8 program origin.
    ///
    /// Panics if the program does not fit between 0x200 and the end of memory
    /// (slice length mismatch).
    pub fn load_program(&mut self, program: &Vec<Instruction>) {
        let mut bytes: Vec<u8> = Vec::with_capacity(program.len() * 2);
        for instruction in program {
            // High byte first. BUG FIX: the original wrote
            // `(opcode & 0xff00) as u8`, which always truncates to 0 because
            // the cast keeps only the low 8 bits — the high byte of every
            // opcode was lost. Shift down before casting instead.
            bytes.push((instruction.opcode >> 8) as u8);
            bytes.push((instruction.opcode & 0x00ff) as u8);
        }
        self.memory[0x200..0x200 + bytes.len()].copy_from_slice(&bytes);
        // (debug `println!` of memory[0x205] removed — leftover diagnostic)
    }
}
|
use crate::config::config;
use crate::error::HandleError;
use crate::error::HandleError::SessionError;
use anyhow::Result;
use model::*;
use redis::{Commands, Connection, RedisResult};
use repository::AccountByGoogleId;
use warp::Rejection;
/// Name under which the session token travels (cookie/header key).
pub const SESSION_KEY: &str = "session-token";
/// Session TTL in Redis: 30 days, expressed in seconds.
const EXPIRE_SECONDS: usize = 30 * 24 * 60 * 60;
/// Opens a fresh Redis connection using the configured URL.
pub fn get_client() -> RedisResult<Connection> {
    redis::Client::open(config().redis_url)?.get_connection()
}
/// Stores `user_id` under a freshly generated session key with a 30-day TTL
/// and returns that key.
pub fn save_user_id(user_id: GoogleId) -> Result<String> {
    let mut conn = get_client()?;
    let session_key = generate_session_key();
    // SETEX; the returned "OK" string is ignored.
    let _: String = conn.set_ex(session_key.clone(), user_id.to_string(), EXPIRE_SECONDS)?;
    Ok(session_key)
}
/// Deletes the session entry for `key` from Redis.
///
/// The result of `DEL` is deliberately discarded, so removing an already
/// expired or unknown key is not treated as an error.
pub fn remove_session(key: &str) -> Result<(), HandleError> {
    let mut redis_connection = get_client()?;
    let _ = redis_connection.del(key)?;
    Ok(())
}
/// Resolves the session `key` to the stored Google user id.
///
/// Redis failures (presumably including a missing/expired key — confirm) are
/// wrapped in `HandleError::SessionError`.
pub fn get_user_id(key: &str) -> Result<GoogleId, HandleError> {
    let mut redis_connection = get_client()?;
    Ok(GoogleId::new(
        redis_connection.get(key).map_err(SessionError)?,
    ))
}
/// Fetches the account for `user_id` from the given repository.
pub fn get_account<C: AccountByGoogleId>(
    mut repos: C,
    user_id: GoogleId,
) -> Result<Account, HandleError> {
    // The repository's error converts into `HandleError` via `?`.
    let account = repos.user(&user_id)?;
    Ok(account)
}
/// warp filter helper: resolves a session token to its account, rejecting
/// the request when either lookup fails.
pub async fn get_account_by_session<C: AccountByGoogleId>(
    repos: C,
    key: String,
) -> Result<Account, Rejection> {
    let google_id = get_user_id(&key)?;
    let account = get_account(repos, google_id)?;
    Ok(account)
}
/// Produces a session key from 32 bytes of a cryptographically secure RNG.
fn generate_session_key() -> String {
    use rand::prelude::*;
    use rand_chacha::ChaCha20Rng;
    // ChaCha20 seeded from OS entropy — suitable for session tokens.
    let mut rng = ChaCha20Rng::from_entropy();
    let mut raw = [0u8; 32];
    rng.fill_bytes(&mut raw);
    join(&raw)
}
/// Concatenates the decimal form of each byte with no separator.
///
/// NOTE(review): this encoding is not injective ([1, 21] and [12, 1] both
/// yield "121"), which slightly reduces token entropy — hex would be
/// collision-free, but changing it would invalidate existing sessions.
fn join(data: &[u8]) -> String {
    let mut out = String::with_capacity(data.len() * 3);
    for byte in data {
        out.push_str(&byte.to_string());
    }
    out
}
#[cfg(test)]
mod test {
    use super::*;
    // Pins `join`'s format: decimal byte values concatenated without separator.
    #[test]
    fn test() {
        let data = [1u8, 2, 3, 4, 5, 6, 7, 8, 123];
        assert_eq!("12345678123".to_string(), join(&data))
    }
}
|
/*!
All libsodacrypt apis will return an errors::Result.
*/
// `error_chain!` generates `Error`, `ErrorKind`, `Result` and `ResultExt`
// for this module. (Comments are stripped before macro expansion, so these
// annotations are safe inside the invocation.)
error_chain! {
    // No links to other error_chain-based crates.
    links {
    }
    // External error types that convert into `Error` via `From`.
    foreign_links {
        Io(::std::io::Error);
    }
    // Unit error kinds describing key/crypto validation failures.
    errors {
        InvalidPubKey
        InvalidPrivKey
        InvalidSeed
        InvalidSignature
        InvalidNonce
        InvalidPresharedKey
        InvalidClientPubKey
        InvalidClientPrivKey
        InvalidServerPubKey
        InvalidServerPrivKey
        FailedLibSodiumInit
        FailedToDecrypt
    }
}
|
pub mod algorithm_binary;
pub mod chip;
pub mod chip_family;
pub mod flash_device;
pub mod parser;
pub mod raw_flash_algorithm;
use std::collections::HashMap;
use std::fs::{self, create_dir, File};
use std::io::Read;
use std::path::{Path, PathBuf};

use anyhow::{anyhow, bail, ensure, Context, Result};
use cmsis_pack::pdsc::{Core, Device, Package, Processors};
use cmsis_pack::utils::FromElem;
use log;
use pretty_env_logger;
use probe_rs::config::{FlashRegion, MemoryRegion, RamRegion};
use structopt::StructOpt;

use crate::chip::Chip;
use crate::chip_family::ChipFamily;
use crate::raw_flash_algorithm::RawFlashAlgorithm;
/// Command-line arguments for the target-generation tool.
#[derive(StructOpt)]
struct Options {
    // Either a CMSIS-Pack archive or an already-extracted Pack directory.
    #[structopt(
        name = "INPUT",
        parse(from_os_str),
        help = "A Pack file or the unzipped Pack directory."
    )]
    input: PathBuf,
    // Receives one generated .yaml target description per chip family.
    #[structopt(
        name = "OUTPUT",
        parse(from_os_str),
        help = "An output directory where all the generated .yaml files are put in."
    )]
    output_dir: PathBuf,
}
/// Entry point: parses CLI options, collects `ChipFamily` definitions from a
/// Pack file or directory tree, and writes one YAML file per family.
fn main() -> Result<()> {
    pretty_env_logger::init();
    let options = Options::from_args();
    // The directory in which to look for the .pdsc file.
    let input = options.input;
    let out_dir = options.output_dir;
    ensure!(
        input.exists(),
        "No such file or directory: {}",
        input.display()
    );
    // Create the output directory on demand.
    if !out_dir.exists() {
        create_dir(&out_dir).context(format!(
            "Failed to create output directory '{}'.",
            out_dir.display()
        ))?;
    }
    let mut families = Vec::<ChipFamily>::new();
    if input.is_file() {
        // A single Pack archive.
        visit_file(&input, &mut families)
            .context(format!("Failed to process file {}.", input.display()))?;
    } else {
        // Look for the .pdsc file in the given dir and its child directories.
        visit_dirs(&input, &mut families).context("Failed to generate target configuration.")?;
        // Check that we found at least a single .pdsc file.
        ensure!(
            !families.is_empty(),
            "Unable to find any .pdsc files in the provided input directory."
        );
    }
    // Serialize each family as <family name>.yaml in the output directory.
    let mut generated_files = Vec::with_capacity(families.len());
    for family in &families {
        let path = out_dir.join(family.name.clone() + ".yaml");
        let file = std::fs::File::create(&path)
            .context(format!("Failed to create file '{}'.", path.display()))?;
        serde_yaml::to_writer(file, &family)?;
        generated_files.push(path);
    }
    println!("Generated {} target definition(s):", generated_files.len());
    for file in generated_files {
        println!("\t{}", file.display());
    }
    Ok(())
}
/// Converts one parsed `.pdsc` package into `ChipFamily` entries.
///
/// `archive` is `Some` when the package came from a zipped Pack (flash
/// algorithm ELFs are read from the archive); otherwise ELF paths are
/// resolved relative to `input`. Results are appended to `families`.
fn handle_package(
    pdsc: Package,
    mut archive: Option<&mut zip::ZipArchive<File>>,
    input: &Path,
    families: &mut Vec<ChipFamily>,
) -> Result<()> {
    // Forge a definition file for each device in the .pdsc file.
    let mut devices = pdsc.devices.0.into_iter().collect::<Vec<_>>();
    // Sort by device name so the generated output is deterministic.
    devices.sort_by(|a, b| a.0.cmp(&b.0));
    for (device_name, device) in devices {
        // Extract the RAM info from the .pdsc file.
        let ram = get_ram(&device);
        // Extract the flash algorithm, block & sector size and the erased byte value from the ELF binary.
        let variant_flash_algorithms = device
            .algorithms
            .iter()
            .map(|flash_algorithm| {
                // Zipped pack: read the ELF from the archive; unpacked pack:
                // open it relative to the pack root.
                let algo = if let Some(ref mut archive) = archive {
                    crate::parser::extract_flash_algo(
                        archive.by_name(&flash_algorithm.file_name.as_path().to_string_lossy())?,
                        &flash_algorithm.file_name,
                        flash_algorithm.default,
                    )
                } else {
                    crate::parser::extract_flash_algo(
                        std::fs::File::open(input.join(&flash_algorithm.file_name))?,
                        &flash_algorithm.file_name,
                        flash_algorithm.default,
                    )
                }?;
                Ok(algo)
            })
            // Unparsable algorithms are logged and skipped instead of failing
            // the whole device.
            .filter_map(
                |flash_algorithm: Result<RawFlashAlgorithm>| match flash_algorithm {
                    Ok(flash_algorithm) => Some(flash_algorithm),
                    Err(error) => {
                        log::warn!("Failed to parse flash algorithm.");
                        log::warn!("Reason: {:?}", error);
                        None
                    }
                },
            )
            .collect::<Vec<_>>();
        // Extract the flash info from the .pdsc file. A region counts as flash
        // when it is the default, readable, executable and NOT CPU-writable;
        // only the first match is kept.
        let mut flash = None;
        for memory in device.memories.0.values() {
            if memory.default && memory.access.read && memory.access.execute && !memory.access.write
            {
                flash = Some(FlashRegion {
                    range: memory.start as u32..memory.start as u32 + memory.size as u32,
                    is_boot_memory: memory.startup,
                });
                break;
            }
        }
        // Get the core type.
        let core = if let Processors::Symmetric(processor) = &device.processor {
            match &processor.core {
                Core::CortexM0 => "M0",
                Core::CortexM0Plus => "M0",
                Core::CortexM4 => "M4",
                Core::CortexM3 => "M3",
                Core::CortexM33 => "M33",
                Core::CortexM7 => "M7",
                c => {
                    bail!("Core '{:?}' is not yet supported for target generation.", c);
                }
            }
        } else {
            log::warn!("Asymmetric cores are not supported yet.");
            ""
        };
        // Check if this device family is already known.
        let mut potential_family = families
            .iter_mut()
            .find(|family| family.name == device.family);
        let family = if let Some(ref mut family) = potential_family {
            family
        } else {
            families.push(ChipFamily::new(
                device.family,
                HashMap::new(),
                core.to_owned(),
            ));
            // This unwrap is always safe as we insert at least one item previously.
            families.last_mut().unwrap()
        };
        // NOTE(review): the chip's algorithm list is lowercased here, but the
        // family map below is keyed by the original-case name — confirm that
        // lookups normalize case the same way.
        let flash_algorithm_names: Vec<_> = variant_flash_algorithms
            .iter()
            .map(|fa| fa.name.clone().to_lowercase())
            .collect();
        for fa in variant_flash_algorithms {
            family.flash_algorithms.insert(fa.name.clone(), fa);
        }
        // Assemble the memory map from whatever RAM/flash info was found.
        let mut memory_map: Vec<MemoryRegion> = Vec::new();
        if let Some(mem) = ram {
            memory_map.push(MemoryRegion::Ram(mem));
        }
        if let Some(mem) = flash {
            memory_map.push(MemoryRegion::Flash(mem));
        }
        family.variants.push(Chip {
            name: device_name,
            memory_map,
            flash_algorithms: flash_algorithm_names,
        });
    }
    Ok(())
}
// one possible implementation of walking a directory only visiting files
/// Recursively walks `path`, processing every `.pdsc` file found.
fn visit_dirs(path: &Path, families: &mut Vec<ChipFamily>) -> Result<()> {
    // If we get a dir, look for all .pdsc files.
    for entry in fs::read_dir(path)? {
        let entry = entry?;
        let entry_path = entry.path();
        if entry_path.is_dir() {
            visit_dirs(&entry_path, families)?;
        } else if let Some(extension) = entry_path.extension() {
            if extension == "pdsc" {
                // Fix: log the .pdsc file itself, not the directory that
                // contains it (the original printed `path`).
                log::info!("Found .pdsc file: {}", entry_path.display());
                // `path` (the containing directory) is deliberately passed as
                // the pack root: `handle_package` resolves flash-algorithm
                // files relative to it.
                handle_package(
                    Package::from_path(&entry_path).map_err(|e| e.compat())?,
                    None,
                    path,
                    families,
                )
                .context(format!(
                    "Failed to process .pdsc file {}.",
                    entry_path.display()
                ))?;
            }
        }
    }
    Ok(())
}
/// Processes a single zipped CMSIS Pack: locates the `.pdsc` inside the
/// archive, parses it, then hands the package plus the open archive to
/// `handle_package` so flash algorithm ELFs can be extracted.
fn visit_file(path: &Path, families: &mut Vec<ChipFamily>) -> Result<()> {
    log::info!("Trying to open pack file: {}.", path.display());
    // If we get a file, try to unpack it.
    let file = fs::File::open(&path)?;
    let mut archive = zip::ZipArchive::new(file)?;
    let mut pdsc_file = find_pdsc_in_archive(&mut archive)
        .ok_or_else(|| anyhow!("Failed to find .pdsc file in archive {}", path.display()))?;
    let mut pdsc = String::new();
    pdsc_file.read_to_string(&mut pdsc)?;
    let package = Package::from_string(&pdsc).map_err(|e| {
        anyhow!(
            "Failed to parse pdsc file '{}' in CMSIS Pack {}: {}",
            pdsc_file.sanitized_name().display(),
            path.display(),
            e
        )
    })?;
    // `pdsc_file` mutably borrows `archive`; drop it before handing the
    // archive to `handle_package`.
    drop(pdsc_file);
    handle_package(package, Some(&mut archive), path, families)
}
/// Extracts the pdsc out of a ZIP archive.
fn find_pdsc_in_archive(archive: &mut zip::ZipArchive<File>) -> Option<zip::read::ZipFile> {
let mut index = None;
for i in 0..archive.len() {
let file = archive.by_index(i).unwrap();
let outpath = file.sanitized_name();
if let Some(extension) = outpath.extension() {
if extension == "pdsc" {
index = Some(i);
break;
}
}
}
if let Some(index) = index {
Some(archive.by_index(index).unwrap())
} else {
None
}
}
/// Picks the device's RAM region from the `.pdsc` memory list: the first
/// entry that is marked default and is both readable and writable.
fn get_ram(device: &Device) -> Option<RamRegion> {
    device
        .memories
        .0
        .values()
        .find(|memory| memory.default && memory.access.read && memory.access.write)
        .map(|memory| RamRegion {
            range: memory.start as u32..memory.start as u32 + memory.size as u32,
            is_boot_memory: memory.startup,
        })
}
|
/* Podos System by Julius */
/* Reactive-rules program for a runner's gadget ("podos"): it reads buttons,
   an accelerometer, a heartbeat sensor and a one-second clock, and drives
   three displays (heart rate, speed, distance). */
rs_prog podos:
[
    input : [
        start_button,       /* starts the podos process */
        stop_button,        /* stops the podos process */
        heart_button,       /* shows the heart beats per second */
        veloc_button,       /* shows the current speed */
        distance_button,    /* shows the distance travelled */
        acel(A),            /* resulting acceleration */
        pulso,              /* heartbeat signal */
        sec                 /* one-second clock tick */
    ],
    output: [
        display_heart(X),    /* displays the heart beats */
        display_veloc(X),    /* displays the current speed */
        display_distance(X)  /* displays the distance travelled */
    ],
    /* Translates raw button presses into internal command/lifecycle signals. */
    module keyboard_module:
    [ input : [
            start_button,
            stop_button,
            heart_button,
            veloc_button,
            distance_button
        ],
        output: [
            cmd_heart,
            cmd_veloc,
            cmd_distance,
            start_system,
            stop_system
        ],
        t_signal: [],
        p_signal: [],
        var: [],
        initially: [
            activate(rules)
        ],
        on_exception: [],
        start_button ===> [emit(start_system)],
        stop_button ===> [emit(stop_system)],
        heart_button ===> [emit(cmd_heart)],
        veloc_button ===> [emit(cmd_veloc)],
        distance_button ===> [emit(cmd_distance)]
    ],
    /* Fans the lifecycle/command signals out to the worker modules. */
    module control_module:
    [ input : [
            start_system,
            stop_system,
            cmd_heart,
            cmd_veloc,
            cmd_distance
        ],
        output: [
            start_pulsacao,
            start_integrator,
            stop_pulsacao,
            stop_integrator,
            cmd_pulsacao,
            cmd_integrator,
            cmd_velocity
        ],
        t_signal: [],
        p_signal: [],
        var: [],
        initially: [activate(rules)],
        on_exception: [],
        start_system ===> [emit(start_pulsacao), emit(start_integrator)],
        stop_system ===> [emit(stop_pulsacao), emit(stop_integrator)],
        cmd_heart ===> [emit(cmd_pulsacao)],
        cmd_distance ===> [emit(cmd_integrator)],
        cmd_veloc ===> [emit(cmd_velocity)]
    ],
    /* Counts heartbeat pulses per one-second window and reports on demand. */
    module pulsacao_module:
    [ input : [
            start_pulsacao,
            stop_pulsacao,
            pulso,
            sec,
            cmd_pulsacao
        ],
        output: [
            display_heart(X)
        ],
        t_signal: [],
        p_signal: [],
        var: [bat_sec, batimentos], /* last completed beats-per-second; running pulse count */
        initially: [activate(box_initial)],
        on_exception: [],
        box box_initial:
        [
            start_pulsacao ===> [activate(box_operation), bat_sec:=0, batimentos:=0],
            cmd_pulsacao ===> [emit(display_heart(bat_sec))]
        ],
        box box_operation:
        [
            /* On each tick, latch the count of the finished second and reset. */
            sec ===> [bat_sec:=batimentos, batimentos:=0],
            pulso ===> [batimentos:=batimentos+1],
            stop_pulsacao ===> [deactivate]
        ]
    ],
    /* Derives speed and accumulated distance from acceleration samples via a
       four-phase (p1..p4) pipeline of averaged samples. */
    module integrator_module:
    [ input : [
            start_integrator,
            stop_integrator,
            acel(A),
            cmd_integrator,
            cmd_velocity
        ],
        output: [
            display_veloc(X),
            display_distance(X)
        ],
        t_signal: [],
        p_signal: [p1, p2, p3, p4],
        var: [acel_result, acel_ant, vel_result, vel_ant, dist_result, dist_ant, dist],
        initially: [activate(box_initial)],
        on_exception: [],
        box box_initial:
        [
            start_integrator ===> [activate(box_integrator), acel_ant:=0, vel_ant:=0, dist_ant:=0],
            cmd_integrator ===> [emit(display_distance(dist_result))],
            cmd_velocity ===> [emit(display_veloc(vel_result))]
        ],
        box box_integrator:
        [
            acel(A) ===> [acel_result:=A, up(p1)],
            /* NOTE(review): vel_result is overwritten with the average of two
               acceleration samples, not accumulated onto vel_ant — confirm
               this is the intended integration rule. */
            #[p1] ===> [vel_result:=(acel_ant+acel_result)/2, up(p2)],
            #[p2] ===> [dist:=(vel_result+vel_ant)/2, up(p3)],
            #[p3] ===> [dist_result:=dist+dist_ant, up(p4)],
            #[p4] ===> [dist_ant:=dist_result, vel_ant:=vel_result, acel_ant:=acel_result],
            stop_integrator ===> [deactivate]
        ]
    ]
].
/* Run the program connected to the user's terminal. */
environment :- user_terminal.
|
use crate::Context;
/// Algorithms that can be used to scale the game's screen.
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum ScalingMode {
    /// The game will always be displayed at its native resolution, with no scaling applied.
    /// If the window is bigger than the native resolution, letterboxing will be applied.
    /// If the window is smaller than the native resolution, it will be cropped.
    Fixed,
    /// The screen will be stretched to fill the window, without trying to preserve the original
    /// aspect ratio. Distortion/stretching/squashing may occur.
    Stretch,
    // Planned but not yet implemented modes (kept for reference):
    // /// The entire screen will be displayed as large as possible while maintaining the original
    // /// aspect ratio. Letterboxing may occur.
    // ShowAll,
    // /// Works the same as ShowAll, but will only scale by integer values.
    // ShowAllPixelPerfect,
    // /// The screen will fill the entire window, maintaining the original aspect ratio but
    // /// potentially being cropped.
    // Crop,
    // /// Works the same as Crop, but will only scale by integer values.
    // CropPixelPerfect,
}
impl Default for ScalingMode {
    /// `Fixed` (native resolution, no scaling) is the default mode.
    fn default() -> Self {
        Self::Fixed
    }
}
/// Applies the given scaling `mode` to the active macroquad camera and
/// records the choice on the context.
///
/// `scale` is an optional extra zoom factor; `None` means 1.0.
#[allow(unused_variables)]
pub fn set_scaling_mode(ctx: &mut Context, mode: ScalingMode, scale: Option<f32>) {
    use macroquad::prelude::*;
    let scaling = scale.unwrap_or(1.0);
    match mode {
        // Native resolution: just restore macroquad's default camera.
        ScalingMode::Fixed => set_default_camera(),
        // Stretch: a 2D camera over the full screen, divided by the zoom
        // factor so larger `scaling` shows a smaller world area.
        ScalingMode::Stretch => set_camera(&Camera2D::from_display_rect(Rect::new(
            0.0,
            0.0,
            width(ctx) / scaling,
            height(ctx) / scaling,
        ))),
    }
    // Remember the active mode/scale so other systems can query it.
    ctx.scaling = (mode, scale).into();
}
/// Current screen width in pixels. `ctx` is accepted (though unused) so the
/// signature stays stable if a backend ever needs it.
#[allow(unused_variables)]
pub fn width(ctx: &Context) -> f32 {
    macroquad::prelude::screen_width()
}
/// Current screen height in pixels; see `width` for the unused `ctx`.
#[allow(unused_variables)]
pub fn height(ctx: &Context) -> f32 {
    macroquad::prelude::screen_height()
}
|
#[doc = "Reader of register FRAMEFLTR"]
pub type R = crate::R<u32, super::FRAMEFLTR>;
#[doc = "Writer for register FRAMEFLTR"]
pub type W = crate::W<u32, super::FRAMEFLTR>;
#[doc = "Register FRAMEFLTR `reset()`'s with value 0"]
impl crate::ResetValue for super::FRAMEFLTR {
    type Type = u32;
    // All-zeroes reset: every frame-filter option starts disabled.
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
#[doc = "Reader of field `PR`"]
pub type PR_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `PR`"]
pub struct PR_W<'a> {
    w: &'a mut W,
}
impl<'a> PR_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Field occupies bit 0.
        let mask: u32 = 0x01;
        if value {
            self.w.bits |= mask;
        } else {
            self.w.bits &= !mask;
        }
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = "Reader of field `HUC`"]
pub type HUC_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `HUC`"]
pub struct HUC_W<'a> {
    w: &'a mut W,
}
impl<'a> HUC_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Field occupies bit 1.
        let mask: u32 = 0x01 << 1;
        if value {
            self.w.bits |= mask;
        } else {
            self.w.bits &= !mask;
        }
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = "Reader of field `HMC`"]
pub type HMC_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `HMC`"]
pub struct HMC_W<'a> {
    w: &'a mut W,
}
impl<'a> HMC_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Field occupies bit 2.
        let mask: u32 = 0x01 << 2;
        if value {
            self.w.bits |= mask;
        } else {
            self.w.bits &= !mask;
        }
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = "Reader of field `DAIF`"]
pub type DAIF_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `DAIF`"]
pub struct DAIF_W<'a> {
    w: &'a mut W,
}
impl<'a> DAIF_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Field occupies bit 3.
        let mask: u32 = 0x01 << 3;
        if value {
            self.w.bits |= mask;
        } else {
            self.w.bits &= !mask;
        }
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = "Reader of field `PM`"]
pub type PM_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `PM`"]
pub struct PM_W<'a> {
    w: &'a mut W,
}
impl<'a> PM_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Field occupies bit 4.
        let mask: u32 = 0x01 << 4;
        if value {
            self.w.bits |= mask;
        } else {
            self.w.bits &= !mask;
        }
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = "Reader of field `DBF`"]
pub type DBF_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `DBF`"]
pub struct DBF_W<'a> {
    w: &'a mut W,
}
impl<'a> DBF_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Field occupies bit 5.
        let mask: u32 = 0x01 << 5;
        if value {
            self.w.bits |= mask;
        } else {
            self.w.bits &= !mask;
        }
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = "Pass Control Frames\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum PCF_A {
    #[doc = "0: The MAC filters all control frames from reaching application"]
    ALL = 0,
    #[doc = "1: MAC forwards all control frames except PAUSE control frames to application even if they fail the address filter"]
    PAUSE = 1,
    #[doc = "2: MAC forwards all control frames to application even if they fail the address Filter"]
    NONE = 2,
    #[doc = "3: MAC forwards control frames that pass the address Filter"]
    ADDR = 3,
}
// The `#[repr(u8)]` discriminants above are the raw 2-bit field values.
impl From<PCF_A> for u8 {
    #[inline(always)]
    fn from(variant: PCF_A) -> Self {
        variant as _
    }
}
#[doc = "Reader of field `PCF`"]
pub type PCF_R = crate::R<u8, PCF_A>;
impl PCF_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> PCF_A {
        // `R::pcf()` masks the raw value to 2 bits, so 0..=3 is exhaustive.
        match self.bits {
            0 => PCF_A::ALL,
            1 => PCF_A::PAUSE,
            2 => PCF_A::NONE,
            3 => PCF_A::ADDR,
            _ => unreachable!(),
        }
    }
    #[doc = "Checks if the value of the field is `ALL`"]
    #[inline(always)]
    pub fn is_all(&self) -> bool {
        self.variant() == PCF_A::ALL
    }
    #[doc = "Checks if the value of the field is `PAUSE`"]
    #[inline(always)]
    pub fn is_pause(&self) -> bool {
        self.variant() == PCF_A::PAUSE
    }
    #[doc = "Checks if the value of the field is `NONE`"]
    #[inline(always)]
    pub fn is_none(&self) -> bool {
        self.variant() == PCF_A::NONE
    }
    #[doc = "Checks if the value of the field is `ADDR`"]
    #[inline(always)]
    pub fn is_addr(&self) -> bool {
        self.variant() == PCF_A::ADDR
    }
}
#[doc = "Write proxy for field `PCF`"]
pub struct PCF_W<'a> {
    w: &'a mut W,
}
impl<'a> PCF_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bits(self, value: u8) -> &'a mut W {
        // 2-bit field at bits 6:7.
        self.w.bits = (self.w.bits & !(0x03 << 6)) | (((value as u32) & 0x03) << 6);
        self.w
    }
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: PCF_A) -> &'a mut W {
        self.bits(variant.into())
    }
    #[doc = "The MAC filters all control frames from reaching application"]
    #[inline(always)]
    pub fn all(self) -> &'a mut W {
        self.variant(PCF_A::ALL)
    }
    #[doc = "MAC forwards all control frames except PAUSE control frames to application even if they fail the address filter"]
    #[inline(always)]
    pub fn pause(self) -> &'a mut W {
        self.variant(PCF_A::PAUSE)
    }
    #[doc = "MAC forwards all control frames to application even if they fail the address Filter"]
    #[inline(always)]
    pub fn none(self) -> &'a mut W {
        self.variant(PCF_A::NONE)
    }
    #[doc = "MAC forwards control frames that pass the address Filter"]
    #[inline(always)]
    pub fn addr(self) -> &'a mut W {
        self.variant(PCF_A::ADDR)
    }
}
#[doc = "Reader of field `SAIF`"]
pub type SAIF_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `SAIF`"]
pub struct SAIF_W<'a> {
    w: &'a mut W,
}
impl<'a> SAIF_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Field occupies bit 8.
        let mask: u32 = 0x01 << 8;
        if value {
            self.w.bits |= mask;
        } else {
            self.w.bits &= !mask;
        }
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = "Reader of field `SAF`"]
pub type SAF_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `SAF`"]
pub struct SAF_W<'a> {
    w: &'a mut W,
}
impl<'a> SAF_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Field occupies bit 9.
        let mask: u32 = 0x01 << 9;
        if value {
            self.w.bits |= mask;
        } else {
            self.w.bits &= !mask;
        }
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = "Reader of field `HPF`"]
pub type HPF_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `HPF`"]
pub struct HPF_W<'a> {
    w: &'a mut W,
}
impl<'a> HPF_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Field occupies bit 10.
        let mask: u32 = 0x01 << 10;
        if value {
            self.w.bits |= mask;
        } else {
            self.w.bits &= !mask;
        }
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = "Reader of field `VTFE`"]
pub type VTFE_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `VTFE`"]
pub struct VTFE_W<'a> {
    w: &'a mut W,
}
impl<'a> VTFE_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Field occupies bit 16.
        let mask: u32 = 0x01 << 16;
        if value {
            self.w.bits |= mask;
        } else {
            self.w.bits &= !mask;
        }
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = "Reader of field `RA`"]
pub type RA_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `RA`"]
pub struct RA_W<'a> {
    w: &'a mut W,
}
impl<'a> RA_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Field occupies bit 31.
        let mask: u32 = 0x01 << 31;
        if value {
            self.w.bits |= mask;
        } else {
            self.w.bits &= !mask;
        }
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
// Read accessors: each extracts its field from the cached register value.
impl R {
    /// Bit 0 - Promiscuous Mode
    #[inline(always)]
    pub fn pr(&self) -> PR_R {
        PR_R::new(self.bits & 1 == 1)
    }
    /// Bit 1 - Hash Unicast
    #[inline(always)]
    pub fn huc(&self) -> HUC_R {
        HUC_R::new(self.bits >> 1 & 1 == 1)
    }
    /// Bit 2 - Hash Multicast
    #[inline(always)]
    pub fn hmc(&self) -> HMC_R {
        HMC_R::new(self.bits >> 2 & 1 == 1)
    }
    /// Bit 3 - Destination Address (DA) Inverse Filtering
    #[inline(always)]
    pub fn daif(&self) -> DAIF_R {
        DAIF_R::new(self.bits >> 3 & 1 == 1)
    }
    /// Bit 4 - Pass All Multicast
    #[inline(always)]
    pub fn pm(&self) -> PM_R {
        PM_R::new(self.bits >> 4 & 1 == 1)
    }
    /// Bit 5 - Disable Broadcast Frames
    #[inline(always)]
    pub fn dbf(&self) -> DBF_R {
        DBF_R::new(self.bits >> 5 & 1 == 1)
    }
    /// Bits 6:7 - Pass Control Frames
    #[inline(always)]
    pub fn pcf(&self) -> PCF_R {
        PCF_R::new((self.bits >> 6 & 0x03) as u8)
    }
    /// Bit 8 - Source Address (SA) Inverse Filtering
    #[inline(always)]
    pub fn saif(&self) -> SAIF_R {
        SAIF_R::new(self.bits >> 8 & 1 == 1)
    }
    /// Bit 9 - Source Address Filter Enable
    #[inline(always)]
    pub fn saf(&self) -> SAF_R {
        SAF_R::new(self.bits >> 9 & 1 == 1)
    }
    /// Bit 10 - Hash or Perfect Filter
    #[inline(always)]
    pub fn hpf(&self) -> HPF_R {
        HPF_R::new(self.bits >> 10 & 1 == 1)
    }
    /// Bit 16 - VLAN Tag Filter Enable
    #[inline(always)]
    pub fn vtfe(&self) -> VTFE_R {
        VTFE_R::new(self.bits >> 16 & 1 == 1)
    }
    /// Bit 31 - Receive All
    #[inline(always)]
    pub fn ra(&self) -> RA_R {
        RA_R::new(self.bits >> 31 & 1 == 1)
    }
}
// Field write proxies: each accessor returns a `*_W` proxy that mutably
// borrows this `W`, so field writes can be chained before the final commit.
impl W {
    #[doc = "Bit 0 - Promiscuous Mode"]
    #[inline(always)]
    pub fn pr(&mut self) -> PR_W {
        PR_W { w: self }
    }
    #[doc = "Bit 1 - Hash Unicast"]
    #[inline(always)]
    pub fn huc(&mut self) -> HUC_W {
        HUC_W { w: self }
    }
    #[doc = "Bit 2 - Hash Multicast"]
    #[inline(always)]
    pub fn hmc(&mut self) -> HMC_W {
        HMC_W { w: self }
    }
    #[doc = "Bit 3 - Destination Address (DA) Inverse Filtering"]
    #[inline(always)]
    pub fn daif(&mut self) -> DAIF_W {
        DAIF_W { w: self }
    }
    #[doc = "Bit 4 - Pass All Multicast"]
    #[inline(always)]
    pub fn pm(&mut self) -> PM_W {
        PM_W { w: self }
    }
    #[doc = "Bit 5 - Disable Broadcast Frames"]
    #[inline(always)]
    pub fn dbf(&mut self) -> DBF_W {
        DBF_W { w: self }
    }
    #[doc = "Bits 6:7 - Pass Control Frames"]
    #[inline(always)]
    pub fn pcf(&mut self) -> PCF_W {
        PCF_W { w: self }
    }
    #[doc = "Bit 8 - Source Address (SA) Inverse Filtering"]
    #[inline(always)]
    pub fn saif(&mut self) -> SAIF_W {
        SAIF_W { w: self }
    }
    #[doc = "Bit 9 - Source Address Filter Enable"]
    #[inline(always)]
    pub fn saf(&mut self) -> SAF_W {
        SAF_W { w: self }
    }
    #[doc = "Bit 10 - Hash or Perfect Filter"]
    #[inline(always)]
    pub fn hpf(&mut self) -> HPF_W {
        HPF_W { w: self }
    }
    #[doc = "Bit 16 - VLAN Tag Filter Enable"]
    #[inline(always)]
    pub fn vtfe(&mut self) -> VTFE_W {
        VTFE_W { w: self }
    }
    #[doc = "Bit 31 - Receive All"]
    #[inline(always)]
    pub fn ra(&mut self) -> RA_W {
        RA_W { w: self }
    }
}
|
use std::fmt::{Debug, Formatter, Result};
use std::iter::Enumerate;
use std::{
cmp::{Ordering, Reverse},
marker::PhantomData,
};
use std::{
collections::{BinaryHeap, HashMap},
hash::Hash,
};
use crate::{
properties::{WithRegion, WithRegionCore},
ChromName,
};
/// A sweep-line event: the opening or closing boundary of one region.
pub struct Point<C: ChromName, T: WithRegion<C>> {
    /// True when this event marks the start (`begin`) of the region,
    /// false when it marks the end.
    pub is_open: bool,
    /// Index of the originating region in the input iterator.
    pub index: usize,
    /// Overlap-depth counter; assigned by the iterator that produces the
    /// event (see the `Iterator` impls in this file).
    pub depth: usize,
    /// The region this event belongs to.
    pub value: T,
    _p: PhantomData<C>,
}
impl<C: ChromName, T: WithRegion<C>> Debug for Point<C, T> {
    /// Renders as `Open(chrom, pos, depth)` or `Close(chrom, pos, depth)`.
    fn fmt(&self, f: &mut Formatter<'_>) -> Result {
        let label = if self.is_open { "Open(" } else { "Close(" };
        write!(f, "{}", label)?;
        let (chrom, pos) = self.position();
        write!(f, "{}, {}, {})", chrom.to_string(), pos, self.depth)
    }
}
impl<C: ChromName, T: WithRegion<C>> Point<C, T> {
    /// Coordinate of this event: `(chrom, begin)` for an open event,
    /// `(chrom, end)` for a close event.
    pub fn position(&self) -> (C, u32) {
        let pos = if self.is_open {
            self.value.begin()
        } else {
            self.value.end()
        };
        (self.value.chrom().clone(), pos)
    }
}
impl<C: ChromName, T: WithRegion<C>> PartialEq for Point<C, T> {
    /// Two events are equal when they sit at the same (chrom, pos) coordinate.
    ///
    /// NOTE(review): `eq` ignores `is_open`, while the `Ord` impl tie-breaks
    /// on it — so `a == b` does not imply `a.cmp(&b) == Equal`. Confirm this
    /// asymmetry is intentional.
    fn eq(&self, other: &Point<C, T>) -> bool {
        self.position().eq(&other.position())
    }
}
impl<C: ChromName, T: WithRegion<C>> PartialOrd for Point<C, T> {
    /// Delegates to the total order below (canonical `PartialOrd` for a type
    /// that also implements `Ord`).
    fn partial_cmp(&self, other: &Point<C, T>) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl<C: ChromName, T: WithRegion<C>> Eq for Point<C, T> {}
impl<C: ChromName, T: WithRegion<C>> Ord for Point<C, T> {
    /// Orders events by coordinate; at equal coordinates, close events
    /// (`is_open == false`) sort before open ones.
    fn cmp(&self, other: &Self) -> Ordering {
        // The comparison logic lives here (not in `partial_cmp`) so the two
        // impls cannot drift apart, and `cmp` no longer unwraps an Option.
        self.position()
            .cmp(&other.position())
            .then_with(|| self.is_open.cmp(&other.is_open))
    }
}
/// Sweep-line adaptor over a region iterator, yielding [`Point`] open/close
/// events.
///
/// NOTE(review): appears to assume the input regions arrive sorted by start
/// coordinate — confirm with callers.
pub struct ComponentsIter<C, I>
where
    C: ChromName,
    I: Iterator,
    I::Item: WithRegion<C> + Clone,
{
    /// Remaining input regions, paired with their original indices.
    iter: Enumerate<I>,
    /// One-item lookahead: the next region to open, if any.
    peek_buffer: Option<(usize, I::Item)>,
    /// Min-heap (via `Reverse`) of pending close events for regions whose
    /// open event has already been emitted.
    heap: BinaryHeap<Reverse<Point<C, I::Item>>>,
}
pub trait Components
where
    Self: Iterator + Sized,
{
    /// Adapts this region iterator into a stream of open/close boundary
    /// events (see [`ComponentsIter`]).
    fn components<C: ChromName>(self) -> ComponentsIter<C, Self>
    where
        Self::Item: WithRegion<C> + Clone,
    {
        // Prime the one-item lookahead from the enumerated input.
        let mut enumerated = self.enumerate();
        let first = enumerated.next();
        ComponentsIter {
            iter: enumerated,
            peek_buffer: first,
            heap: BinaryHeap::new(),
        }
    }
}
impl<T> Components for T where T: Iterator + Sized {}
impl<C, I> Iterator for ComponentsIter<C, I>
where
    C: ChromName,
    I: Iterator,
    I::Item: WithRegion<C> + Clone,
{
    type Item = Point<C, I::Item>;
    /// Emits open/close events in coordinate order.
    ///
    /// The heap holds pending close events for regions already opened. A
    /// close event is emitted while it lies strictly before the next region's
    /// begin; otherwise the next region is opened and its close event queued.
    fn next(&mut self) -> Option<Self::Item> {
        if let Some((index, peek_buffer)) = self.peek_buffer.as_ref() {
            let index = *index;
            // Drain any close event strictly before the next open position.
            if self.heap.peek().map_or(false, |x| {
                x.0.position() < (peek_buffer.chrom().clone(), peek_buffer.begin())
            }) {
                let depth = self.heap.len();
                return self.heap.pop().map(|Reverse(mut x)| {
                    // Depth after this region closes: the regions still queued.
                    x.depth = depth - 1;
                    x
                });
            }
            // Depth including the region we are about to open.
            let depth = self.heap.len() + 1;
            // Queue the matching close event; its depth placeholder (0) is
            // overwritten when it is popped above.
            self.heap.push(Reverse(Point {
                index,
                depth: 0,
                value: peek_buffer.clone(),
                is_open: false,
                _p: PhantomData,
            }));
            let ret = Some(Point {
                index,
                depth,
                is_open: true,
                value: peek_buffer.clone(),
                _p: PhantomData,
            });
            self.peek_buffer = self.iter.next();
            ret
        } else {
            // Input exhausted: flush the remaining close events.
            let depth = self.heap.len();
            self.heap.pop().map(|Reverse(mut x)| {
                x.depth = depth - 1;
                x
            })
        }
    }
}
/// Iterator adaptor that labels each [`Point`] with a tag and tracks the
/// overlap depth separately per tag.
pub struct TaggedComponent<C, I, R, T, F>
where
    C: ChromName,
    I: Iterator<Item = Point<C, R>>,
    R: WithRegion<C> + Clone,
    T: Clone + Hash + Eq,
    F: FnMut(&R) -> T,
{
    /// Maps a region to its tag.
    tag_func: F,
    /// Current open-region depth per tag; entries are removed when a tag's
    /// depth returns to zero.
    state: HashMap<T, usize>,
    /// Underlying stream of open/close events.
    component_iter: I,
    _phantom: PhantomData<C>,
}
pub trait TaggedComponentExt<C: ChromName, R>
where
R: WithRegion<C> + Clone,
Self: Iterator<Item = Point<C, R>>,
{
fn with_tag<T, F>(self, tag_func: F) -> TaggedComponent<C, Self, R, T, F>
where
T: Clone + Hash + Eq,
F: FnMut(&R) -> T,
Self: Sized,
{
TaggedComponent {
tag_func,
state: HashMap::new(),
component_iter: self,
_phantom: Default::default(),
}
}
}
impl<T, C, R> TaggedComponentExt<C, R> for T
where
C: ChromName,
R: WithRegion<C> + Clone,
Self: Iterator<Item = Point<C, R>>,
{
}
impl<C, I, R, T, F> Iterator for TaggedComponent<C, I, R, T, F>
where
    C: ChromName,
    I: Iterator<Item = Point<C, R>>,
    R: WithRegion<C> + Clone,
    T: Clone + Hash + Eq,
    F: FnMut(&R) -> T,
{
    type Item = (T, Point<C, R>);
    /// Pulls the next event and rewrites its depth to this tag's depth.
    fn next(&mut self) -> Option<Self::Item> {
        let mut next_comp = self.component_iter.next()?;
        let tag = (self.tag_func)(&next_comp.value);
        let tagged_depth = if next_comp.is_open {
            // Opening a region: bump this tag's depth counter.
            let cell = self.state.entry(tag.clone()).or_insert(0);
            *cell += 1;
            *cell
        } else {
            // Closing a region: decrement, dropping the entry at zero so the
            // map only holds tags with currently-open regions. A missing tag
            // yields depth 0 (unmatched close).
            let depth = self
                .state
                .get_mut(&tag)
                .map(|depth| {
                    *depth -= 1;
                    *depth
                })
                .unwrap_or(0);
            if depth == 0 {
                self.state.remove(&tag);
            }
            depth
        };
        next_comp.depth = tagged_depth;
        Some((tag, next_comp))
    }
}
|
/// Reader of register DSI_VVACCR
pub type R = crate::R<u32, super::DSI_VVACCR>;
/// Reader of field `VA`
pub type VA_R = crate::R<u16, u16>;
impl R {
    /// Bits 0:13 - Vertical Active duration
    #[inline(always)]
    pub fn va(&self) -> VA_R {
        // Field occupies the low 14 bits of the register.
        let raw = self.bits & 0x3fff;
        VA_R::new(raw as u16)
    }
}
|
use hyper::server::{Handler, Listening, Request, Response, Server};
use metric;
use protobuf::Message;
use protobuf::repeated::RepeatedField;
use protocols::prometheus::*;
use sink::{Sink, Valve};
use source::report_telemetry;
use std::io;
use std::io::Write;
use std::mem;
use std::str;
use std::sync;
use std::sync::Mutex;
/// The Prometheus sink: a scrape endpoint backed by a shared aggregation.
#[allow(dead_code)]
pub struct Prometheus {
    /// Aggregation shared with the HTTP handler thread.
    aggrs: sync::Arc<Mutex<PrometheusAggr>>,
    // `http_srv` is never used but we must keep it in this struct to avoid the
    // listening server being dropped
    http_srv: Listening,
}
/// Deserialized configuration for the Prometheus sink.
#[derive(Debug, Deserialize)]
pub struct PrometheusConfig {
    // NOTE(review): not read anywhere in this chunk — confirm use elsewhere.
    pub bin_width: i64,
    /// Host the scrape server binds to.
    pub host: String,
    /// Port the scrape server binds to.
    pub port: u16,
    // NOTE(review): not read anywhere in this chunk — confirm use elsewhere.
    pub config_path: Option<String>,
}
impl Default for PrometheusConfig {
    /// Defaults: 1-unit bins, serving on localhost:8086, no config path.
    fn default() -> Self {
        PrometheusConfig {
            bin_width: 1,
            host: String::from("localhost"),
            port: 8086,
            config_path: None,
        }
    }
}
/// Hyper handler that serves scrape requests out of the shared aggregation.
struct SenderHandler {
    aggr: sync::Arc<Mutex<PrometheusAggr>>,
}
/// The specialized aggr for Prometheus
///
/// Prometheus is a weirdo. We have to make sure the following properties are
/// held:
///
/// * If prometheus hangs up on us we _do not_ lose data.
/// * We _never_ resubmit points for a time-series.
/// * We _never_ submit new points for an already reported bin.
///
/// To help demonstrate this we have a special aggregation for Prometheus:
/// `PrometheusAggr`. Its job is to encode these operations and allow us to
/// establish the above invariants.
#[derive(Clone, Debug)]
struct PrometheusAggr {
    // The approach we take is unique in cernan: we drop all timestamps and _do
    // not bin_. This is driven by the following comment in Prometheus' doc /
    // our own experience trying to set explict timestamps:
    //
    //     Accordingly you should not set timestamps on the metric you expose,
    //     let Prometheus take care of that. If you think you need timestamps,
    //     then you probably need the pushgateway (without timestamps) instead.
    //
    // Prometheus has a certain view of the world -- and that's fine -- so we
    // need to meet it there.
    //
    // Invariant: kept sorted by (name, tags) so binary search works; see
    // `prometheus_cmp`.
    inner: Vec<metric::Telemetry>,
}
/// Ordering key for the aggregation: by metric name first, then by tag map.
fn prometheus_cmp(
    l: &metric::Telemetry,
    r: &metric::Telemetry,
) -> Option<::std::cmp::Ordering> {
    let by_name = l.name.partial_cmp(&r.name);
    if let Some(::std::cmp::Ordering::Equal) = by_name {
        ::metric::tagmap::cmp(&l.tags, &r.tags)
    } else {
        by_name
    }
}
impl PrometheusAggr {
    /// Return a clone of the stored Telemetry if it matches the passed
    /// Telemetry by name and tags
    #[cfg(test)]
    fn find_match(&self, telem: &metric::Telemetry) -> Option<metric::Telemetry> {
        // `telem` is already a reference; the original passed `&telem`
        // (a `&&Telemetry`) and relied on deref coercion.
        match self.inner.binary_search_by(|probe| {
            prometheus_cmp(probe, telem).expect("could not compare")
        }) {
            Ok(idx) => Some(self.inner[idx].clone()),
            Err(_) => None,
        }
    }
    /// Return all 'reportable' Telemetry
    ///
    /// This function returns all the stored Telemetry points that are available
    /// for shipping to Prometheus. Shipping a point to Prometheus drops that
    /// point from memory, once it's gone over the wire.
    fn reportable(&mut self) -> Vec<metric::Telemetry> {
        // Hand out the whole store, leaving an empty Vec behind.
        mem::replace(&mut self.inner, Default::default())
    }
    /// Insert a Telemetry into the aggregation
    ///
    /// This function inserts the given Telemetry into the inner aggregation of
    /// PrometheusAggr. Timestamps are _not_ respected. Distinctions between
    /// Telemetry of the same name are only made if their tagmaps are distinct.
    fn insert(&mut self, telem: metric::Telemetry) -> bool {
        // `inner` stays sorted by (name, tags); merge on hit, splice on miss.
        match self.inner.binary_search_by(|probe| {
            prometheus_cmp(probe, &telem).expect("could not compare")
        }) {
            Ok(idx) => self.inner[idx] += telem,
            Err(idx) => self.inner.insert(idx, telem),
        }
        true
    }
    /// Recombine Telemetry into the aggregation
    ///
    /// In the event that Prometheus hangs up on us we have to recombine
    /// 'reportable' Telemetry back into the aggregation, else we lose it.
    ///
    /// If a Telemetry is passed that did not previously exist or has not been
    /// reported the effect will be the same as if `insert` had been called.
    fn recombine(&mut self, telems: Vec<metric::Telemetry>) {
        for telem in telems.into_iter() {
            self.insert(telem);
        }
    }
    /// Return the total points stored by this aggregation
    fn count(&self) -> usize {
        self.inner.len()
    }
}
impl Default for PrometheusAggr {
    /// A fresh aggregation starts out empty.
    fn default() -> PrometheusAggr {
        PrometheusAggr { inner: Vec::new() }
    }
}
impl Handler for SenderHandler {
    /// Serve one Prometheus scrape: drain the aggregation, pick the response
    /// format from the Accept header, and recombine the drained points if the
    /// write fails so no data is lost.
    fn handle(&self, req: Request, res: Response) {
        let mut aggr = self.aggr.lock().unwrap();
        let reportable: Vec<metric::Telemetry> = aggr.reportable();
        report_telemetry(
            "cernan.sinks.prometheus.aggregation.reportable",
            reportable.len() as f64,
        );
        report_telemetry(
            "cernan.sinks.prometheus.aggregation.remaining",
            aggr.count() as f64,
        );
        // Typed hyper::mime is challenging to use. In particular, matching does
        // not seem to work like I expect and handling all other MIME cases in
        // the existing enum strikes me as a fool's errand, on account of there
        // may be an infinite number of MIMEs that'll come right on in. We'll
        // just be monsters and assume if you aren't asking for protobuf you're
        // asking for plaintext. (Invalid UTF-8 header values are skipped.)
        let accept_proto = req.headers
            .get_raw("accept")
            .unwrap_or(&[])
            .iter()
            .filter_map(|x| str::from_utf8(x).ok())
            .any(|hdr| hdr.contains("application/vnd.google.protobuf;"));
        let res = if accept_proto {
            report_telemetry("cernan.sinks.prometheus.write.binary", 1.0);
            write_binary(&reportable, res)
        } else {
            report_telemetry("cernan.sinks.prometheus.write.text", 1.0);
            write_text(&reportable, res)
        };
        if res.is_err() {
            // Prometheus hung up on us: keep the invariant that we never
            // lose data by folding the drained points back in.
            report_telemetry("cernan.sinks.prometheus.report_error", 1.0);
            aggr.recombine(reportable);
        }
    }
}
impl Prometheus {
    /// Construct the sink and immediately start the HTTP scrape server.
    ///
    /// NOTE(review): both `unwrap`s panic if the host/port cannot be bound —
    /// presumably acceptable at startup, but worth confirming.
    pub fn new(config: PrometheusConfig) -> Prometheus {
        let aggrs = sync::Arc::new(sync::Mutex::new(Default::default()));
        let srv_aggrs = aggrs.clone();
        // A single handler thread; the aggregation is behind a Mutex anyway.
        let listener = Server::http((config.host.as_str(), config.port))
            .unwrap()
            .handle_threads(SenderHandler { aggr: srv_aggrs }, 1)
            .unwrap();
        Prometheus {
            aggrs: aggrs,
            http_srv: listener,
        }
    }
}
/// Render the drained Telemetry as length-delimited protobuf `MetricFamily`
/// messages (one SUMMARY family per point) onto the streaming response.
fn write_binary(aggrs: &[metric::Telemetry], mut res: Response) -> io::Result<()> {
    res.headers_mut().set_raw(
        "content-type",
        vec![
            b"application/vnd.google.protobuf; \
              proto=io.prometheus.client.MetricFamily; encoding=delimited"
                .to_vec(),
        ],
    );
    let mut res = res.start().unwrap();
    // `iter()` — the original used `into_iter()` on a slice reference, which
    // yields references anyway; this spelling says so directly.
    for m in aggrs.iter() {
        let mut metric_family = MetricFamily::new();
        metric_family.set_name(m.name.clone());
        let mut metric = Metric::new();
        let mut label_pairs = Vec::with_capacity(8);
        for (k, v) in m.tags.into_iter() {
            let mut lp = LabelPair::new();
            lp.set_name(k.clone());
            lp.set_value(v.clone());
            label_pairs.push(lp);
        }
        metric.set_label(RepeatedField::from_vec(label_pairs));
        let mut summary = Summary::new();
        summary.set_sample_count(m.count() as u64);
        summary.set_sample_sum(m.sum());
        let mut quantiles = Vec::with_capacity(9);
        for q in &[0.0, 1.0, 0.25, 0.5, 0.75, 0.90, 0.95, 0.99, 0.999] {
            let mut quantile = Quantile::new();
            quantile.set_quantile(*q);
            quantile.set_value(m.query(*q).unwrap());
            quantiles.push(quantile);
        }
        summary.set_quantile(RepeatedField::from_vec(quantiles));
        metric.set_summary(summary);
        metric_family.set_field_type(MetricType::SUMMARY);
        metric_family.set_metric(RepeatedField::from_vec(vec![metric]));
        metric_family
            .write_length_delimited_to_writer(res.by_ref())
            .expect("FAILED TO WRITE TO HTTP RESPONSE");
    }
    res.end()
}
/// Render the drained Telemetry in the Prometheus text exposition format
/// (version 0.0.4): one line per quantile plus `_sum` and `_count` lines.
fn write_text(aggrs: &[metric::Telemetry], mut res: Response) -> io::Result<()> {
    res.headers_mut()
        .set_raw("content-type", vec![b"text/plain; version=0.0.4".to_vec()]);
    let mut buf = String::with_capacity(1024);
    let mut res = res.start().unwrap();
    for m in aggrs.into_iter() {
        let sum_tags = m.tags.clone();
        let count_tags = m.tags.clone();
        for q in &[0.0, 1.0, 0.25, 0.5, 0.75, 0.90, 0.95, 0.99, 0.999] {
            buf.push_str(&m.name);
            buf.push_str("{quantile=\"");
            buf.push_str(&q.to_string());
            // Each tag closes the previous label value (`", `) and opens its
            // own; the final `"} ` below closes the last one.
            for (k, v) in m.tags.into_iter() {
                buf.push_str("\", ");
                buf.push_str(k);
                buf.push_str("=\"");
                buf.push_str(v);
            }
            buf.push_str("\"} ");
            buf.push_str(&m.query(*q).unwrap().to_string());
            buf.push_str("\n");
        }
        // NOTE(review): unlike the quantile lines, `_sum`/`_count` emit a
        // trailing `", ` after the last tag — confirm consumers accept this.
        buf.push_str(&m.name);
        buf.push_str("_sum ");
        buf.push_str("{");
        for (k, v) in sum_tags.into_iter() {
            buf.push_str(k);
            buf.push_str("=\"");
            buf.push_str(v);
            buf.push_str("\", ");
        }
        buf.push_str("} ");
        buf.push_str(&m.sum().to_string());
        buf.push_str("\n");
        buf.push_str(&m.name);
        buf.push_str("_count ");
        buf.push_str("{");
        for (k, v) in count_tags.into_iter() {
            buf.push_str(k);
            buf.push_str("=\"");
            buf.push_str(v);
            buf.push_str("\", ");
        }
        buf.push_str("} ");
        buf.push_str(&m.count().to_string());
        buf.push_str("\n");
        // Flush one metric's worth of text, then reuse the buffer.
        res.write_all(buf.as_bytes()).expect(
            "FAILED TO WRITE BUFFER INTO HTTP
                 STREAMING RESPONSE",
        );
        buf.clear();
    }
    res.end()
}
/// Sanitize cernan Telemetry into prometheus' notion
///
/// Prometheus is pretty strict about the names of its ingested metrics.
/// According to https://prometheus.io/docs/instrumenting/writing_exporters/
/// "Only [a-zA-Z0-9:_] are valid in metric names, any other characters should
/// be sanitized to an underscore."
///
/// Metrics coming into cernan can have full utf8 names, save for some ingestion
/// protocols that special-case certain characters. To cope with this we just
/// mangle the mess out of names and hope for forgiveness in the hereafter.
///
/// In addition, we want to make sure nothing goofy happens to our metrics and
/// so set the kind to Summarize. The prometheus sink _does not_ respect source
/// metadata and stores everything as quantiles.
fn sanitize(mut metric: metric::Telemetry) -> metric::Telemetry {
    let original_name: String = mem::replace(&mut metric.name, Default::default());
    // Every byte outside [a-zA-Z0-9:_] becomes '_'; a multi-byte UTF-8 char
    // therefore maps to one '_' per byte, same as the original Vec<u8> pass.
    let mut cleaned = String::with_capacity(128);
    for byte in original_name.as_bytes() {
        match *byte {
            b'a'...b'z' | b'A'...b'Z' | b'0'...b'9' | b':' | b'_' => {
                cleaned.push(*byte as char)
            }
            _ => cleaned.push('_'),
        }
    }
    metric.set_name(cleaned).aggr_summarize()
}
impl Sink for Prometheus {
    fn flush_interval(&self) -> Option<u64> {
        None
    }
    fn flush(&mut self) {
        // There is no flush for the Prometheus sink. Prometheus prefers to
        // pull via HTTP / Protobuf. See PrometheusSrv.
    }
    /// Sanitize the incoming point and merge it into the shared aggregation.
    fn deliver(&mut self, mut point: sync::Arc<Option<metric::Telemetry>>) -> () {
        let mut aggrs = self.aggrs.lock().unwrap();
        // `make_mut` clones the inner Option if the Arc is shared, then the
        // Telemetry is moved out of it (leaving None behind).
        let metric = sanitize(sync::Arc::make_mut(&mut point).take().unwrap());
        aggrs.insert(metric);
    }
    fn deliver_line(&mut self, _: sync::Arc<Option<metric::LogLine>>) -> () {
        // nothing, intentionally
    }
    /// Apply back-pressure once more than 10k points are waiting to be
    /// scraped.
    fn valve_state(&self) -> Valve {
        let aggrs = self.aggrs.lock().unwrap();
        if aggrs.count() > 10_000 {
            Valve::Closed
        } else {
            Valve::Open
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use metric;
    use quickcheck::{Arbitrary, Gen, QuickCheck, TestResult};
    impl Arbitrary for PrometheusAggr {
        fn arbitrary<G>(g: &mut G) -> Self
        where
            G: Gen,
        {
            // Preserve the invariant the binary searches rely on: the inner
            // Vec is sorted by (name, tags).
            let mut inner: Vec<metric::Telemetry> = Arbitrary::arbitrary(g);
            inner.sort_by(|a, b| prometheus_cmp(a, b).unwrap());
            PrometheusAggr { inner: inner }
        }
    }
    // * recombining points should increase the size of aggr by at most the
    //   size of the report vec (duplicates merge, so it can be less)
    #[test]
    fn test_recombine() {
        fn inner(
            mut aggr: PrometheusAggr,
            recomb: Vec<metric::Telemetry>,
        ) -> TestResult {
            let cur_cnt = aggr.count();
            let recomb_len = recomb.len();
            aggr.recombine(recomb);
            let lower = cur_cnt;
            let upper = cur_cnt + recomb_len;
            // BUGFIX: the original used `||`, making this assertion a
            // tautology; the intent is that the count lands in [lower, upper].
            assert!(lower <= aggr.count() && aggr.count() <= upper);
            TestResult::passed()
        }
        QuickCheck::new().tests(1000).max_tests(10000).quickcheck(
            inner as fn(PrometheusAggr, Vec<metric::Telemetry>) -> TestResult,
        );
    }
    #[test]
    fn test_reportable() {
        fn inner(mut aggr: PrometheusAggr) -> TestResult {
            let cur_cnt = aggr.count();
            let reportable = aggr.reportable();
            // Draining hands out everything and leaves the aggr empty.
            assert_eq!(cur_cnt, reportable.len());
            assert_eq!(0, aggr.count());
            TestResult::passed()
        }
        QuickCheck::new()
            .tests(1000)
            .max_tests(10000)
            .quickcheck(inner as fn(PrometheusAggr) -> TestResult);
    }
    // insertion must obey two properties, based on existence or not
    //
    // IF EXISTS
    //   - insertion should NOT increase total count
    //   - insertion WILL modify existing telemetry in aggregation
    //
    // IF NOT EXISTS
    //   - insertion WILL increase count by 1
    //   - insertion WILL make telemetry exist after the insertion
    #[test]
    fn test_insertion_exists_property() {
        fn inner(mut aggr: PrometheusAggr, telem: metric::Telemetry) -> TestResult {
            let cur_cnt = aggr.count();
            match aggr.find_match(&telem) {
                Some(other) => {
                    assert!(aggr.insert(telem.clone()));
                    assert_eq!(cur_cnt, aggr.count());
                    let new_t =
                        aggr.find_match(&telem).expect("could not find in test");
                    assert_eq!(other.count() + 1, new_t.count());
                }
                None => return TestResult::discard(),
            }
            TestResult::passed()
        }
        QuickCheck::new()
            .tests(1000)
            .max_tests(10000)
            .quickcheck(inner as fn(PrometheusAggr, metric::Telemetry) -> TestResult);
    }
    #[test]
    fn test_insertion_not_exists_property() {
        fn inner(mut aggr: PrometheusAggr, telem: metric::Telemetry) -> TestResult {
            let cur_cnt = aggr.count();
            match aggr.find_match(&telem) {
                Some(_) => return TestResult::discard(),
                None => {
                    assert!(aggr.insert(telem.clone()));
                    assert_eq!(cur_cnt + 1, aggr.count());
                    aggr.find_match(&telem).expect("could not find");
                }
            }
            TestResult::passed()
        }
        QuickCheck::new()
            .tests(1000)
            .max_tests(10000)
            .quickcheck(inner as fn(PrometheusAggr, metric::Telemetry) -> TestResult);
    }
    #[test]
    fn test_sanitization() {
        fn inner(metric: metric::Telemetry) -> TestResult {
            let metric = sanitize(metric);
            // Sanitized names contain only [a-zA-Z0-9:_] and the metric is
            // forced to the Summarize kind.
            assert!(metric.is_summarize());
            for c in metric.name.chars() {
                match c {
                    'a'...'z' | 'A'...'Z' | '0'...'9' | ':' | '_' => continue,
                    _ => return TestResult::failed(),
                }
            }
            TestResult::passed()
        }
        QuickCheck::new()
            .tests(10000)
            .max_tests(100000)
            .quickcheck(inner as fn(metric::Telemetry) -> TestResult);
    }
}
|
extern crate libc;
extern crate time;
#[cfg(windows)] extern crate winapi;
#[macro_use]
mod macros;
pub mod fs;
pub mod parse_time;
pub mod utf8;
#[cfg(unix)] pub mod c_types;
#[cfg(unix)] pub mod process;
#[cfg(unix)] pub mod signals;
#[cfg(unix)] pub mod utmpx;
#[cfg(windows)] pub mod wide;
|
use proconio::{input, marker::Bytes};
/// Minimum number of characters that must move: `s` can be turned into `t`
/// iff they are anagrams; the longest suffix of `s` that is a subsequence of
/// `t` (matched right-to-left) can stay put, everything else moves once.
fn main() {
    input! {
        n: usize,
        s: Bytes,
        t: Bytes,
    };
    // If `t` is not a permutation of `s`, no rearrangement works.
    {
        let mut ss = s.clone();
        let mut tt = t.clone();
        ss.sort_unstable();
        tt.sort_unstable();
        if ss != tt {
            println!("-1");
            return;
        }
    }
    // Greedily match characters of `s` from the right against `t[..j]`.
    let mut j = n;
    let mut len = 0;
    for &b in s.iter().rev() {
        match t[..j].iter().rposition(|&c| c == b) {
            Some(i) => {
                j = i;
                len += 1;
            }
            None => break,
        }
    }
    // Each unmatched character costs exactly one move.
    println!("{}", n - len);
}
|
use std::{future::Future, sync::Arc, time::Duration};
use async_trait::async_trait;
use futures::FutureExt;
use tokio::{sync::Barrier, task::JoinHandle};
#[async_trait]
pub trait EnsurePendingExt {
    /// Value produced once the future eventually resolves.
    type Out;
    /// Ensure that the future is pending. In the pending case, try to pass the given barrier. Afterwards await the future again.
    ///
    /// This is helpful to ensure a future is in a pending state before continuing with the test setup.
    ///
    /// Panics if the future resolves before the barrier is passed.
    async fn ensure_pending(self, barrier: Arc<Barrier>) -> Self::Out;
}
#[async_trait]
impl<F> EnsurePendingExt for F
where
    F: Future + Send + Unpin,
{
    type Out = F::Output;
    async fn ensure_pending(self, barrier: Arc<Barrier>) -> Self::Out {
        let mut fut = self.fuse();
        // `select_biased!` polls `fut` first: if it completes immediately it
        // was not pending, which is exactly the setup bug this helper catches.
        futures::select_biased! {
            _ = fut => panic!("fut should be pending"),
            _ = barrier.wait().fuse() => (),
        }
        // The barrier was passed while `fut` was pending; now drive the
        // (fused) future to completion.
        fut.await
    }
}
#[async_trait]
pub trait AbortAndWaitExt {
    /// Abort handle and wait for completion.
    ///
    /// Note that this is NOT just a "wait with timeout or panic". This extension is specific to [`JoinHandle`] and will:
    ///
    /// 1. Call [`JoinHandle::abort`].
    /// 2. Await the [`JoinHandle`] with a timeout (or panic if the timeout is reached).
    /// 3. Check that the handle returned a [`JoinError`] that signals that the tracked task was indeed cancelled and
    ///    didn't exit otherwise (either by finishing or by panicking).
    ///
    /// Panics if any of the expectations above is violated.
    async fn abort_and_wait(self);
}
#[async_trait]
impl<T> AbortAndWaitExt for JoinHandle<T>
where
    T: std::fmt::Debug + Send,
{
    async fn abort_and_wait(mut self) {
        self.abort();
        // One second should be ample for an aborted task to wind down.
        let join_err = tokio::time::timeout(Duration::from_secs(1), self)
            .await
            .expect("no timeout")
            .expect_err("handle was aborted and therefore MUST fail");
        // A non-cancelled JoinError means the task panicked instead of being
        // aborted — also a failure for this helper.
        assert!(join_err.is_cancelled());
    }
}
|
use crate::{sealed::Sealed, tuple::append::TupleAppend};
/// Takes an element from the **start** of the tuple, producing a new tuple.
///
/// Returns a tuple of the taken element and the remaining tuple.
///
/// ## Examples
/// ```
/// use fntools::tuple::take::TupleTake;
///
/// assert_eq!((999,).take(), (999, ()));
/// assert_eq!((47, "str", 14usize).take(), (47, ("str", 14usize)));
/// ```
///
/// ```compile_fail
/// use fntools::tuple_take::TupleTake;
///
/// // There is nothing you can take from empty tuple,
/// // so this code won't be compiled
/// assert_eq!(().take(), ());
/// ```
pub trait TupleTake: Sized + Sealed {
    /// Remaining part of the tuple, after taking an element.
    /// Constrained so that appending `Take` back reconstructs `Self`.
    type Rem: TupleAppend<Self::Take, Res = Self>;
    /// Taken element
    type Take;
    /// Take element from tuple.
    fn take(self) -> (Self::Take, Self::Rem);
}
impl<T> TupleTake for (T,) {
    type Rem = ();
    type Take = T;
    /// Takes the only element; the remainder is the empty tuple.
    #[inline]
    fn take(self) -> (Self::Take, Self::Rem) {
        let (head,) = self;
        (head, ())
    }
}
// Implements `TupleTake` for longer tuples: the head element `T` is taken and
// the tail (`$types`) becomes the remainder tuple.
macro_rules! tuple_impl {
    ($( $types:ident, )*) => {
        impl<T, $( $types, )*> TupleTake for (T, $( $types, )*) {
            type Rem = ($( $types, )*);
            type Take = T;
            #[inline]
            #[allow(non_snake_case)]
            fn take(self) -> (Self::Take, Self::Rem) {
                let (take, $( $types, )*) = self;
                (take, ($( $types, )*))
            }
        }
    };
}
// `for_tuples!` is declared elsewhere in the crate; presumably it invokes
// `tuple_impl!` for each prefix of this list — confirm in the macros module.
for_tuples!(A, B, C, D, E, F, G, H, I, J, K, # tuple_impl);
|
use crate::{
builtins::{type_::PointerSlot, PyInt, PyStr, PyStrInterned, PyStrRef, PyType, PyTypeRef},
bytecode::ComparisonOperator,
common::hash::PyHash,
convert::{ToPyObject, ToPyResult},
function::{
Either, FromArgs, FuncArgs, OptionalArg, PyComparisonValue, PyMethodDef, PySetterValue,
},
identifier,
protocol::{
PyBuffer, PyIterReturn, PyMapping, PyMappingMethods, PyNumber, PyNumberMethods,
PyNumberSlots, PySequence, PySequenceMethods,
},
vm::Context,
AsObject, Py, PyObject, PyObjectRef, PyPayload, PyRef, PyResult, VirtualMachine,
};
use crossbeam_utils::atomic::AtomicCell;
use num_traits::{Signed, ToPrimitive};
use std::{borrow::Borrow, cmp::Ordering, ops::Deref};
/// Wraps a function pointer in `AtomicCell::new(Some(..))`, the storage form
/// used by the slot fields of [`PyTypeSlots`].
#[macro_export]
macro_rules! atomic_func {
    ($x:expr) => {
        crossbeam_utils::atomic::AtomicCell::new(Some($x))
    };
}
// The corresponding field in CPython is `tp_` prefixed.
// e.g. name -> tp_name
/// Table of special-method implementations ("slots") for a Python type,
/// mirroring CPython's `PyTypeObject` layout. `AtomicCell` fields can be
/// swapped at runtime (e.g. when a heap type is mutated).
#[derive(Default)]
#[non_exhaustive]
pub struct PyTypeSlots {
    /// # Safety
    /// For static types, always safe.
    /// For heap types, `__name__` must alive
    pub(crate) name: &'static str, // tp_name with <module>.<class> for print, not class name
    pub basicsize: usize,
    // tp_itemsize
    // Methods to implement standard operations
    // Method suites for standard classes
    pub as_number: PyNumberSlots,
    pub as_sequence: AtomicCell<Option<PointerSlot<PySequenceMethods>>>,
    pub as_mapping: AtomicCell<Option<PointerSlot<PyMappingMethods>>>,
    // More standard operations (here for binary compatibility)
    pub hash: AtomicCell<Option<HashFunc>>,
    pub call: AtomicCell<Option<GenericMethod>>,
    // tp_str
    pub repr: AtomicCell<Option<StringifyFunc>>,
    pub getattro: AtomicCell<Option<GetattroFunc>>,
    pub setattro: AtomicCell<Option<SetattroFunc>>,
    // Functions to access object as input/output buffer
    pub as_buffer: Option<AsBufferFunc>,
    // Assigned meaning in release 2.1
    // rich comparisons
    pub richcompare: AtomicCell<Option<RichCompareFunc>>,
    // Iterators
    pub iter: AtomicCell<Option<IterFunc>>,
    pub iternext: AtomicCell<Option<IterNextFunc>>,
    pub methods: &'static [PyMethodDef],
    // Flags to define presence of optional/expanded features
    pub flags: PyTypeFlags,
    // tp_doc
    pub doc: Option<&'static str>,
    // Strong reference on a heap type, borrowed reference on a static type
    // tp_base
    // tp_dict
    pub descr_get: AtomicCell<Option<DescrGetFunc>>,
    pub descr_set: AtomicCell<Option<DescrSetFunc>>,
    // tp_dictoffset
    pub init: AtomicCell<Option<InitFunc>>,
    // tp_alloc
    pub new: AtomicCell<Option<NewFunc>>,
    // tp_free
    // tp_is_gc
    // tp_bases
    // tp_mro
    // tp_cache
    // tp_subclasses
    // tp_weaklist
    pub del: AtomicCell<Option<DelFunc>>,
    // The count of tp_members.
    pub member_count: usize,
}
impl PyTypeSlots {
    /// Slots for a static type: only `name` and `flags` are set, every other
    /// slot starts at its (empty) default.
    pub fn new(name: &'static str, flags: PyTypeFlags) -> Self {
        let mut slots = Self::default();
        slots.name = name;
        slots.flags = flags;
        slots
    }
    /// Default slot table for heap (user-defined) types.
    pub fn heap_default() -> Self {
        // init: AtomicCell::new(Some(init_wrapper)),
        Self::default()
    }
}
impl std::fmt::Debug for PyTypeSlots {
    /// The slots are bare function pointers, so just print the type name.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "PyTypeSlots")
    }
}
// Bit positions mirror CPython's `Py_TPFLAGS_*` constants.
bitflags! {
    #[derive(Copy, Clone, Debug, PartialEq)]
    #[non_exhaustive]
    pub struct PyTypeFlags: u64 {
        const IMMUTABLETYPE = 1 << 8;
        const HEAPTYPE = 1 << 9;
        const BASETYPE = 1 << 10;
        const METHOD_DESCRIPTOR = 1 << 17;
        const HAS_DICT = 1 << 40;
        // Debug-only marker; checked by `is_created_with_flags` —
        // presumably set by the sanctioned type-construction paths.
        #[cfg(debug_assertions)]
        const _CREATED_WITH_FLAGS = 1 << 63;
    }
}
impl PyTypeFlags {
    // Default used for both built-in and normal classes: empty, for now.
    // CPython default: Py_TPFLAGS_HAVE_STACKLESS_EXTENSION | Py_TPFLAGS_HAVE_VERSION_TAG
    pub const DEFAULT: Self = Self::empty();
    // CPython: See initialization of flags in type_new.
    /// Used for types created in Python. Subclassable and are a
    /// heaptype.
    pub const fn heap_type_flags() -> Self {
        // `match` rather than `unwrap`: Option::unwrap is not usable in this
        // `const fn`. All three inputs are valid bits, so the None arm is
        // genuinely unreachable.
        match Self::from_bits(Self::DEFAULT.bits() | Self::HEAPTYPE.bits() | Self::BASETYPE.bits())
        {
            Some(flags) => flags,
            None => unreachable!(),
        }
    }
    /// True when every bit of `flag` is set on `self`.
    pub fn has_feature(self, flag: Self) -> bool {
        self.contains(flag)
    }
    #[cfg(debug_assertions)]
    pub fn is_created_with_flags(self) -> bool {
        self.contains(Self::_CREATED_WITH_FLAGS)
    }
}
impl Default for PyTypeFlags {
fn default() -> Self {
Self::DEFAULT
}
}
// Signatures of the function pointers stored in `PyTypeSlots`; each mirrors
// the corresponding CPython slot typedef.
pub(crate) type GenericMethod = fn(&PyObject, FuncArgs, &VirtualMachine) -> PyResult;
pub(crate) type HashFunc = fn(&PyObject, &VirtualMachine) -> PyResult<PyHash>;
// CallFunc = GenericMethod
pub(crate) type StringifyFunc = fn(&PyObject, &VirtualMachine) -> PyResult<PyStrRef>;
pub(crate) type GetattroFunc = fn(&PyObject, &Py<PyStr>, &VirtualMachine) -> PyResult;
pub(crate) type SetattroFunc =
    fn(&PyObject, &Py<PyStr>, PySetterValue, &VirtualMachine) -> PyResult<()>;
pub(crate) type AsBufferFunc = fn(&PyObject, &VirtualMachine) -> PyResult<PyBuffer>;
pub(crate) type RichCompareFunc = fn(
    &PyObject,
    &PyObject,
    PyComparisonOp,
    &VirtualMachine,
) -> PyResult<Either<PyObjectRef, PyComparisonValue>>;
pub(crate) type IterFunc = fn(PyObjectRef, &VirtualMachine) -> PyResult;
pub(crate) type IterNextFunc = fn(&PyObject, &VirtualMachine) -> PyResult<PyIterReturn>;
// (descriptor, obj, type) -> bound value; obj/type optional as in CPython.
pub(crate) type DescrGetFunc =
    fn(PyObjectRef, Option<PyObjectRef>, Option<PyObjectRef>, &VirtualMachine) -> PyResult;
pub(crate) type DescrSetFunc =
    fn(&PyObject, PyObjectRef, PySetterValue, &VirtualMachine) -> PyResult<()>;
pub(crate) type NewFunc = fn(PyTypeRef, FuncArgs, &VirtualMachine) -> PyResult;
pub(crate) type InitFunc = fn(PyObjectRef, FuncArgs, &VirtualMachine) -> PyResult<()>;
pub(crate) type DelFunc = fn(&PyObject, &VirtualMachine) -> PyResult<()>;
// slot_sq_length
/// Calls `__len__` and validates the result: must be an int, non-negative,
/// and fit in an index-sized integer.
pub(crate) fn len_wrapper(obj: &PyObject, vm: &VirtualMachine) -> PyResult<usize> {
    let length_obj = vm.call_special_method(obj, identifier!(vm, __len__), ())?;
    let int_payload = length_obj.payload::<PyInt>().ok_or_else(|| {
        vm.new_type_error(format!(
            "'{}' object cannot be interpreted as an integer",
            length_obj.class()
        ))
    })?;
    let bigint = int_payload.as_bigint();
    if bigint.is_negative() {
        return Err(vm.new_value_error("__len__() should return >= 0".to_owned()));
    }
    let fitted = bigint.to_isize().ok_or_else(|| {
        vm.new_overflow_error("cannot fit 'int' into an index-sized integer".to_owned())
    })?;
    Ok(fitted as usize)
}
// Builds a closure implementing a unary number slot by dispatching to the
// named special method on the operand.
macro_rules! number_unary_op_wrapper {
    ($name:ident) => {
        |a, vm| vm.call_special_method(a.deref(), identifier!(vm, $name), ())
    };
}
// Binary slot: `a` receives the call, `b` is passed as the argument.
macro_rules! number_binary_op_wrapper {
    ($name:ident) => {
        |a, b, vm| vm.call_special_method(a, identifier!(vm, $name), (b.to_owned(),))
    };
}
// Reflected binary slot: operands swapped, so `b` receives the call.
macro_rules! number_binary_right_op_wrapper {
    ($name:ident) => {
        |a, b, vm| vm.call_special_method(b, identifier!(vm, $name), (a.to_owned(),))
    };
}
/// Forwards subscripting (`obj[needle]`) to the type's `__getitem__`.
fn getitem_wrapper<K: ToPyObject>(obj: &PyObject, needle: K, vm: &VirtualMachine) -> PyResult {
    vm.call_special_method(obj, identifier!(vm, __getitem__), (needle,))
}
/// Forwards `obj[needle] = value` to `__setitem__`, or `del obj[needle]`
/// (when `value` is `None`) to `__delitem__`.
fn setitem_wrapper<K: ToPyObject>(
    obj: &PyObject,
    needle: K,
    value: Option<PyObjectRef>,
    vm: &VirtualMachine,
) -> PyResult<()> {
    let result = if let Some(value) = value {
        vm.call_special_method(obj, identifier!(vm, __setitem__), (needle, value))
    } else {
        vm.call_special_method(obj, identifier!(vm, __delitem__), (needle,))
    };
    // The special method's return value is discarded; only errors propagate.
    result.map(drop)
}
/// Default repr slot: calls `__repr__` and requires the result to be a `str`.
fn repr_wrapper(zelf: &PyObject, vm: &VirtualMachine) -> PyResult<PyStrRef> {
    let obj = vm.call_special_method(zelf, identifier!(vm, __repr__), ())?;
    match obj.downcast::<PyStr>() {
        Ok(s) => Ok(s),
        Err(non_str) => Err(vm.new_type_error(format!(
            "__repr__ returned non-string (type {})",
            non_str.class()
        ))),
    }
}
/// Default hash slot: calls `__hash__` and folds the integer result into a
/// `PyHash`.
fn hash_wrapper(zelf: &PyObject, vm: &VirtualMachine) -> PyResult<PyHash> {
    let ret = vm.call_special_method(zelf, identifier!(vm, __hash__), ())?;
    match ret.payload_if_subclass::<PyInt>(vm) {
        Some(int) => Ok(rustpython_common::hash::hash_bigint(int.as_bigint())),
        None => Err(vm.new_type_error("__hash__ method should return an integer".to_owned())),
    }
}
/// Marks a type as unhashable. Similar to PyObject_HashNotImplemented in CPython
///
/// Installed as the hash slot when a class sets `__hash__ = None`; always
/// raises `TypeError`.
pub fn hash_not_implemented(zelf: &PyObject, vm: &VirtualMachine) -> PyResult<PyHash> {
    Err(vm.new_type_error(format!("unhashable type: {}", zelf.class().name())))
}
/// Default call slot: delegates to the Python-level `__call__`.
fn call_wrapper(zelf: &PyObject, args: FuncArgs, vm: &VirtualMachine) -> PyResult {
    let method = identifier!(vm, __call__);
    vm.call_special_method(zelf, method, args)
}
/// Default attribute-lookup slot: tries `__getattribute__`, falling back to
/// `__getattr__` (when the class defines it) if the first lookup fails.
fn getattro_wrapper(zelf: &PyObject, name: &Py<PyStr>, vm: &VirtualMachine) -> PyResult {
    let __getattribute__ = identifier!(vm, __getattribute__);
    let __getattr__ = identifier!(vm, __getattr__);
    match vm.call_special_method(zelf, __getattribute__, (name.to_owned(),)) {
        Ok(r) => Ok(r),
        // NOTE(review): this falls back on *any* error from __getattribute__,
        // whereas CPython only falls back when it raises AttributeError —
        // confirm whether this divergence is intentional.
        Err(_) if zelf.class().has_attr(__getattr__) => {
            vm.call_special_method(zelf, __getattr__, (name.to_owned(),))
        }
        Err(e) => Err(e),
    }
}
/// Default attribute-assignment slot: `Assign` routes to `__setattr__`,
/// `Delete` routes to `__delattr__`.
fn setattro_wrapper(
    zelf: &PyObject,
    name: &Py<PyStr>,
    value: PySetterValue,
    vm: &VirtualMachine,
) -> PyResult<()> {
    let name = name.to_owned();
    if let PySetterValue::Assign(value) = value {
        vm.call_special_method(zelf, identifier!(vm, __setattr__), (name, value))?;
    } else {
        vm.call_special_method(zelf, identifier!(vm, __delattr__), (name,))?;
    }
    Ok(())
}
/// Default rich-comparison slot: calls the special method matching `op`
/// (`__eq__`, `__lt__`, …) and returns the raw object result.
pub(crate) fn richcompare_wrapper(
    zelf: &PyObject,
    other: &PyObject,
    op: PyComparisonOp,
    vm: &VirtualMachine,
) -> PyResult<Either<PyObjectRef, PyComparisonValue>> {
    let method = op.method_name(&vm.ctx);
    let result = vm.call_special_method(zelf, method, (other.to_owned(),))?;
    Ok(Either::A(result))
}
/// Default iter slot: delegates to the Python-level `__iter__`.
fn iter_wrapper(zelf: PyObjectRef, vm: &VirtualMachine) -> PyResult {
    vm.call_special_method(&zelf, identifier!(vm, __iter__), ())
}
// PyObject_SelfIter in CPython
/// Identity iterator: returns the object itself (used by `SelfIter` types).
fn self_iter(zelf: PyObjectRef, _vm: &VirtualMachine) -> PyResult {
    Ok(zelf)
}
/// Default iternext slot: calls `__next__` and converts the outcome
/// (value or `StopIteration`) into a `PyIterReturn`.
fn iternext_wrapper(zelf: &PyObject, vm: &VirtualMachine) -> PyResult<PyIterReturn> {
    let result = vm.call_special_method(zelf, identifier!(vm, __next__), ());
    PyIterReturn::from_pyresult(result, vm)
}
/// Default descriptor-get slot: delegates to the Python-level `__get__`.
fn descr_get_wrapper(
    zelf: PyObjectRef,
    obj: Option<PyObjectRef>,
    cls: Option<PyObjectRef>,
    vm: &VirtualMachine,
) -> PyResult {
    vm.call_special_method(&zelf, identifier!(vm, __get__), (obj, cls))
}
/// Default descriptor-set slot: `Assign` routes to `__set__`, `Delete`
/// routes to `__delete__`.
fn descr_set_wrapper(
    zelf: &PyObject,
    obj: PyObjectRef,
    value: PySetterValue,
    vm: &VirtualMachine,
) -> PyResult<()> {
    let result = match value {
        PySetterValue::Assign(val) => {
            vm.call_special_method(zelf, identifier!(vm, __set__), (obj, val))
        }
        PySetterValue::Delete => {
            vm.call_special_method(zelf, identifier!(vm, __delete__), (obj,))
        }
    };
    // Discard the returned object; only propagate errors.
    result.map(drop)
}
/// Default init slot: calls `__init__` and enforces that it returns `None`.
fn init_wrapper(obj: PyObjectRef, args: FuncArgs, vm: &VirtualMachine) -> PyResult<()> {
    let ret = vm.call_special_method(&obj, identifier!(vm, __init__), args)?;
    if vm.is_none(&ret) {
        Ok(())
    } else {
        Err(vm.new_type_error("__init__ must return None".to_owned()))
    }
}
/// Default new slot: looks up the type's `__new__` and calls it with `cls`
/// prepended to the argument list.
pub(crate) fn new_wrapper(cls: PyTypeRef, mut args: FuncArgs, vm: &VirtualMachine) -> PyResult {
    // NOTE(review): unwrap assumes every type resolves a `__new__` attribute
    // (presumably inherited from `object`) — confirm this invariant holds.
    let new = cls.get_attr(identifier!(vm, __new__)).unwrap();
    args.prepend_arg(cls.into());
    new.call(args, vm)
}
/// Default del slot: delegates to the Python-level `__del__`, discarding the
/// returned object.
fn del_wrapper(zelf: &PyObject, vm: &VirtualMachine) -> PyResult<()> {
    vm.call_special_method(zelf, identifier!(vm, __del__), ())
        .map(drop)
}
impl PyType {
    /// Propagate the (un)definition of the special method `name` into the
    /// corresponding type slot(s). `ADD` selects whether the slot is being
    /// installed (attribute added) or cleared (attribute removed).
    pub(crate) fn update_slot<const ADD: bool>(&self, name: &'static PyStrInterned, ctx: &Context) {
        debug_assert!(name.as_str().starts_with("__"));
        debug_assert!(name.as_str().ends_with("__"));
        // Install `$func` when ADD, clear the slot otherwise.
        macro_rules! toggle_slot {
            ($name:ident, $func:expr) => {{
                self.slots.$name.store(if ADD { Some($func) } else { None });
            }};
        }
        // Same, but for a slot nested in a sub-table (e.g. `as_number`).
        macro_rules! toggle_subslot {
            ($group:ident, $name:ident, $func:expr) => {
                self.slots
                    .$group
                    .$name
                    .store(if ADD { Some($func) } else { None });
            };
        }
        // Unconditionally (re)install `$func`; used for wrappers that handle
        // both the defined and removed cases themselves.
        macro_rules! update_slot {
            ($name:ident, $func:expr) => {{
                self.slots.$name.store(Some($func));
            }};
        }
        // Point a slot at a method table stored in the heap-type extension.
        macro_rules! update_pointer_slot {
            ($name:ident, $pointed:ident) => {{
                self.slots
                    .$name
                    .store(unsafe { PointerSlot::from_heaptype(self, |ext| &ext.$pointed) });
            }};
        }
        // Toggle a function inside the heap-type extension's method tables.
        macro_rules! toggle_ext_func {
            ($n1:ident, $n2:ident, $func:expr) => {{
                self.heaptype_ext.as_ref().unwrap().$n1.$n2.store(if ADD {
                    Some($func)
                } else {
                    None
                });
            }};
        }
        match name {
            // `__len__` feeds both the sequence and the mapping protocol.
            _ if name == identifier!(ctx, __len__) => {
                // update_slot!(as_mapping, slot_as_mapping);
                toggle_ext_func!(sequence_methods, length, |seq, vm| len_wrapper(seq.obj, vm));
                update_pointer_slot!(as_sequence, sequence_methods);
                toggle_ext_func!(mapping_methods, length, |mapping, vm| len_wrapper(
                    mapping.obj,
                    vm
                ));
                update_pointer_slot!(as_mapping, mapping_methods);
            }
            _ if name == identifier!(ctx, __getitem__) => {
                // update_slot!(as_mapping, slot_as_mapping);
                toggle_ext_func!(sequence_methods, item, |seq, i, vm| getitem_wrapper(
                    seq.obj, i, vm
                ));
                update_pointer_slot!(as_sequence, sequence_methods);
                toggle_ext_func!(mapping_methods, subscript, |mapping, key, vm| {
                    getitem_wrapper(mapping.obj, key, vm)
                });
                update_pointer_slot!(as_mapping, mapping_methods);
            }
            _ if name == identifier!(ctx, __setitem__) || name == identifier!(ctx, __delitem__) => {
                // update_slot!(as_mapping, slot_as_mapping);
                toggle_ext_func!(sequence_methods, ass_item, |seq, i, value, vm| {
                    setitem_wrapper(seq.obj, i, value, vm)
                });
                update_pointer_slot!(as_sequence, sequence_methods);
                toggle_ext_func!(mapping_methods, ass_subscript, |mapping, key, value, vm| {
                    setitem_wrapper(mapping.obj, key, value, vm)
                });
                update_pointer_slot!(as_mapping, mapping_methods);
            }
            _ if name == identifier!(ctx, __repr__) => {
                update_slot!(repr, repr_wrapper);
            }
            _ if name == identifier!(ctx, __hash__) => {
                // `__hash__ = None` marks the type as unhashable.
                let is_unhashable = self
                    .attributes
                    .read()
                    .get(identifier!(ctx, __hash__))
                    .map_or(false, |a| a.is(&ctx.none));
                let wrapper = if is_unhashable {
                    hash_not_implemented
                } else {
                    hash_wrapper
                };
                toggle_slot!(hash, wrapper);
            }
            _ if name == identifier!(ctx, __call__) => {
                toggle_slot!(call, call_wrapper);
            }
            _ if name == identifier!(ctx, __getattr__)
                || name == identifier!(ctx, __getattribute__) =>
            {
                update_slot!(getattro, getattro_wrapper);
            }
            _ if name == identifier!(ctx, __setattr__) || name == identifier!(ctx, __delattr__) => {
                update_slot!(setattro, setattro_wrapper);
            }
            // All six comparison methods share one richcompare slot.
            _ if name == identifier!(ctx, __eq__)
                || name == identifier!(ctx, __ne__)
                || name == identifier!(ctx, __le__)
                || name == identifier!(ctx, __lt__)
                || name == identifier!(ctx, __ge__)
                || name == identifier!(ctx, __gt__) =>
            {
                update_slot!(richcompare, richcompare_wrapper);
            }
            _ if name == identifier!(ctx, __iter__) => {
                toggle_slot!(iter, iter_wrapper);
            }
            _ if name == identifier!(ctx, __next__) => {
                toggle_slot!(iternext, iternext_wrapper);
            }
            _ if name == identifier!(ctx, __get__) => {
                toggle_slot!(descr_get, descr_get_wrapper);
            }
            _ if name == identifier!(ctx, __set__) || name == identifier!(ctx, __delete__) => {
                update_slot!(descr_set, descr_set_wrapper);
            }
            _ if name == identifier!(ctx, __init__) => {
                toggle_slot!(init, init_wrapper);
            }
            _ if name == identifier!(ctx, __new__) => {
                toggle_slot!(new, new_wrapper);
            }
            _ if name == identifier!(ctx, __del__) => {
                toggle_slot!(del, del_wrapper);
            }
            // Number protocol: unary conversions.
            _ if name == identifier!(ctx, __int__) => {
                toggle_subslot!(as_number, int, number_unary_op_wrapper!(__int__));
            }
            _ if name == identifier!(ctx, __index__) => {
                toggle_subslot!(as_number, index, number_unary_op_wrapper!(__index__));
            }
            _ if name == identifier!(ctx, __float__) => {
                toggle_subslot!(as_number, float, number_unary_op_wrapper!(__float__));
            }
            // Number protocol: binary operators and their reflected/in-place forms.
            _ if name == identifier!(ctx, __add__) => {
                toggle_subslot!(as_number, add, number_binary_op_wrapper!(__add__));
            }
            _ if name == identifier!(ctx, __radd__) => {
                toggle_subslot!(
                    as_number,
                    right_add,
                    number_binary_right_op_wrapper!(__radd__)
                );
            }
            _ if name == identifier!(ctx, __iadd__) => {
                toggle_subslot!(as_number, inplace_add, number_binary_op_wrapper!(__iadd__));
            }
            _ if name == identifier!(ctx, __sub__) => {
                toggle_subslot!(as_number, subtract, number_binary_op_wrapper!(__sub__));
            }
            _ if name == identifier!(ctx, __rsub__) => {
                toggle_subslot!(
                    as_number,
                    right_subtract,
                    number_binary_right_op_wrapper!(__rsub__)
                );
            }
            _ if name == identifier!(ctx, __isub__) => {
                toggle_subslot!(
                    as_number,
                    inplace_subtract,
                    number_binary_op_wrapper!(__isub__)
                );
            }
            _ if name == identifier!(ctx, __mul__) => {
                toggle_subslot!(as_number, multiply, number_binary_op_wrapper!(__mul__));
            }
            _ if name == identifier!(ctx, __rmul__) => {
                toggle_subslot!(
                    as_number,
                    right_multiply,
                    number_binary_right_op_wrapper!(__rmul__)
                );
            }
            _ if name == identifier!(ctx, __imul__) => {
                toggle_subslot!(
                    as_number,
                    inplace_multiply,
                    number_binary_op_wrapper!(__imul__)
                );
            }
            _ if name == identifier!(ctx, __mod__) => {
                toggle_subslot!(as_number, remainder, number_binary_op_wrapper!(__mod__));
            }
            _ if name == identifier!(ctx, __rmod__) => {
                toggle_subslot!(
                    as_number,
                    right_remainder,
                    number_binary_right_op_wrapper!(__rmod__)
                );
            }
            _ if name == identifier!(ctx, __imod__) => {
                toggle_subslot!(
                    as_number,
                    inplace_remainder,
                    number_binary_op_wrapper!(__imod__)
                );
            }
            _ if name == identifier!(ctx, __divmod__) => {
                toggle_subslot!(as_number, divmod, number_binary_op_wrapper!(__divmod__));
            }
            _ if name == identifier!(ctx, __rdivmod__) => {
                toggle_subslot!(
                    as_number,
                    right_divmod,
                    number_binary_right_op_wrapper!(__rdivmod__)
                );
            }
            // `__pow__` is ternary: the modulus argument is passed only when
            // it is not None.
            _ if name == identifier!(ctx, __pow__) => {
                toggle_subslot!(as_number, power, |a, b, c, vm| {
                    let args = if vm.is_none(c) {
                        vec![b.to_owned()]
                    } else {
                        vec![b.to_owned(), c.to_owned()]
                    };
                    vm.call_special_method(a, identifier!(vm, __pow__), args)
                });
            }
            _ if name == identifier!(ctx, __rpow__) => {
                toggle_subslot!(as_number, right_power, |a, b, c, vm| {
                    let args = if vm.is_none(c) {
                        vec![a.to_owned()]
                    } else {
                        vec![a.to_owned(), c.to_owned()]
                    };
                    vm.call_special_method(b, identifier!(vm, __rpow__), args)
                });
            }
            _ if name == identifier!(ctx, __ipow__) => {
                toggle_subslot!(as_number, inplace_power, |a, b, _, vm| {
                    vm.call_special_method(a, identifier!(vm, __ipow__), (b.to_owned(),))
                });
            }
            _ if name == identifier!(ctx, __lshift__) => {
                toggle_subslot!(as_number, lshift, number_binary_op_wrapper!(__lshift__));
            }
            _ if name == identifier!(ctx, __rlshift__) => {
                toggle_subslot!(
                    as_number,
                    right_lshift,
                    number_binary_right_op_wrapper!(__rlshift__)
                );
            }
            _ if name == identifier!(ctx, __ilshift__) => {
                toggle_subslot!(
                    as_number,
                    inplace_lshift,
                    number_binary_op_wrapper!(__ilshift__)
                );
            }
            _ if name == identifier!(ctx, __rshift__) => {
                toggle_subslot!(as_number, rshift, number_binary_op_wrapper!(__rshift__));
            }
            _ if name == identifier!(ctx, __rrshift__) => {
                toggle_subslot!(
                    as_number,
                    right_rshift,
                    number_binary_right_op_wrapper!(__rrshift__)
                );
            }
            _ if name == identifier!(ctx, __irshift__) => {
                toggle_subslot!(
                    as_number,
                    inplace_rshift,
                    number_binary_op_wrapper!(__irshift__)
                );
            }
            _ if name == identifier!(ctx, __and__) => {
                toggle_subslot!(as_number, and, number_binary_op_wrapper!(__and__));
            }
            _ if name == identifier!(ctx, __rand__) => {
                toggle_subslot!(
                    as_number,
                    right_and,
                    number_binary_right_op_wrapper!(__rand__)
                );
            }
            _ if name == identifier!(ctx, __iand__) => {
                toggle_subslot!(as_number, inplace_and, number_binary_op_wrapper!(__iand__));
            }
            _ if name == identifier!(ctx, __xor__) => {
                toggle_subslot!(as_number, xor, number_binary_op_wrapper!(__xor__));
            }
            _ if name == identifier!(ctx, __rxor__) => {
                toggle_subslot!(
                    as_number,
                    right_xor,
                    number_binary_right_op_wrapper!(__rxor__)
                );
            }
            _ if name == identifier!(ctx, __ixor__) => {
                toggle_subslot!(as_number, inplace_xor, number_binary_op_wrapper!(__ixor__));
            }
            _ if name == identifier!(ctx, __or__) => {
                toggle_subslot!(as_number, or, number_binary_op_wrapper!(__or__));
            }
            _ if name == identifier!(ctx, __ror__) => {
                toggle_subslot!(
                    as_number,
                    right_or,
                    number_binary_right_op_wrapper!(__ror__)
                );
            }
            _ if name == identifier!(ctx, __ior__) => {
                toggle_subslot!(as_number, inplace_or, number_binary_op_wrapper!(__ior__));
            }
            _ if name == identifier!(ctx, __floordiv__) => {
                toggle_subslot!(
                    as_number,
                    floor_divide,
                    number_binary_op_wrapper!(__floordiv__)
                );
            }
            _ if name == identifier!(ctx, __rfloordiv__) => {
                toggle_subslot!(
                    as_number,
                    right_floor_divide,
                    number_binary_right_op_wrapper!(__rfloordiv__)
                );
            }
            _ if name == identifier!(ctx, __ifloordiv__) => {
                toggle_subslot!(
                    as_number,
                    inplace_floor_divide,
                    number_binary_op_wrapper!(__ifloordiv__)
                );
            }
            _ if name == identifier!(ctx, __truediv__) => {
                toggle_subslot!(
                    as_number,
                    true_divide,
                    number_binary_op_wrapper!(__truediv__)
                );
            }
            _ if name == identifier!(ctx, __rtruediv__) => {
                toggle_subslot!(
                    as_number,
                    right_true_divide,
                    number_binary_right_op_wrapper!(__rtruediv__)
                );
            }
            _ if name == identifier!(ctx, __itruediv__) => {
                toggle_subslot!(
                    as_number,
                    inplace_true_divide,
                    number_binary_op_wrapper!(__itruediv__)
                );
            }
            _ if name == identifier!(ctx, __matmul__) => {
                toggle_subslot!(
                    as_number,
                    matrix_multiply,
                    number_binary_op_wrapper!(__matmul__)
                );
            }
            _ if name == identifier!(ctx, __rmatmul__) => {
                toggle_subslot!(
                    as_number,
                    right_matrix_multiply,
                    number_binary_right_op_wrapper!(__rmatmul__)
                );
            }
            _ if name == identifier!(ctx, __imatmul__) => {
                toggle_subslot!(
                    as_number,
                    inplace_matrix_multiply,
                    number_binary_op_wrapper!(__imatmul__)
                );
            }
            // Other special methods have no dedicated slot.
            _ => {}
        }
    }
}
/// A type with a Python-visible constructor (`__new__`).
#[pyclass]
pub trait Constructor: PyPayload {
    /// Parameter specification the raw `FuncArgs` are bound against.
    type Args: FromArgs;
    #[inline]
    #[pyslot]
    fn slot_new(cls: PyTypeRef, args: FuncArgs, vm: &VirtualMachine) -> PyResult {
        let args: Self::Args = args.bind(vm)?;
        Self::py_new(cls, args, vm)
    }
    /// Construct an instance of `cls` from already-bound arguments.
    fn py_new(cls: PyTypeRef, args: Self::Args, vm: &VirtualMachine) -> PyResult;
}
/// Constructor for types whose `__new__` ignores its arguments and builds a
/// `Default` value.
#[pyclass]
pub trait DefaultConstructor: PyPayload + Default {
    #[inline]
    #[pyslot]
    fn slot_new(cls: PyTypeRef, _args: FuncArgs, vm: &VirtualMachine) -> PyResult {
        Self::default().into_ref_with_type(vm, cls).map(Into::into)
    }
}
/// For types that cannot be instantiated through Python code.
///
/// The blanket `Constructor` impl below makes `__new__` raise `TypeError`.
pub trait Unconstructible: PyPayload {}
impl<T> Constructor for T
where
    T: Unconstructible,
{
    type Args = FuncArgs;
    /// Always fails: unconstructible types reject Python-level instantiation.
    fn py_new(cls: PyTypeRef, _args: Self::Args, vm: &VirtualMachine) -> PyResult {
        Err(vm.new_type_error(format!("cannot create {} instances", cls.slot_name())))
    }
}
/// A type exposing a Python-level `__init__`.
#[pyclass]
pub trait Initializer: PyPayload {
    /// Parameter specification the raw `FuncArgs` are bound against.
    type Args: FromArgs;
    #[pyslot]
    #[inline]
    fn slot_init(zelf: PyObjectRef, args: FuncArgs, vm: &VirtualMachine) -> PyResult<()> {
        let zelf = zelf.try_into_value(vm)?;
        let args: Self::Args = args.bind(vm)?;
        Self::init(zelf, args, vm)
    }
    #[pymethod]
    #[inline]
    fn __init__(zelf: PyRef<Self>, args: Self::Args, vm: &VirtualMachine) -> PyResult<()> {
        Self::init(zelf, args, vm)
    }
    /// Initialize `zelf` from bound arguments.
    fn init(zelf: PyRef<Self>, args: Self::Args, vm: &VirtualMachine) -> PyResult<()>;
}
/// A type with a Python-level finalizer (`__del__`).
#[pyclass]
pub trait Destructor: PyPayload {
    #[inline] // for __del__
    #[pyslot]
    fn slot_del(zelf: &PyObject, vm: &VirtualMachine) -> PyResult<()> {
        let zelf = zelf
            .downcast_ref()
            .ok_or_else(|| vm.new_type_error("unexpected payload for __del__".to_owned()))?;
        Self::del(zelf, vm)
    }
    #[pymethod]
    fn __del__(zelf: PyObjectRef, vm: &VirtualMachine) -> PyResult<()> {
        Self::slot_del(&zelf, vm)
    }
    /// Finalize `zelf`.
    fn del(zelf: &Py<Self>, vm: &VirtualMachine) -> PyResult<()>;
}
/// A type callable from Python (`__call__`).
#[pyclass]
pub trait Callable: PyPayload {
    /// Parameter specification the raw `FuncArgs` are bound against.
    type Args: FromArgs;
    #[inline]
    #[pyslot]
    fn slot_call(zelf: &PyObject, args: FuncArgs, vm: &VirtualMachine) -> PyResult {
        let zelf = zelf.downcast_ref().ok_or_else(|| {
            // Best-effort description of the receiver for the error message:
            // prefer its repr, fall back to the class name if repr fails.
            let repr = zelf.repr(vm);
            let help = if let Ok(repr) = repr.as_ref() {
                repr.as_str().to_owned()
            } else {
                zelf.class().name().to_owned()
            };
            vm.new_type_error(format!("unexpected payload for __call__ of {help}"))
        })?;
        let args = args.bind(vm)?;
        Self::call(zelf, args, vm)
    }
    #[inline]
    #[pymethod]
    fn __call__(zelf: PyObjectRef, args: FuncArgs, vm: &VirtualMachine) -> PyResult {
        // NOTE(review): `args.bind(vm)?` here appears to re-bind to `FuncArgs`
        // (identity) before `slot_call` binds to `Self::Args` — looks
        // redundant; confirm before simplifying.
        Self::slot_call(&zelf, args.bind(vm)?, vm)
    }
    /// Invoke `zelf` with bound arguments.
    fn call(zelf: &Py<Self>, args: Self::Args, vm: &VirtualMachine) -> PyResult;
}
/// Descriptor protocol: `__get__`.
#[pyclass]
pub trait GetDescriptor: PyPayload {
    #[pyslot]
    fn descr_get(
        zelf: PyObjectRef,
        obj: Option<PyObjectRef>,
        cls: Option<PyObjectRef>,
        vm: &VirtualMachine,
    ) -> PyResult;
    #[inline]
    #[pymethod(magic)]
    fn get(
        zelf: PyObjectRef,
        obj: PyObjectRef,
        cls: OptionalArg<PyObjectRef>,
        vm: &VirtualMachine,
    ) -> PyResult {
        Self::descr_get(zelf, Some(obj), cls.into_option(), vm)
    }
    /// Borrow `zelf` as a typed reference, or raise `TypeError`.
    #[inline]
    fn _as_pyref<'a>(zelf: &'a PyObject, vm: &VirtualMachine) -> PyResult<&'a Py<Self>> {
        zelf.try_to_value(vm)
    }
    /// Like `_check`, but substitutes `None` for a missing `obj`.
    #[inline]
    fn _unwrap<'a>(
        zelf: &'a PyObject,
        obj: Option<PyObjectRef>,
        vm: &VirtualMachine,
    ) -> PyResult<(&'a Py<Self>, PyObjectRef)> {
        let zelf = Self::_as_pyref(zelf, vm)?;
        let obj = vm.unwrap_or_none(obj);
        Ok((zelf, obj))
    }
    /// Returns the typed descriptor and the instance, or `None` when the
    /// descriptor is accessed on the class (`obj` absent).
    #[inline]
    fn _check<'a>(
        zelf: &'a PyObject,
        obj: Option<PyObjectRef>,
        vm: &VirtualMachine,
    ) -> Option<(&'a Py<Self>, PyObjectRef)> {
        // CPython descr_check
        let obj = obj?;
        // if (!PyObject_TypeCheck(obj, descr->d_type)) {
        //     PyErr_Format(PyExc_TypeError,
        //                  "descriptor '%V' for '%.100s' objects "
        //                  "doesn't apply to a '%.100s' object",
        //                  descr_name((PyDescrObject *)descr), "?",
        //                  descr->d_type->slot_name,
        //                  obj->ob_type->slot_name);
        //     *pres = NULL;
        //     return 1;
        // } else {
        Some((Self::_as_pyref(zelf, vm).unwrap(), obj))
    }
    /// True when `cls` is present and is exactly `other`.
    #[inline]
    fn _cls_is(cls: &Option<PyObjectRef>, other: &impl Borrow<PyObject>) -> bool {
        cls.as_ref().map_or(false, |cls| other.borrow().is(cls))
    }
}
/// A type with a Rust-level `__hash__` implementation.
#[pyclass]
pub trait Hashable: PyPayload {
    #[inline]
    #[pyslot]
    fn slot_hash(zelf: &PyObject, vm: &VirtualMachine) -> PyResult<PyHash> {
        let zelf = zelf
            .downcast_ref()
            .ok_or_else(|| vm.new_type_error("unexpected payload for __hash__".to_owned()))?;
        Self::hash(zelf, vm)
    }
    #[inline]
    #[pymethod]
    fn __hash__(zelf: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyHash> {
        Self::slot_hash(&zelf, vm)
    }
    /// Compute the hash of `zelf`.
    fn hash(zelf: &Py<Self>, vm: &VirtualMachine) -> PyResult<PyHash>;
}
/// A type with a Rust-level `__repr__` implementation.
#[pyclass]
pub trait Representable: PyPayload {
    #[inline]
    #[pyslot]
    fn slot_repr(zelf: &PyObject, vm: &VirtualMachine) -> PyResult<PyStrRef> {
        let zelf = zelf
            .downcast_ref()
            .ok_or_else(|| vm.new_type_error("unexpected payload for __repr__".to_owned()))?;
        Self::repr(zelf, vm)
    }
    #[inline]
    #[pymethod]
    fn __repr__(zelf: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyStrRef> {
        Self::slot_repr(&zelf, vm)
    }
    /// Default: wrap the plain `repr_str` result in a `PyStr`.
    #[inline]
    fn repr(zelf: &Py<Self>, vm: &VirtualMachine) -> PyResult<PyStrRef> {
        let repr = Self::repr_str(zelf, vm)?;
        Ok(vm.ctx.new_str(repr))
    }
    /// Produce the repr text as a Rust `String`.
    fn repr_str(zelf: &Py<Self>, vm: &VirtualMachine) -> PyResult<String>;
}
/// A type with a Rust-level rich-comparison implementation; provides all six
/// magic methods in terms of a single `cmp`.
#[pyclass]
pub trait Comparable: PyPayload {
    #[inline]
    #[pyslot]
    fn slot_richcompare(
        zelf: &PyObject,
        other: &PyObject,
        op: PyComparisonOp,
        vm: &VirtualMachine,
    ) -> PyResult<Either<PyObjectRef, PyComparisonValue>> {
        let zelf = zelf.downcast_ref().ok_or_else(|| {
            vm.new_type_error(format!(
                "unexpected payload for {}",
                op.method_name(&vm.ctx).as_str()
            ))
        })?;
        Self::cmp(zelf, other, op, vm).map(Either::B)
    }
    /// Compare `zelf` with `other` under `op`; may return `NotImplemented`.
    fn cmp(
        zelf: &Py<Self>,
        other: &PyObject,
        op: PyComparisonOp,
        vm: &VirtualMachine,
    ) -> PyResult<PyComparisonValue>;
    #[inline]
    #[pymethod(magic)]
    fn eq(zelf: &Py<Self>, other: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyComparisonValue> {
        Self::cmp(zelf, &other, PyComparisonOp::Eq, vm)
    }
    #[inline]
    #[pymethod(magic)]
    fn ne(zelf: &Py<Self>, other: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyComparisonValue> {
        Self::cmp(zelf, &other, PyComparisonOp::Ne, vm)
    }
    #[inline]
    #[pymethod(magic)]
    fn lt(zelf: &Py<Self>, other: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyComparisonValue> {
        Self::cmp(zelf, &other, PyComparisonOp::Lt, vm)
    }
    #[inline]
    #[pymethod(magic)]
    fn le(zelf: &Py<Self>, other: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyComparisonValue> {
        Self::cmp(zelf, &other, PyComparisonOp::Le, vm)
    }
    #[inline]
    #[pymethod(magic)]
    fn ge(zelf: &Py<Self>, other: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyComparisonValue> {
        Self::cmp(zelf, &other, PyComparisonOp::Ge, vm)
    }
    #[inline]
    #[pymethod(magic)]
    fn gt(zelf: &Py<Self>, other: PyObjectRef, vm: &VirtualMachine) -> PyResult<PyComparisonValue> {
        Self::cmp(zelf, &other, PyComparisonOp::Gt, vm)
    }
}
/// Python rich-comparison operator; a transparent wrapper over the AST-level
/// `ComparisonOperator`.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[repr(transparent)]
pub struct PyComparisonOp(ComparisonOperator);
impl From<ComparisonOperator> for PyComparisonOp {
    /// Wrap an AST comparison operator.
    fn from(op: ComparisonOperator) -> Self {
        Self(op)
    }
}
/// Shorthand constants for the six Python comparison operators.
#[allow(non_upper_case_globals)]
impl PyComparisonOp {
    pub const Lt: Self = Self(ComparisonOperator::Less);
    pub const Gt: Self = Self(ComparisonOperator::Greater);
    pub const Ne: Self = Self(ComparisonOperator::NotEqual);
    pub const Eq: Self = Self(ComparisonOperator::Equal);
    pub const Le: Self = Self(ComparisonOperator::LessOrEqual);
    pub const Ge: Self = Self(ComparisonOperator::GreaterOrEqual);
}
impl PyComparisonOp {
    /// For `Eq`/`Ne`, evaluate `f` (negating the result for `Ne`); all other
    /// operators yield `NotImplemented`.
    pub fn eq_only(
        self,
        f: impl FnOnce() -> PyResult<PyComparisonValue>,
    ) -> PyResult<PyComparisonValue> {
        match self {
            Self::Eq => f(),
            Self::Ne => f().map(|x| x.map(|eq| !eq)),
            _ => Ok(PyComparisonValue::NotImplemented),
        }
    }
    /// Whether an `Ordering` between two operands satisfies this operator.
    ///
    /// NOTE(review): the bitwise test relies on `ComparisonOperator`
    /// discriminants being bit flags (e.g. `Le & Lt != 0`) — confirm against
    /// the enum's definition, which is not visible here.
    pub fn eval_ord(self, ord: Ordering) -> bool {
        let bit = match ord {
            Ordering::Less => Self::Lt,
            Ordering::Equal => Self::Eq,
            Ordering::Greater => Self::Gt,
        };
        self.0 as u8 & bit.0 as u8 != 0
    }
    /// The operator with its operands exchanged (`a < b` ⇔ `b > a`).
    pub fn swapped(self) -> Self {
        match self {
            Self::Lt => Self::Gt,
            Self::Le => Self::Ge,
            Self::Eq => Self::Eq,
            Self::Ne => Self::Ne,
            Self::Ge => Self::Le,
            Self::Gt => Self::Lt,
        }
    }
    /// The interned magic-method name for this operator (e.g. `__lt__`).
    pub fn method_name(self, ctx: &Context) -> &'static PyStrInterned {
        match self {
            Self::Lt => identifier!(ctx, __lt__),
            Self::Le => identifier!(ctx, __le__),
            Self::Eq => identifier!(ctx, __eq__),
            Self::Ne => identifier!(ctx, __ne__),
            Self::Ge => identifier!(ctx, __ge__),
            Self::Gt => identifier!(ctx, __gt__),
        }
    }
    /// The operator's surface syntax (e.g. `"<="`).
    pub fn operator_token(self) -> &'static str {
        match self {
            Self::Lt => "<",
            Self::Le => "<=",
            Self::Eq => "==",
            Self::Ne => "!=",
            Self::Ge => ">=",
            Self::Gt => ">",
        }
    }
    /// Returns an appropriate return value for the comparison when a and b are the same object, if an
    /// appropriate return value exists.
    #[inline]
    pub fn identical_optimization(
        self,
        a: &impl Borrow<PyObject>,
        b: &impl Borrow<PyObject>,
    ) -> Option<bool> {
        self.map_eq(|| a.borrow().is(b.borrow()))
    }
    /// Returns `Some(true)` when self is `Eq` and `f()` returns true. Returns `Some(false)` when self
    /// is `Ne` and `f()` returns true. Otherwise returns `None`.
    #[inline]
    pub fn map_eq(self, f: impl FnOnce() -> bool) -> Option<bool> {
        let eq = match self {
            Self::Eq => true,
            Self::Ne => false,
            _ => return None,
        };
        f().then_some(eq)
    }
}
/// Custom attribute lookup (`__getattribute__`).
#[pyclass]
pub trait GetAttr: PyPayload {
    #[pyslot]
    fn slot_getattro(obj: &PyObject, name: &Py<PyStr>, vm: &VirtualMachine) -> PyResult {
        let zelf = obj.downcast_ref().ok_or_else(|| {
            vm.new_type_error("unexpected payload for __getattribute__".to_owned())
        })?;
        Self::getattro(zelf, name, vm)
    }
    /// Look up attribute `name` on `zelf`.
    fn getattro(zelf: &Py<Self>, name: &Py<PyStr>, vm: &VirtualMachine) -> PyResult;
    #[inline]
    #[pymethod(magic)]
    fn getattribute(zelf: PyRef<Self>, name: PyStrRef, vm: &VirtualMachine) -> PyResult {
        Self::getattro(&zelf, &name, vm)
    }
}
/// Custom attribute assignment/deletion (`__setattr__` / `__delattr__`),
/// unified through `PySetterValue`.
#[pyclass]
pub trait SetAttr: PyPayload {
    #[pyslot]
    #[inline]
    fn slot_setattro(
        obj: &PyObject,
        name: &Py<PyStr>,
        value: PySetterValue,
        vm: &VirtualMachine,
    ) -> PyResult<()> {
        let zelf = obj
            .downcast_ref::<Self>()
            .ok_or_else(|| vm.new_type_error("unexpected payload for __setattr__".to_owned()))?;
        Self::setattro(zelf, name, value, vm)
    }
    /// Assign (`PySetterValue::Assign`) or delete (`PySetterValue::Delete`)
    /// attribute `name` on `zelf`.
    fn setattro(
        zelf: &Py<Self>,
        name: &Py<PyStr>,
        value: PySetterValue,
        vm: &VirtualMachine,
    ) -> PyResult<()>;
    #[inline]
    #[pymethod(magic)]
    fn setattr(
        zelf: PyRef<Self>,
        name: PyStrRef,
        value: PyObjectRef,
        vm: &VirtualMachine,
    ) -> PyResult<()> {
        Self::setattro(&zelf, &name, PySetterValue::Assign(value), vm)
    }
    #[inline]
    #[pymethod(magic)]
    fn delattr(zelf: PyRef<Self>, name: PyStrRef, vm: &VirtualMachine) -> PyResult<()> {
        Self::setattro(&zelf, &name, PySetterValue::Delete, vm)
    }
}
/// Buffer protocol: expose the object's memory as a `PyBuffer`.
#[pyclass]
pub trait AsBuffer: PyPayload {
    // TODO: `flags` parameter
    #[inline]
    #[pyslot]
    fn slot_as_buffer(zelf: &PyObject, vm: &VirtualMachine) -> PyResult<PyBuffer> {
        let zelf = zelf
            .downcast_ref()
            .ok_or_else(|| vm.new_type_error("unexpected payload for as_buffer".to_owned()))?;
        Self::as_buffer(zelf, vm)
    }
    /// Produce a buffer view of `zelf`.
    fn as_buffer(zelf: &Py<Self>, vm: &VirtualMachine) -> PyResult<PyBuffer>;
}
/// Mapping protocol implementation (CPython `tp_as_mapping`).
#[pyclass]
pub trait AsMapping: PyPayload {
    #[pyslot]
    fn as_mapping() -> &'static PyMappingMethods;
    #[inline]
    fn mapping_downcast(mapping: PyMapping) -> &Py<Self> {
        // SAFETY: callers must only pass mappings whose underlying object is
        // a `Self` (NOTE(review): invariant enforced by callers — confirm).
        unsafe { mapping.obj.downcast_unchecked_ref() }
    }
}
/// Sequence protocol implementation (CPython `tp_as_sequence`).
#[pyclass]
pub trait AsSequence: PyPayload {
    #[pyslot]
    fn as_sequence() -> &'static PySequenceMethods;
    #[inline]
    fn sequence_downcast(seq: PySequence) -> &Py<Self> {
        // SAFETY: callers must only pass sequences whose underlying object is
        // a `Self` (NOTE(review): invariant enforced by callers — confirm).
        unsafe { seq.obj.downcast_unchecked_ref() }
    }
}
/// Number protocol implementation (CPython `tp_as_number`).
#[pyclass]
pub trait AsNumber: PyPayload {
    #[pyslot]
    fn as_number() -> &'static PyNumberMethods;
    /// Clone `zelf` into a fresh exact instance of `Self`; only needed by
    /// implementors that use `number_downcast_exact`.
    fn clone_exact(_zelf: &Py<Self>, _vm: &VirtualMachine) -> PyRef<Self> {
        // not all AsNumber requires this implementation.
        unimplemented!()
    }
    #[inline]
    fn number_downcast(num: PyNumber) -> &Py<Self> {
        // SAFETY: callers must only pass numbers whose underlying object is
        // a `Self` (NOTE(review): invariant enforced by callers — confirm).
        unsafe { num.obj().downcast_unchecked_ref() }
    }
    /// Obtain an owned exact-`Self` ref, cloning subclass instances.
    #[inline]
    fn number_downcast_exact(num: PyNumber, vm: &VirtualMachine) -> PyRef<Self> {
        if let Some(zelf) = num.downcast_ref_if_exact::<Self>(vm) {
            zelf.to_owned()
        } else {
            Self::clone_exact(Self::number_downcast(num), vm)
        }
    }
}
/// A type iterable from Python (`__iter__`).
#[pyclass]
pub trait Iterable: PyPayload {
    #[pyslot]
    fn slot_iter(zelf: PyObjectRef, vm: &VirtualMachine) -> PyResult {
        let zelf = zelf
            .downcast()
            .map_err(|_| vm.new_type_error("unexpected payload for __iter__".to_owned()))?;
        Self::iter(zelf, vm)
    }
    #[pymethod]
    fn __iter__(zelf: PyObjectRef, vm: &VirtualMachine) -> PyResult {
        Self::slot_iter(zelf, vm)
    }
    /// Produce an iterator object for `zelf`.
    fn iter(zelf: PyRef<Self>, vm: &VirtualMachine) -> PyResult;
    /// Hook for implementors to adjust the generated slots (see `SelfIter`).
    fn extend_slots(_slots: &mut PyTypeSlots) {}
}
// `Iterator` fits better, but to avoid confusion with rust std::iter::Iterator
/// Iterator protocol: `__next__` (requires `Iterable`).
#[pyclass(with(Iterable))]
pub trait IterNext: PyPayload + Iterable {
    #[pyslot]
    fn slot_iternext(zelf: &PyObject, vm: &VirtualMachine) -> PyResult<PyIterReturn> {
        let zelf = zelf
            .downcast_ref()
            .ok_or_else(|| vm.new_type_error("unexpected payload for __next__".to_owned()))?;
        Self::next(zelf, vm)
    }
    /// Advance the iterator, returning a value or signalling exhaustion.
    fn next(zelf: &Py<Self>, vm: &VirtualMachine) -> PyResult<PyIterReturn>;
    #[inline]
    #[pymethod]
    fn __next__(zelf: PyObjectRef, vm: &VirtualMachine) -> PyResult {
        Self::slot_iternext(&zelf, vm).to_pyresult(vm)
    }
}
pub trait SelfIter: PyPayload {}
impl<T> Iterable for T
where
    T: SelfIter,
{
    /// Never reached in practice: `extend_slots` replaces this slot with
    /// `self_iter` when the type's slots are built.
    #[cold]
    fn slot_iter(zelf: PyObjectRef, vm: &VirtualMachine) -> PyResult {
        let repr = zelf.repr(vm)?;
        // Fixed typo: "overriden" -> "overridden".
        unreachable!("slot must be overridden for {}", repr.as_str());
    }
    fn __iter__(zelf: PyObjectRef, vm: &VirtualMachine) -> PyResult {
        self_iter(zelf, vm)
    }
    #[cold]
    fn iter(_zelf: PyRef<Self>, _vm: &VirtualMachine) -> PyResult {
        unreachable!("slot_iter is implemented");
    }
    fn extend_slots(slots: &mut PyTypeSlots) {
        // Swap in the identity iterator; the previous value should be the
        // default `slot_iter` installed above.
        let prev = slots.iter.swap(Some(self_iter));
        debug_assert!(prev.is_some()); // slot_iter would be set
    }
}
|
use std::{fmt::Display, sync::Arc};
use async_trait::async_trait;
use backoff::{Backoff, BackoffConfig};
use data_types::{CompactionLevel, ParquetFile, ParquetFileId, ParquetFileParams, PartitionId};
use iox_catalog::interface::Catalog;
use crate::{commit::Error, Commit};
/// A [`Commit`] implementation that writes parquet-file changes directly to
/// the catalog.
#[derive(Debug)]
pub(crate) struct CatalogCommit {
    // Retry/backoff policy for catalog calls.
    backoff_config: BackoffConfig,
    catalog: Arc<dyn Catalog>,
}
impl CatalogCommit {
    /// Create a new [`CatalogCommit`] backed by the given catalog, retrying
    /// per `backoff_config`.
    pub(crate) fn new(backoff_config: BackoffConfig, catalog: Arc<dyn Catalog>) -> Self {
        Self {
            backoff_config,
            catalog,
        }
    }
}
impl Display for CatalogCommit {
    /// Short human-readable identifier for this commit implementation.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str("catalog")
    }
}
#[async_trait]
impl Commit for CatalogCommit {
async fn commit(
&self,
_partition_id: PartitionId,
delete: &[ParquetFile],
upgrade: &[ParquetFile],
create: &[ParquetFileParams],
target_level: CompactionLevel,
) -> Result<Vec<ParquetFileId>, Error> {
let is_upgrade_commit = !upgrade.is_empty();
let is_replacement_commit = !delete.is_empty() || !create.is_empty();
let replacement_commit_is_ok = !delete.is_empty() && !create.is_empty();
match (is_upgrade_commit, is_replacement_commit) {
(false, false) => {
return Err(Error::BadRequest("commit must have files to upgrade, and/or a set of files to replace (delete and create)".into()));
}
(_, true) if !replacement_commit_is_ok => {
return Err(Error::BadRequest(
"replacement commits must have both files to delete and files to create".into(),
));
}
_ => {} // is ok
}
let delete = delete.iter().map(|f| f.id).collect::<Vec<_>>();
let upgrade = upgrade.iter().map(|f| f.id).collect::<Vec<_>>();
let result = Backoff::new(&self.backoff_config)
.retry_all_errors("commit parquet file changes", || async {
let mut repos = self.catalog.repositories().await;
let parquet_files = repos.parquet_files();
let ids = parquet_files
.create_upgrade_delete(&delete, &upgrade, create, target_level)
.await?;
Ok::<_, iox_catalog::interface::Error>(ids)
})
.await
.expect("retry forever");
if result.len() != create.len() {
return Err(Error::InvalidCatalogResult(format!(
"Number of created parquet files is invalid: expected {} but found {}",
create.len(),
result.len()
)));
}
return Ok(result);
}
}
|
mod common;
use common::round_beam_intersect;
use crisscross::{BeamIntersect, Grid, TilePosition, TileRaycaster};
/// Cast a beam of `width` at `angle` from `center` through `grid` and return
/// the intersections rounded for stable comparison
/// (see `common::round_beam_intersect`).
///
/// Fixed mojibake: `&center` had been corrupted to `¢er` by an
/// HTML-entity mis-decoding.
fn cast(grid: &Grid, center: &TilePosition, width: f32, angle: f32) -> Vec<BeamIntersect> {
    let tc = TileRaycaster::new(grid.clone());
    let bis: Vec<BeamIntersect> = tc
        .cast_beam(&center, width, angle)
        .map(round_beam_intersect)
        .collect();
    // Optionally render the beam to a file when the `plot` feature is enabled.
    #[cfg(feature = "plot")]
    {
        use crisscross::plot::{plot_beam, PlotType};
        plot_beam(&grid, center, width, &angle.into(), &bis, PlotType::File);
    }
    bis
}
/// Beam casting across a 4x4 grid from several origins, widths and angles,
/// pinned against precomputed (rounded) intersection tables.
///
/// Fixed mojibake: `&center` had been corrupted to `¢er` in every call.
#[test]
fn cast_beam_4x4grid() {
    let grid = Grid::new(4, 4, 1.0);
    // Beam at angle 0 from the middle of tile (1, 1).
    let center = TilePosition::from(((1, 0.5), (1, 0.5)));
    let width = 0.8;
    let angle = 0.0;
    assert_eq!(
        cast(&grid, &center, width, angle),
        [
            BeamIntersect(0, ((2, 0.000), (1, 0.900)).into()),
            BeamIntersect(0, ((3, 0.000), (1, 0.900)).into())
        ],
    );
    // Same beam starting at the grid origin.
    let center = TilePosition::from(((0, 0.0), (0, 0.0)));
    assert_eq!(
        cast(&grid, &center, width, angle),
        [
            BeamIntersect(0, ((1, 0.000), (0, 0.400)).into()),
            BeamIntersect(0, ((2, 0.000), (0, 0.400)).into()),
            BeamIntersect(0, ((3, 0.000), (0, 0.400)).into())
        ],
    );
    // Wider beam at 320 degrees.
    let center = TilePosition::from(((0, 0.3), (2, 0.3)));
    let width = 2.2;
    let angle = 320_f32.to_radians();
    assert_eq!(
        cast(&grid, &center, width, angle),
        [
            BeamIntersect(0, ((0, 0.087), (1, 1.000)).into()),
            BeamIntersect(4, ((1, 0.177), (2, 1.000)).into()),
            BeamIntersect(2, ((1, 0.228), (1, 1.000)).into()),
            BeamIntersect(4, ((2, 0.000), (2, 0.309)).into()),
            BeamIntersect(0, ((1, 0.279), (0, 1.000)).into()),
            BeamIntersect(3, ((2, 0.000), (1, 0.831)).into()),
            BeamIntersect(1, ((2, 0.000), (0, 0.874)).into()),
            BeamIntersect(4, ((3, 0.000), (1, 0.470)).into()),
            BeamIntersect(3, ((3, 0.000), (0, 0.992)).into())
        ],
    );
    // Same angle from near the far corner yields far fewer intersections.
    let center = TilePosition::from(((3, 0.3), (3, 0.3)));
    assert_eq!(
        cast(&grid, &center, width, angle),
        [
            BeamIntersect(2, ((3, 0.087), (2, 1.000)).into()),
            BeamIntersect(0, ((3, 0.138), (1, 1.000)).into())
        ],
    );
    // Beam at 135 degrees.
    let center = TilePosition::from(((3, 0.3), (1, 0.5)));
    let angle = 135_f32.to_radians();
    assert_eq!(
        cast(&grid, &center, width, angle),
        [
            BeamIntersect(1, ((2, 0.763), (1, 0.000)).into()),
            BeamIntersect(4, ((3, 0.319), (2, 0.000)).into()),
            BeamIntersect(3, ((2, 0.800), (2, 0.000)).into()),
            BeamIntersect(0, ((1, 1.000), (1, 0.244)).into()),
            BeamIntersect(5, ((2, 0.837), (3, 0.000)).into()),
            BeamIntersect(1, ((1, 0.763), (2, 0.000)).into()),
            BeamIntersect(3, ((1, 0.800), (3, 0.000)).into()),
            BeamIntersect(0, ((0, 1.000), (2, 0.244)).into()),
            BeamIntersect(1, ((0, 0.763), (3, 0.000)).into())
        ],
    );
}
|
use crate::ecs::*;
use shrinkwraprs::Shrinkwrap;
/// Component linking an entity to its parent entity in the hierarchy.
#[derive(Shrinkwrap, Debug, Copy, Clone, Eq, PartialEq)]
#[shrinkwrap(mutable)]
pub struct Parent(pub Entity);
/// The entity's parent as last observed; `None` when no parent was recorded.
/// NOTE(review): presumably used to detect reparenting between updates —
/// confirm against the hierarchy-maintenance system.
#[derive(Shrinkwrap, Debug, Copy, Clone, Eq, PartialEq)]
#[shrinkwrap(mutable)]
pub struct PreviousParent(pub Option<Entity>);
|
use std::fs::File;
use std::path::Path;
use std::io::prelude::*;
use std::io;
/// A simple 2-D raster image stored as rows of pixels.
pub struct Image<T> {
    /// Pixel rows: `data[y][x]` — outer index is the row (y), inner the column (x).
    pub data: Vec<Vec<T>>,
    /// Number of columns.
    pub width: usize,
    /// Number of rows.
    pub height: usize,
}
impl<T: Copy> Image<T> {
    /// Creates a `width` x `height` image with every pixel set to `fill`.
    pub fn new(fill: T, width: usize, height: usize) -> Image<T> {
        Image {
            data: vec![vec![fill; width]; height],
            width,
            height,
        }
    }
    /// Sets the pixel at column `x`, row `y`.
    ///
    /// # Panics
    /// Panics if `x >= width` or `y >= height`.
    pub fn set(&mut self, x: usize, y: usize, v: T) {
        self.data[y][x] = v;
    }
    /// Writes the image as an ASCII PPM ("P3") file at `path`, using `f` to
    /// map each pixel to an `[r, g, b]` triple (each channel 0..=255).
    ///
    /// # Errors
    /// Returns any I/O error from creating or writing the file.
    pub fn save_ppm<F>(&self, path: &Path, f: F) -> io::Result<()>
    where
        F: Fn(T) -> [u8; 3],
    {
        // Buffer the output: the previous per-pixel write_all was one
        // syscall per pixel.
        let mut file = io::BufWriter::new(File::create(path)?);
        write!(file, "P3\n{} {}\n{}\n", self.width, self.height, 255)?;
        for y in 0..self.height {
            for x in 0..self.width {
                let c = f(self.data[y][x]);
                writeln!(file, "{} {} {}", c[0], c[1], c[2])?;
            }
        }
        // BufWriter's Drop swallows flush errors; flush explicitly to report them.
        file.flush()?;
        Ok(())
    }
}
|
// Copyright 2022 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use base64::engine::general_purpose;
use base64::prelude::*;
use common_base::base::tokio;
use common_exception::Result;
use common_meta_app::principal::AuthInfo;
use common_meta_app::principal::UserIdentity;
use common_meta_app::principal::UserInfo;
use common_users::CustomClaims;
use common_users::EnsureUser;
use common_users::UserApiProvider;
use databend_query::auth::Credential;
use databend_query::sessions::TableContext;
use jwt_simple::prelude::*;
use p256::EncodedPoint;
use wiremock::matchers::method;
use wiremock::matchers::path;
use wiremock::Mock;
use wiremock::MockServer;
use wiremock::ResponseTemplate;
/// Claims payload that does NOT follow the databend `CustomClaims` schema;
/// used to exercise JWT auth with unrecognized custom claim fields.
#[derive(Serialize, Deserialize)]
struct NonCustomClaims {
    user_is_admin: bool,
    user_country: String,
}
/// Generates a fresh 2048-bit RS256 key pair tagged with `kid` and renders
/// its public half as a JWKS JSON document containing a single RSA key.
fn get_jwks_file_rs256(kid: &str) -> (RS256KeyPair, String) {
    let key_pair = RS256KeyPair::generate(2048).unwrap().with_key_id(kid);
    let components = key_pair.public_key().to_components();
    let exponent = general_purpose::URL_SAFE_NO_PAD.encode(components.e);
    let modulus = general_purpose::URL_SAFE_NO_PAD.encode(components.n);
    let jwks = serde_json::json!({
        "keys": [ {"kty": "RSA", "kid": kid, "e": exponent, "n": modulus, } ]
    })
    .to_string();
    (key_pair, jwks)
}
// End-to-end check of JWT auth when TWO JWKS key stores are configured
// (`jwt_key_file` plus `jwt_key_files`): tokens signed by either published
// key pair must authenticate, and tokens signed by an unpublished key must
// be rejected with a "could not decode" error.
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_auth_mgr_with_jwt_multi_sources() -> Result<()> {
    let (pair1, pbkey1) = get_jwks_file_rs256("test_kid");
    let (pair2, pbkey2) = get_jwks_file_rs256("second_kid");
    // pair3's public key is never served by any mock endpoint, so tokens it
    // signs cannot be verified against either key store.
    let (pair3, _) = get_jwks_file_rs256("illegal_kid");
    let template1 = ResponseTemplate::new(200).set_body_raw(pbkey1, "application/json");
    let template2 = ResponseTemplate::new(200).set_body_raw(pbkey2, "application/json");
    let json_path = "/jwks.json";
    let second_path = "/plugins/jwks.json";
    let server = MockServer::start().await;
    Mock::given(method("GET"))
        .and(path(json_path))
        .respond_with(template1)
        .expect(1..)
        // Mounting the mock on the mock server - it's now effective!
        .mount(&server)
        .await;
    Mock::given(method("GET"))
        .and(path(second_path))
        .respond_with(template2)
        .expect(1..)
        // Mounting the mock on the mock server - it's now effective!
        .mount(&server)
        .await;
    // First URL goes into the legacy single-source setting, second into the
    // multi-source list.
    let mut conf = crate::tests::ConfigBuilder::create().config();
    let first_url = format!("http://{}{}", server.address(), json_path);
    let second_url = format!("http://{}{}", server.address(), second_path);
    conf.query.jwt_key_file = first_url.clone();
    conf.query.jwt_key_files = vec![second_url];
    let (_guard, ctx) = crate::tests::create_query_context_with_config(conf, None).await?;
    let auth_mgr = ctx.get_auth_manager();
    {
        let user_name = "test-user2";
        let role_name = "test-role";
        let custom_claims = CustomClaims::new()
            .with_ensure_user(EnsureUser {
                roles: Some(vec![role_name.to_string()]),
            })
            .with_role("test-auth-role");
        let claims = Claims::with_custom_claims(custom_claims, Duration::from_hours(2))
            .with_subject(user_name.to_string());
        // Token signed by the key published at the first JWKS URL.
        let token1 = pair1.sign(claims)?;
        let res = auth_mgr
            .auth(ctx.get_current_session(), &Credential::Jwt {
                token: token1,
                hostname: None,
            })
            .await;
        assert!(res.is_ok());
        // NOTE(review): asserts the claimed auth role is NOT among the
        // session's available roles — presumably only `ensure_user` roles
        // become available; confirm against the auth manager's semantics.
        let roles: Vec<String> = ctx
            .get_current_session()
            .get_all_available_roles()
            .await?
            .into_iter()
            .map(|r| r.name)
            .collect();
        assert_eq!(roles.len(), 1);
        assert!(!roles.contains(&"test-auth-role".to_string()));
        let claim2 = CustomClaims::new()
            .with_ensure_user(EnsureUser {
                roles: Some(vec![role_name.to_string()]),
            })
            .with_role("test-auth-role2");
        let user2 = "candidate_by_keypair2";
        let claims = Claims::with_custom_claims(claim2, Duration::from_hours(2))
            .with_subject(user2.to_string());
        // Token signed by the key published at the second JWKS URL.
        let token2 = pair2.sign(claims)?;
        let res = auth_mgr
            .auth(ctx.get_current_session(), &Credential::Jwt {
                token: token2,
                hostname: None,
            })
            .await;
        assert!(res.is_ok());
        let roles: Vec<String> = ctx
            .get_current_session()
            .get_all_available_roles()
            .await?
            .into_iter()
            .map(|r| r.name)
            .collect();
        assert_eq!(roles.len(), 1);
        assert!(!roles.contains(&"test-auth-role2".to_string()));
        // A pre-registered user may authenticate with a token whose custom
        // claims do not match the databend schema at all.
        let non_custom_claim = NonCustomClaims {
            user_is_admin: false,
            user_country: "Springfield".to_string(),
        };
        let user2 = "service_account:mysql@123";
        let claims = Claims::with_custom_claims(non_custom_claim, Duration::from_hours(2))
            .with_subject(user2.to_string());
        let token2 = pair2.sign(claims)?;
        let tenant = ctx.get_current_session().get_current_tenant();
        let user2_info = UserInfo::new(user2, "1.1.1.1", AuthInfo::JWT);
        UserApiProvider::instance()
            .add_user(tenant.as_str(), user2_info.clone(), true)
            .await?;
        let res2 = auth_mgr
            .auth(ctx.get_current_session(), &Credential::Jwt {
                token: token2,
                hostname: Some("1.1.1.1".to_string()),
            })
            .await;
        assert!(res2.is_ok());
        assert_eq!(
            ctx.get_current_session().get_current_user().unwrap(),
            user2_info
        );
        // it would not work on claim with unknown jwt keys
        let claim3 = CustomClaims::new()
            .with_ensure_user(EnsureUser {
                roles: Some(vec![role_name.to_string()]),
            })
            .with_role("test-auth-role3");
        let user3 = "candidate_by_keypair3";
        let claims = Claims::with_custom_claims(claim3, Duration::from_hours(2))
            .with_subject(user3.to_string());
        let token3 = pair3.sign(claims)?;
        let res3 = auth_mgr
            .auth(ctx.get_current_session(), &Credential::Jwt {
                token: token3,
                hostname: None,
            })
            .await;
        assert!(res3.is_err());
        assert!(
            res3.err()
                .unwrap()
                .to_string()
                .contains("could not decode token from all available jwt key stores")
        );
    }
    Ok(())
}
// JWT auth against a single RS256 JWKS source, covering: missing subject,
// unknown user, user auto-creation via `ensure_user`, idempotent re-creation,
// role grants, auth-role handling, and the root-only-from-localhost rule.
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_auth_mgr_with_jwt() -> Result<()> {
    // Publish the RSA public key as a JWKS document on a mock HTTP server.
    let kid = "test_kid";
    let key_pair = RS256KeyPair::generate(2048)?.with_key_id(kid);
    let rsa_components = key_pair.public_key().to_components();
    let e = general_purpose::URL_SAFE_NO_PAD.encode(rsa_components.e);
    let n = general_purpose::URL_SAFE_NO_PAD.encode(rsa_components.n);
    let j =
        serde_json::json!({"keys": [ {"kty": "RSA", "kid": kid, "e": e, "n": n, } ] }).to_string();
    let server = MockServer::start().await;
    let json_path = "/jwks.json";
    // Create a mock on the server.
    let template = ResponseTemplate::new(200).set_body_raw(j, "application/json");
    Mock::given(method("GET"))
        .and(path(json_path))
        .respond_with(template)
        .expect(1..)
        // Mounting the mock on the mock server - it's now effective!
        .mount(&server)
        .await;
    let jwks_url = format!("http://{}{}", server.address(), json_path);
    let mut conf = crate::tests::ConfigBuilder::create().config();
    conf.query.jwt_key_file = jwks_url.clone();
    let (_guard, ctx) = crate::tests::create_query_context_with_config(conf, None).await?;
    let auth_mgr = ctx.get_auth_manager();
    let tenant = "test";
    let user_name = "test";
    // without subject
    {
        let claims = Claims::create(Duration::from_hours(2));
        let token = key_pair.sign(claims)?;
        let res = auth_mgr
            .auth(ctx.get_current_session(), &Credential::Jwt {
                token,
                hostname: None,
            })
            .await;
        assert!(res.is_err());
        assert!(
            res.err()
                .unwrap()
                .to_string()
                .contains("missing field `subject` in jwt")
        );
    }
    // without custom claims
    {
        let claims = Claims::create(Duration::from_hours(2)).with_subject(user_name.to_string());
        let token = key_pair.sign(claims)?;
        let res = auth_mgr
            .auth(ctx.get_current_session(), &Credential::Jwt {
                token,
                hostname: None,
            })
            .await;
        assert!(res.is_err());
        assert!(
            res.err()
                .unwrap()
                .to_string()
                .contains("unknown user 'test'@'%'")
        );
    }
    // with custom claims
    {
        // Custom claims WITHOUT `ensure_user` still must not auto-create.
        let custom_claims = CustomClaims::new();
        let claims = Claims::with_custom_claims(custom_claims, Duration::from_hours(2))
            .with_subject(user_name.to_string());
        let token = key_pair.sign(claims)?;
        let res = auth_mgr
            .auth(ctx.get_current_session(), &Credential::Jwt {
                token,
                hostname: None,
            })
            .await;
        assert!(res.is_err());
        assert!(
            res.err()
                .unwrap()
                .to_string()
                .contains("unknown user 'test'@'%'")
        );
    }
    // with create user
    {
        let custom_claims = CustomClaims::new().with_ensure_user(EnsureUser::default());
        let claims = Claims::with_custom_claims(custom_claims, Duration::from_hours(2))
            .with_subject(user_name.to_string());
        let token = key_pair.sign(claims)?;
        auth_mgr
            .auth(ctx.get_current_session(), &Credential::Jwt {
                token,
                hostname: None,
            })
            .await?;
        let user_info = ctx.get_current_user()?;
        assert_eq!(user_info.grants.roles().len(), 0);
    }
    // with create user again
    {
        // The user already exists, so the new `roles` list is NOT applied.
        let custom_claims = CustomClaims::new().with_ensure_user(EnsureUser {
            roles: Some(vec!["role1".to_string()]),
        });
        let claims = Claims::with_custom_claims(custom_claims, Duration::from_hours(2))
            .with_subject(user_name.to_string());
        let token = key_pair.sign(claims)?;
        auth_mgr
            .auth(ctx.get_current_session(), &Credential::Jwt {
                token,
                hostname: None,
            })
            .await?;
        let user_info = ctx.get_current_user()?;
        assert_eq!(user_info.grants.roles().len(), 0);
    }
    // with create user and grant roles
    {
        let user_name = "test-user2";
        let role_name = "test-role";
        let custom_claims = CustomClaims::new().with_ensure_user(EnsureUser {
            roles: Some(vec![role_name.to_string()]),
        });
        let claims = Claims::with_custom_claims(custom_claims, Duration::from_hours(2))
            .with_subject(user_name.to_string());
        let token = key_pair.sign(claims)?;
        let res = auth_mgr
            .auth(ctx.get_current_session(), &Credential::Jwt {
                token,
                hostname: None,
            })
            .await;
        assert!(res.is_ok());
        let user_info = UserApiProvider::instance()
            .get_user(tenant, UserIdentity::new(user_name, "%"))
            .await?;
        assert_eq!(user_info.grants.roles().len(), 1);
        assert_eq!(user_info.grants.roles()[0], role_name.to_string());
    }
    // with create user and auth role
    {
        let user_name = "test-user2";
        let role_name = "test-role";
        let custom_claims = CustomClaims::new()
            .with_ensure_user(EnsureUser {
                roles: Some(vec![role_name.to_string()]),
            })
            .with_role("test-auth-role");
        let claims = Claims::with_custom_claims(custom_claims, Duration::from_hours(2))
            .with_subject(user_name.to_string());
        let token = key_pair.sign(claims)?;
        let res = auth_mgr
            .auth(ctx.get_current_session(), &Credential::Jwt {
                token,
                hostname: None,
            })
            .await;
        assert!(res.is_ok());
        // NOTE(review): the claimed auth role is expected to be absent from
        // the available roles — confirm against auth manager semantics.
        let roles: Vec<String> = ctx
            .get_current_session()
            .get_all_available_roles()
            .await?
            .into_iter()
            .map(|r| r.name)
            .collect();
        assert_eq!(roles.len(), 1);
        assert!(!roles.contains(&"test-auth-role".to_string()));
    }
    // root auth from localhost
    {
        let user_name = "root";
        let claims = Claims::create(Duration::from_hours(2)).with_subject(user_name.to_string());
        let token = key_pair.sign(claims)?;
        let res = auth_mgr
            .auth(ctx.get_current_session(), &Credential::Jwt {
                token,
                hostname: Some("localhost".to_string()),
            })
            .await;
        assert!(res.is_ok());
    }
    // root auth outside localhost
    {
        let claims = Claims::create(Duration::from_hours(2)).with_subject("root".to_string());
        let token = key_pair.sign(claims)?;
        let res = auth_mgr
            .auth(ctx.get_current_session(), &Credential::Jwt {
                token,
                hostname: Some("10.0.0.1".to_string()),
            })
            .await;
        assert!(res.is_err());
        assert!(
            res.err()
                .unwrap()
                .to_string()
                .contains("only accept root from localhost, current: 'root'@'%'")
        );
    }
    Ok(())
}
// Same scenario matrix as `test_auth_mgr_with_jwt`, but with an ES256
// (ECDSA P-256) key pair published as an "EC" JWKS entry instead of RSA.
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_auth_mgr_with_jwt_es256() -> Result<()> {
    let kid = "test_kid";
    let key_pair = ES256KeyPair::generate().with_key_id(kid);
    // Extract the affine x/y coordinates from the uncompressed public point;
    // these become the JWK "x"/"y" members.
    let encoded_point =
        EncodedPoint::from_bytes(key_pair.public_key().public_key().to_bytes_uncompressed())
            .expect("must be valid encode point");
    let x = general_purpose::URL_SAFE_NO_PAD.encode(encoded_point.x().unwrap());
    let y = general_purpose::URL_SAFE_NO_PAD.encode(encoded_point.y().unwrap());
    let j =
        serde_json::json!({"keys": [ {"kty": "EC", "kid": kid, "x": x, "y": y, } ] }).to_string();
    let server = MockServer::start().await;
    let json_path = "/jwks.json";
    // Create a mock on the server.
    let template = ResponseTemplate::new(200).set_body_raw(j, "application/json");
    Mock::given(method("GET"))
        .and(path(json_path))
        .respond_with(template)
        .expect(1..)
        // Mounting the mock on the mock server - it's now effective!
        .mount(&server)
        .await;
    let jwks_url = format!("http://{}{}", server.address(), json_path);
    let mut conf = crate::tests::ConfigBuilder::create().config();
    conf.query.jwt_key_file = jwks_url.clone();
    let (_guard, ctx) = crate::tests::create_query_context_with_config(conf, None).await?;
    let auth_mgr = ctx.get_auth_manager();
    let tenant = "test";
    let user_name = "test";
    // without subject
    {
        let claims = Claims::create(Duration::from_hours(2));
        let token = key_pair.sign(claims)?;
        let res = auth_mgr
            .auth(ctx.get_current_session(), &Credential::Jwt {
                token,
                hostname: None,
            })
            .await;
        assert!(res.is_err());
        assert!(
            res.err()
                .unwrap()
                .to_string()
                .contains("missing field `subject` in jwt")
        );
    }
    // without custom claims
    {
        let claims = Claims::create(Duration::from_hours(2)).with_subject(user_name.to_string());
        let token = key_pair.sign(claims)?;
        let res = auth_mgr
            .auth(ctx.get_current_session(), &Credential::Jwt {
                token,
                hostname: None,
            })
            .await;
        assert!(res.is_err());
        assert!(
            res.err()
                .unwrap()
                .to_string()
                .contains("unknown user 'test'@'%'")
        );
    }
    // with custom claims
    {
        // Custom claims WITHOUT `ensure_user` still must not auto-create.
        let custom_claims = CustomClaims::new();
        let claims = Claims::with_custom_claims(custom_claims, Duration::from_hours(2))
            .with_subject(user_name.to_string());
        let token = key_pair.sign(claims)?;
        let res = auth_mgr
            .auth(ctx.get_current_session(), &Credential::Jwt {
                token,
                hostname: None,
            })
            .await;
        assert!(res.is_err());
        assert!(
            res.err()
                .unwrap()
                .to_string()
                .contains("unknown user 'test'@'%'")
        );
    }
    // with create user
    {
        let custom_claims = CustomClaims::new().with_ensure_user(EnsureUser::default());
        let claims = Claims::with_custom_claims(custom_claims, Duration::from_hours(2))
            .with_subject(user_name.to_string());
        let token = key_pair.sign(claims)?;
        auth_mgr
            .auth(ctx.get_current_session(), &Credential::Jwt {
                token,
                hostname: None,
            })
            .await?;
        let user_info = ctx.get_current_user()?;
        assert_eq!(user_info.grants.roles().len(), 0);
    }
    // with create user again
    {
        // The user already exists, so the new `roles` list is NOT applied.
        let custom_claims = CustomClaims::new().with_ensure_user(EnsureUser {
            roles: Some(vec!["role1".to_string()]),
        });
        let claims = Claims::with_custom_claims(custom_claims, Duration::from_hours(2))
            .with_subject(user_name.to_string());
        let token = key_pair.sign(claims)?;
        auth_mgr
            .auth(ctx.get_current_session(), &Credential::Jwt {
                token,
                hostname: None,
            })
            .await?;
        let user_info = ctx.get_current_user()?;
        assert_eq!(user_info.grants.roles().len(), 0);
    }
    // with create user and grant roles
    {
        let user_name = "test-user2";
        let role_name = "test-role";
        let custom_claims = CustomClaims::new().with_ensure_user(EnsureUser {
            roles: Some(vec![role_name.to_string()]),
        });
        let claims = Claims::with_custom_claims(custom_claims, Duration::from_hours(2))
            .with_subject(user_name.to_string());
        let token = key_pair.sign(claims)?;
        let res = auth_mgr
            .auth(ctx.get_current_session(), &Credential::Jwt {
                token,
                hostname: None,
            })
            .await;
        assert!(res.is_ok());
        let user_info = UserApiProvider::instance()
            .get_user(tenant, UserIdentity::new(user_name, "%"))
            .await?;
        assert_eq!(user_info.grants.roles().len(), 1);
        assert_eq!(user_info.grants.roles()[0], role_name.to_string());
    }
    // with create user and auth role
    {
        let user_name = "test-user2";
        let role_name = "test-role";
        let custom_claims = CustomClaims::new()
            .with_ensure_user(EnsureUser {
                roles: Some(vec![role_name.to_string()]),
            })
            .with_role("test-auth-role");
        let claims = Claims::with_custom_claims(custom_claims, Duration::from_hours(2))
            .with_subject(user_name.to_string());
        let token = key_pair.sign(claims)?;
        let res = auth_mgr
            .auth(ctx.get_current_session(), &Credential::Jwt {
                token,
                hostname: None,
            })
            .await;
        assert!(res.is_ok());
        // NOTE(review): the claimed auth role is expected to be absent from
        // the available roles — confirm against auth manager semantics.
        let roles: Vec<String> = ctx
            .get_current_session()
            .get_all_available_roles()
            .await?
            .into_iter()
            .map(|r| r.name)
            .collect();
        assert_eq!(roles.len(), 1);
        assert!(!roles.contains(&"test-auth-role".to_string()));
    }
    // root auth from localhost
    {
        let user_name = "root";
        let claims = Claims::create(Duration::from_hours(2)).with_subject(user_name.to_string());
        let token = key_pair.sign(claims)?;
        let res = auth_mgr
            .auth(ctx.get_current_session(), &Credential::Jwt {
                token,
                hostname: Some("localhost".to_string()),
            })
            .await;
        assert!(res.is_ok());
    }
    // root auth outside localhost
    {
        let claims = Claims::create(Duration::from_hours(2)).with_subject("root".to_string());
        let token = key_pair.sign(claims)?;
        let res = auth_mgr
            .auth(ctx.get_current_session(), &Credential::Jwt {
                token,
                hostname: Some("10.0.0.1".to_string()),
            })
            .await;
        assert!(res.is_err());
        assert!(
            res.err()
                .unwrap()
                .to_string()
                .contains("only accept root from localhost, current: 'root'@'%'")
        );
    }
    Ok(())
}
// In management mode, a JWT carrying a `tenant_id` custom claim must switch
// the session to that tenant while auto-creating the user there.
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_jwt_auth_mgr_with_management() -> Result<()> {
    // Publish the RSA public key as a JWKS document on a mock HTTP server.
    let kid = "test_kid";
    let user_name = "test";
    let key_pair = RS256KeyPair::generate(2048)?.with_key_id(kid);
    let rsa_components = key_pair.public_key().to_components();
    let e = general_purpose::URL_SAFE_NO_PAD.encode(rsa_components.e);
    let n = general_purpose::URL_SAFE_NO_PAD.encode(rsa_components.n);
    let j =
        serde_json::json!({"keys": [ {"kty": "RSA", "kid": kid, "e": e, "n": n, } ] }).to_string();
    let server = MockServer::start().await;
    let json_path = "/jwks.json";
    // Create a mock on the server.
    let template = ResponseTemplate::new(200).set_body_raw(j, "application/json");
    Mock::given(method("GET"))
        .and(path(json_path))
        .respond_with(template)
        .expect(1..)
        // Mounting the mock on the mock server - it's now effective!
        .mount(&server)
        .await;
    // Management mode is what allows the tenant switch below.
    let mut conf = crate::tests::ConfigBuilder::create()
        .with_management_mode()
        .config();
    conf.query.jwt_key_file = format!("http://{}{}", server.address(), json_path);
    let (_guard, ctx) = crate::tests::create_query_context_with_config(conf, None).await?;
    let auth_mgr = ctx.get_auth_manager();
    // with create user in other tenant
    {
        let tenant = "other";
        let custom_claims = CustomClaims::new()
            .with_tenant_id(tenant)
            .with_ensure_user(EnsureUser::default());
        let claims = Claims::with_custom_claims(custom_claims, Duration::from_hours(2))
            .with_subject(user_name.to_string());
        let token = key_pair.sign(claims)?;
        auth_mgr
            .auth(ctx.get_current_session(), &Credential::Jwt {
                token,
                hostname: None,
            })
            .await?;
        let user_info = ctx.get_current_user()?;
        // The session tenant must have switched to the claimed tenant.
        let current_tenant = ctx.get_tenant();
        assert_eq!(current_tenant, tenant.to_string());
        assert_eq!(user_info.grants.roles().len(), 0);
        Ok(())
    }
}
|
mod config;
mod error;
mod models;
mod schema;
pub use crate::error::Error;
use crate::models::PlayerStatForUpdate;
use crate::models::{
CanGetHash, Hash, PlayerStatForInsert, ScoreSnapForUpdate, User, UserStatus,
UserStatusForInsert,
};
use anyhow::Result;
use chrono::{NaiveDateTime, Utc};
use diesel::prelude::*;
use diesel::r2d2::{ConnectionManager, PooledConnection};
use diesel::result::Error::NotFound;
use diesel::MysqlConnection;
use model::*;
use oauth_google::{GoogleProfile, RegisterUser};
use r2d2::Pool;
use repository::*;
use std::collections::{HashMap, HashSet};
use std::str::FromStr;
#[macro_use]
extern crate lazy_static;
/// r2d2 connection pool over diesel's MySQL backend.
pub type MySqlPool = Pool<ConnectionManager<MysqlConnection>>;
/// A single MySQL connection checked out of a [`MySqlPool`].
pub type MySqlPooledConnection = PooledConnection<ConnectionManager<MysqlConnection>>;
/// Builds the MySQL connection pool from the configured database URL.
/// `build_unchecked` does not open a connection eagerly, so a bad URL only
/// surfaces when a connection is first checked out.
pub fn get_db_pool() -> MySqlPool {
    Pool::builder().build_unchecked(ConnectionManager::new(config::config().mysql_url))
}
/// Repository facade implementing the domain's persistence traits on top of
/// a single pooled MySQL connection.
pub struct MySQLClient {
    connection: MySqlPooledConnection,
}
impl MySQLClient {
    /// Wraps an already-checked-out pooled connection.
    pub fn new(connection: MySqlPooledConnection) -> Self {
        Self { connection }
    }
    /// Loads the full snapshot history of `account`, grouped by score id.
    fn score_log(&mut self, account: &Account) -> Result<HashMap<ScoreId, SnapShots>, Error> {
        let records = models::ScoreSnap::by_user_id(&mut self.connection, account.user_id().get())?;
        let mut map: HashMap<ScoreId, SnapShots> = HashMap::new();
        for row in records {
            // NOTE(review): assumes every stored sha256 parses — a corrupt
            // row would panic here; TODO confirm this invariant.
            let song_id = ScoreId::new(row.sha256.parse().unwrap(), PlayMode::from(row.mode));
            let snap = SnapShot::from_data(
                row.clear,
                row.score,
                row.combo,
                row.min_bp,
                row.date.timestamp(),
            );
            map.entry(song_id).or_default().add(snap);
        }
        Ok(map)
    }
    /// Loads the snapshot history of one song (by sha256), grouped by user.
    fn score_log_by_sha256(
        &mut self,
        sha256: &HashSha256,
    ) -> Result<HashMap<UserId, SnapShots>, Error> {
        let records = models::ScoreSnap::by_sha256(&mut self.connection, &sha256.to_string())?;
        let mut map: HashMap<UserId, SnapShots> = HashMap::new();
        for row in records {
            let user_id = UserId::new(row.user_id);
            let snap = SnapShot::from_data(
                row.clear,
                row.score,
                row.combo,
                row.min_bp,
                row.date.timestamp(),
            );
            map.entry(user_id).or_default().add(snap);
        }
        Ok(map)
    }
    /// All scores already stored for `user_id`, keyed by score id.
    fn saved_song(&mut self, user_id: i32) -> Result<HashMap<ScoreId, models::Score>> {
        let saved = models::Score::by_user_id(&mut self.connection, user_id)?;
        let map = saved
            .into_iter()
            .map(|record| (record.get_score_id(), record))
            .collect::<HashMap<_, _>>();
        Ok(map)
    }
    /// All snapshots already stored for `user_id`, keyed by (score id, date)
    /// so duplicates can be detected before insertion.
    fn saved_snap(
        &mut self,
        user_id: i32,
    ) -> Result<HashMap<(ScoreId, NaiveDateTime), models::ScoreSnap>> {
        let saved = models::ScoreSnap::by_user_id(&mut self.connection, user_id)?;
        Ok(saved
            .into_iter()
            .map(|record| ((record.get_score_id(), record.date), record))
            .collect::<HashMap<_, _>>())
    }
}
impl HealthCheck for MySQLClient {
fn health(&mut self) -> Result<()> {
todo!();
// match &self.connection.("SELECT 1") {
// Ok(_) => Ok(()),
// Err(_) => Err(anyhow!("HealthCheckError")),
// }
}
}
impl RegisterUser for MySQLClient {
    /// Registers `profile` as a new user unless it is already present.
    ///
    /// NOTE(review): ANY lookup error (not just "not found") falls into the
    /// insert branch — a transient DB error could trigger a duplicate insert
    /// attempt; TODO confirm this is intended.
    fn register(&mut self, profile: &GoogleProfile) -> Result<()> {
        let user = User::by_google_profile(&mut self.connection, profile);
        match user {
            Ok(_) => Ok(()),
            Err(_) => {
                use crate::schema::users::dsl::*;
                log::info!("Insert new user: {}", profile.email);
                diesel::insert_into(users)
                    .values(models::RegisteringUser::from_profile(profile))
                    .execute(&mut self.connection)?;
                Ok(())
            }
        }
    }
}
impl AccountByUserId for MySQLClient {
    /// Looks up the account for the given internal user id.
    fn user(&mut self, id: i32) -> Result<Account> {
        let record = User::by_user_id(&mut self.connection, id)?;
        Ok(record.into())
    }
}
impl AccountByGoogleId for MySQLClient {
    /// Looks up the account registered under the given Google id.
    fn user(&mut self, google_id: &GoogleId) -> Result<Account> {
        let record = User::by_google_id(&mut self.connection, google_id.to_string())?;
        Ok(record.into())
    }
}
impl RenameAccount for MySQLClient {
    /// Renames the user: appends an audit row to `rename_logs`, then updates
    /// the `users.name` column (matched by the account's email address).
    ///
    /// NOTE(review): the two statements are not wrapped in a transaction, so
    /// a failure between them leaves a log row without the rename — confirm
    /// whether that is acceptable.
    fn rename(&mut self, account: &Account) -> Result<()> {
        log::info!("Update user name to {}.", account.user_name());
        let user = User::by_account(&mut self.connection, account)?;
        diesel::insert_into(schema::rename_logs::table)
            .values(models::RenameUser {
                user_id: user.id,
                old_name: user.name,
                new_name: account.user_name(),
                date: Utc::now().naive_utc(),
            })
            .execute(&mut self.connection)?;
        diesel::update(
            schema::users::table.filter(schema::users::gmail_address.eq(account.email())),
        )
        .set(schema::users::name.eq(account.user_name()))
        .execute(&mut self.connection)?;
        Ok(())
    }
}
impl ChangeAccountVisibility for MySQLClient {
    /// Persists the account's visibility flag: updates the existing
    /// `user_statuses` row, or inserts a fresh one if the user has none yet
    /// (the lookup error is treated as "no status row").
    fn change_visibility(&mut self, account: &Account) -> Result<()> {
        log::info!(
            "Update visibility to {}. : {}",
            account.visibility,
            account.user_id().get()
        );
        let user = User::by_account(&mut self.connection, account)?;
        let user_status = UserStatus::by_user(&mut self.connection, &user);
        match user_status {
            Ok(status) => {
                use crate::schema::user_statuses::dsl::*;
                diesel::update(user_statuses.filter(id.eq(status.id)))
                    .set(visible.eq(account.visibility()))
                    .execute(&mut self.connection)?;
            }
            Err(_) => {
                use crate::schema::user_statuses::dsl::*;
                let new = UserStatusForInsert {
                    user_id: user.id,
                    visible: account.visibility(),
                    score_updated_at: Utc::now().naive_utc(),
                };
                diesel::insert_into(user_statuses)
                    .values(new)
                    .execute(&mut self.connection)?;
            }
        }
        Ok(())
    }
}
impl AllSongData for MySQLClient {
    /// Loads every song row plus the sha256→md5 hash table and folds them
    /// into a `Songs` collection.
    fn song_data(&mut self) -> Result<Songs> {
        let record = models::Song::all(&mut self.connection)?;
        let hash = Hash::all(&mut self.connection)?;
        // Index hashes by sha256 for the per-song md5 lookup below.
        let hash = hash
            .iter()
            .map(|hash| (&hash.sha256, &hash.md5))
            .collect::<HashMap<&String, &String>>();
        Ok(record
            .iter()
            .fold(SongsBuilder::default(), |mut builder, row| {
                builder.push(
                    // NOTE(review): assumes every song row has a matching
                    // hash entry and valid hash strings — a missing entry
                    // panics here; TODO confirm the invariant.
                    HashMd5::from_str(hash.get(&row.sha256).unwrap()).unwrap(),
                    HashSha256::from_str(&row.sha256).unwrap(),
                    Title::from_title_and_subtitle(&row.title, &row.subtitle),
                    Artist::new(row.artist.clone()),
                    row.notes,
                    IncludeFeatures::from(row.features),
                );
                builder
            })
            .build())
    }
}
impl SaveScoreData for MySQLClient {
    /// Persists `score` for `account`: newer scores replace saved ones, new
    /// scores and new snapshots are inserted, all in batches of up to 1000.
    /// Records whose sha256 is not present in the `hashes` table are
    /// silently skipped (unknown songs).
    fn save_score(&mut self, account: &Account, score: &Scores) -> Result<()> {
        let user = User::by_account(&mut self.connection, account)?;
        let user_id = user.id;
        let saved_song = self.saved_song(user_id)?;
        let saved_snap = self.saved_snap(user_id)?;
        // Known song hashes; used by `div` below to filter out records for
        // songs the database does not know.
        let hashes = Hash::all(&mut self.connection)?
            .iter()
            .map(|h| h.sha256.clone())
            .collect::<HashSet<_>>();
        let mut songs_for_insert = Vec::new();
        let mut songs_for_update = Vec::new();
        let mut snaps_for_insert = Vec::new();
        for (song_id, score) in score.get_map() {
            match saved_song.get(song_id) {
                Some(saved) => {
                    // Only overwrite when the incoming score is newer.
                    if UpdatedAt::from_naive_datetime(saved.date) < score.updated_at {
                        songs_for_update
                            .push(models::Score::from_score(saved, score, user_id, song_id))
                    }
                }
                None => songs_for_insert
                    .push(models::RegisteredScore::from_score(user_id, score, song_id)),
            };
            for snapshot in &score.log.0 {
                // Skip snapshots already stored for this (score, timestamp).
                match saved_snap.get(&(song_id.clone(), snapshot.updated_at.naive_datetime())) {
                    Some(_) => (),
                    None => snaps_for_insert.push(ScoreSnapForUpdate {
                        user_id,
                        sha256: song_id.sha256().to_string(),
                        mode: song_id.mode().to_int(),
                        date: snapshot.updated_at.naive_datetime(),
                        clear: snapshot.clear_type.to_integer(),
                        score: snapshot.score.ex_score(),
                        combo: snapshot.max_combo.0,
                        min_bp: snapshot.min_bp.0,
                    }),
                }
            }
        }
        log::info!("Songs for Insert {} records.", songs_for_insert.len());
        log::info!("Songs for Update {} records.", songs_for_update.len());
        log::info!("Snaps for Insert {} records.", snaps_for_insert.len());
        // Splits `v` into batches of up to 1000 records, dropping any record
        // whose sha256 is not in `hashes`. The loop ends when a pass over
        // the remaining input yields no records.
        fn div<T: Clone + CanGetHash>(v: Vec<T>, hashes: &HashSet<String>) -> Vec<Vec<T>> {
            let mut index = 0;
            let mut ret = Vec::new();
            loop {
                let mut records = Vec::new();
                while index < v.len() && records.len() < 1000 {
                    if hashes.contains(&v[index].hash_sha256()) {
                        records.push(v[index].clone());
                    }
                    index += 1;
                }
                if records.is_empty() {
                    break;
                }
                ret.push(records);
            }
            ret
        }
        for v in div(songs_for_update, &hashes) {
            log::info!("Update {} scores.", v.len());
            // NOTE(review): the replace_into result is deliberately ignored
            // (best-effort update) — confirm this is intended.
            let _result = diesel::replace_into(schema::scores::table)
                .values(v)
                .execute(&mut self.connection);
        }
        for v in div(songs_for_insert, &hashes) {
            log::info!("Insert {} scores.", v.len());
            diesel::insert_into(schema::scores::table)
                .values(v)
                .execute(&mut self.connection)?;
        }
        for v in div(snaps_for_insert, &hashes) {
            log::info!("Insert {} score_snaps", v.len());
            diesel::insert_into(schema::score_snaps::table)
                .values(v)
                .execute(&mut self.connection)?;
        }
        Ok(())
    }
}
impl SaveSongData for MySQLClient {
    /// Persists song metadata: inserts any (sha256, md5) hash pairs and any
    /// songs not yet present, in bounded batches.
    ///
    /// # Errors
    /// Returns an error if loading existing rows or any batched insert fails.
    fn save_song(&mut self, songs: &Songs) -> Result<()> {
        // Drop hash pairs that already exist in the `hashes` table.
        let exist_hashes = Hash::all(&mut self.connection)?;
        let mut hashmap = songs.converter.sha256_to_md5.clone();
        for row in exist_hashes {
            let _ = HashSha256::from_str(&row.sha256).map(|hash| hashmap.remove(&hash));
        }
        let new_hashes = hashmap
            .iter()
            .map(|(sha256, md5)| models::Hash {
                sha256: sha256.to_string(),
                md5: md5.to_string(),
            })
            .collect::<Vec<_>>();
        // Insert in batches of up to 1000 rows to keep each statement small;
        // `chunks` replaces the previous hand-rolled index loop and never
        // yields an empty batch.
        for records in new_hashes.chunks(1000) {
            log::info!("Insert {} hashes.", records.len());
            diesel::insert_into(schema::hashes::table)
                .values(records.to_vec())
                .execute(&mut self.connection)?;
        }
        // Drop songs whose sha256 is already stored.
        let exist_songs = models::Song::all(&mut self.connection)?;
        let mut songs = songs.songs.clone();
        for row in exist_songs {
            let _ = HashSha256::from_str(&row.sha256).map(|hash| songs.remove(&hash));
        }
        let new_songs = songs
            .iter()
            .map(|(_, song)| models::Song::from_song(song))
            .collect::<Vec<_>>();
        // Song rows are wider, so use a smaller batch size (100).
        for records in new_songs.chunks(100) {
            log::info!("Insert {} songs.", records.len());
            diesel::insert_into(schema::songs::table)
                .values(records.to_vec())
                .execute(&mut self.connection)?;
        }
        Ok(())
    }
}
impl SavePlayerStateData for MySQLClient {
    /// Persists daily play statistics: inserts days not yet stored, and
    /// replaces stored days whose play count has increased.
    fn save_player_states(&mut self, account: &Account, stats: &PlayerStats) -> Result<()> {
        let user = User::by_account(&mut self.connection, account)?;
        // Existing stat rows keyed by date, for change detection below.
        let saved = models::PlayerStat::by_user_id(&mut self.connection, user.id)?
            .into_iter()
            .map(|s| (s.date, s))
            .collect::<HashMap<_, _>>();
        let mut inserts = Vec::new();
        let mut updates = Vec::new();
        for stat in stats.log.iter() {
            if let Some(saved) = saved.get(&stat.date.naive_datetime()) {
                // Only update when the incoming day shows more plays.
                if saved.playcount < stat.play_count.0 {
                    updates.push(PlayerStatForUpdate {
                        id: saved.id,
                        user_id: user.id,
                        date: stat.date.naive_datetime(),
                        playcount: stat.play_count.0,
                        clear: stat.clear_count.0,
                        epg: stat.total_judge.judge().early_pgreat,
                        lpg: stat.total_judge.judge().late_pgreat,
                        egr: stat.total_judge.judge().early_great,
                        lgr: stat.total_judge.judge().late_great,
                        egd: stat.total_judge.judge().early_good,
                        lgd: stat.total_judge.judge().late_good,
                        ebd: stat.total_judge.judge().early_bad,
                        lbd: stat.total_judge.judge().late_bad,
                        epr: stat.total_judge.judge().early_poor,
                        lpr: stat.total_judge.judge().late_poor,
                        ems: stat.total_judge.judge().early_miss,
                        lms: stat.total_judge.judge().late_miss,
                        playtime: stat.play_time.0,
                    });
                }
            } else {
                inserts.push(PlayerStatForInsert {
                    user_id: user.id,
                    date: stat.date.naive_datetime(),
                    playcount: stat.play_count.0,
                    clear: stat.clear_count.0,
                    epg: stat.total_judge.judge().early_pgreat,
                    lpg: stat.total_judge.judge().late_pgreat,
                    egr: stat.total_judge.judge().early_great,
                    lgr: stat.total_judge.judge().late_great,
                    egd: stat.total_judge.judge().early_good,
                    lgd: stat.total_judge.judge().late_good,
                    ebd: stat.total_judge.judge().early_bad,
                    lbd: stat.total_judge.judge().late_bad,
                    epr: stat.total_judge.judge().early_poor,
                    lpr: stat.total_judge.judge().late_poor,
                    ems: stat.total_judge.judge().early_miss,
                    lms: stat.total_judge.judge().late_miss,
                    playtime: stat.play_time.0,
                })
            }
        }
        log::info!("Save stat for {} days", inserts.len());
        diesel::insert_into(schema::player_stats::table)
            .values(inserts)
            .execute(&mut self.connection)?;
        log::info!("Update stat on {} days", updates.len());
        // replace_into overwrites the existing rows (matched by primary key).
        diesel::replace_into(schema::player_stats::table)
            .values(updates)
            .execute(&mut self.connection)?;
        Ok(())
    }
}
impl StatsByAccount for MySQLClient {
    /// Loads the full play-stat history for the given account.
    fn stats(&mut self, account: &Account) -> Result<PlayerStats> {
        let user = User::by_account(&mut self.connection, account)?;
        let rows = models::PlayerStat::by_user_id(&mut self.connection, user.id)?;
        let stats = rows.into_iter().map(|row| row.to_stat()).collect();
        Ok(PlayerStats::new(stats))
    }
}
impl ScoresByAccount for MySQLClient {
    /// Loads all scores of `account`, attaching each score's snapshot log.
    /// Rows whose sha256 fails to parse are silently dropped.
    fn score(&mut self, account: &Account) -> Result<Scores> {
        let user = User::by_account(&mut self.connection, account)?;
        let record = models::Score::by_user_id(&mut self.connection, user.id)?;
        let score_log = self.score_log(account)?;
        Ok(Scores::create_by_map(
            record
                .into_iter()
                .filter_map(|row| {
                    if let Ok(sha256) = row.sha256.parse() {
                        let score_id = ScoreId::new(sha256, PlayMode::from(row.mode));
                        // Missing log entries fall back to an empty history.
                        let log = score_log.get(&score_id).cloned().unwrap_or_default();
                        Some((score_id, row.to_score().with_log(log)))
                    } else {
                        None
                    }
                })
                .collect::<HashMap<ScoreId, Score>>(),
        ))
    }
}
impl ScoreByAccountAndSha256 for MySQLClient {
    /// Fetches one score for `account` identified by `score_id`, together
    /// with its complete snapshot history. Returns `NotFound` when no score
    /// row exists.
    fn score_with_log(&mut self, account: &Account, score_id: &ScoreId) -> Result<Score> {
        let user = User::by_account(&mut self.connection, account)?;
        let sha = score_id.sha256().to_string();
        let mode = score_id.mode().to_int();
        let score_rows =
            models::Score::by_user_id_and_score_id(&mut self.connection, user.id, &sha, mode)?;
        let score = score_rows.get(0).ok_or(NotFound)?.to_score();
        // Rebuild the snapshot history from its persisted rows.
        let snap_rows =
            models::ScoreSnap::by_user_id_and_score_id(&mut self.connection, user.id, &sha, mode)?;
        let mut snapshots = SnapShots::default();
        for row in snap_rows {
            snapshots.add(SnapShot::from_data(
                row.clear,
                row.score,
                row.combo,
                row.min_bp,
                row.date.timestamp(),
            ));
        }
        Ok(score.with_log(snapshots))
    }
}
impl ScoresBySha256 for MySQLClient {
    /// Collects every user's score on the chart identified by `hash`,
    /// attaching each user's snapshot log (empty when absent).
    fn score(&mut self, hash: &HashSha256) -> Result<RankedScore> {
        let rows = models::Score::by_sha256(&mut self.connection, &hash.to_string())?;
        let logs = self.score_log_by_sha256(hash)?;
        let mut table: HashMap<UserId, Score> = HashMap::new();
        for row in rows {
            let user_id = UserId::new(row.user_id);
            let log = logs.get(&user_id).cloned().unwrap_or_default();
            table.insert(user_id, row.to_score().with_log(log));
        }
        Ok(RankedScore::create_by_map(table))
    }
}
impl PublishedUsers for MySQLClient {
    /// Lists the accounts whose status marks them as publicly visible.
    fn fetch_users(&mut self) -> Result<Vec<VisibleAccount>> {
        let list = UserStatus::visible_with_account(&mut self.connection)?;
        Ok(list
            .into_iter()
            .map(|(_status, user)| VisibleAccount {
                id: UserId::new(user.id),
                name: user.name,
            })
            .collect())
    }
}
impl ResetScore for MySQLClient {
    /// Deletes all score-related data (scores, snapshots, user status, daily
    /// play stats) belonging to `account`'s user, then logs the removal.
    fn reset_score(&mut self, account: &Account) -> Result<()> {
        let user = User::by_account(&mut self.connection, account)?;
        models::Score::delete_by_user(&mut self.connection, &user)?;
        models::ScoreSnap::delete_by_user(&mut self.connection, &user)?;
        models::UserStatus::delete_by_user(&mut self.connection, &user)?;
        models::PlayerStat::delete_by_user(&mut self.connection, &user)?;
        log::info!(
            "Score data is removed: account id = {}",
            account.user_id.get()
        );
        Ok(())
    }
}
|
//! # global properties
//! - `#![no_std]`
//! ban std lib
#![no_std]
//!
//! - `#![no_main]`
#![no_main]
#![feature(llvm_asm)]
#![feature(global_asm)]
#![feature(panic_info_message)]
#[macro_use]
mod console;
mod panic;
mod sbi;
global_asm!(include_str!("asm/entry.asm"));
/// cover _start function of crt0
/// we temporarily implement it as an endless loop
/// no name mangling during debug
#[no_mangle]
pub extern "C" fn rust_main() -> ! {
    // print a greeting, then diverge via panic! (handled by the `panic` module)
    println!("hello rCore!");
    panic!("end of rust_main")
}
|
use std::cmp::Ordering;
use std::mem;
// courtesy of https://stackoverflow.com/a/28294764
/// Swaps the elements at indices `i` and `j` of `x` in place.
///
/// The previous implementation hand-rolled the swap with an index
/// comparison plus `split_at_mut`; the standard library's `<[T]>::swap`
/// does exactly this (including the `i == j` no-op case), so we delegate
/// to it. Panics if either index is out of bounds, as before.
fn swap<T>(x: &mut [T], i: usize, j: usize) {
    x.swap(i, j);
}
// Partitions subarray by Lomuto's algorithm using first element as pivot
// Input: A subarray a[l..r] of array a[0..n-1], defined by
// its left and right indices l and r (l <= r)
// Output: Partition of a[l..r] and the new position of the pivot
// Lomuto partition of the inclusive subarray a[l..=r] around the pivot a[l].
// After the call, every element left of the returned index is smaller than
// the pivot and the pivot itself sits at that index, which is returned.
fn lomuto_partition(a: &mut [u8], l: usize, r: usize) -> usize {
    let pivot = a[l];
    let mut boundary = l; // last index of the "smaller than pivot" region
    for probe in (l + 1)..=r {
        if a[probe] < pivot {
            boundary += 1;
            a.swap(boundary, probe);
        }
    }
    // Move the pivot between the two regions.
    a.swap(l, boundary);
    boundary
}
// Solves the selection problem by recursive partition-based algorithm
// Input: Subarray a[l..r] of array a[0..n-1] of orderable elements
// and integer k (1 <= k <= r - l + 1)
// Output: The value of the kth smallest element in a[l..r]
// Solves the selection problem by recursive partition-based algorithm.
// Input: Subarray a[l..r] of array a[0..n-1] of orderable elements and an
//        integer k, the 1-based rank over the WHOLE array (l <= k-1 <= r).
// Output: The value of the kth smallest element.
//
// Bug fix: the "recurse left" test previously compared `s > (l + k - 1)`,
// mixing a subarray-relative convention into a function that otherwise
// treats `k` as an absolute rank (the base case tests `s == k - 1` and
// neither recursive call adjusts `k`). Once l > 0 that mismatch could send
// the search into the wrong half and eventually index out of bounds
// (e.g. a = [1, 5, 3, 4], k = 2 panicked). The test is now `s > k - 1`,
// consistent with the absolute-rank base case; top-level calls with l = 0
// behave exactly as before.
fn quickselect(a: &mut [u8], l: usize, r: usize, k: usize) -> u8 {
    let s = lomuto_partition(a, l, r);
    if s == k - 1 {
        a[s]
    } else if s > k - 1 {
        quickselect(a, l, s - 1, k)
    } else {
        quickselect(a, s + 1, r, k)
    }
}
fn main() {
    // search example array from book
    let mut a: [u8; 9] = [4, 1, 10, 8, 7, 12, 9, 2, 15];
    // Print before selecting: quickselect reorders the array in place.
    println!("Array to search: {:?}", a);
    let fifth = quickselect(&mut a, 0, 8, 5);
    println!("The 5th smallest element of the array: {}", fifth);
}
|
/// Splits a length of `vec_len` items into `chunk_num` chunk sizes.
///
/// Every chunk gets `vec_len / chunk_num` items; the final chunk also
/// absorbs the division remainder, so the returned sizes always sum to
/// `vec_len`. Asking for zero chunks yields an empty vector (previously
/// `chunk_num - 1` underflowed and panicked).
pub fn chunkinfy(vec_len: usize, chunk_num: usize) -> Vec<usize> {
    if chunk_num == 0 {
        return Vec::new();
    }
    let chunk_size = vec_len / chunk_num;
    // Idiomatic replacement for the old `vec![x].iter().cycle().take(n)` chain.
    let mut sizes = vec![chunk_size; chunk_num];
    // Last chunk picks up the remainder.
    sizes[chunk_num - 1] = chunk_size + (vec_len - chunk_size * chunk_num);
    sizes
}
/// Removes the elements at `indices_to_remove` from `source` using
/// `swap_remove`, returning them in the order the indices were given.
///
/// Note: `swap_remove` moves the last element into the vacated slot, so each
/// index refers to the *current* state of `source`, not its original layout.
pub fn remove_multiple<T>(source: &mut Vec<T>, indices_to_remove: &[usize]) -> Vec<T> {
    let mut removed = Vec::with_capacity(indices_to_remove.len());
    for &index in indices_to_remove {
        removed.push(source.swap_remove(index));
    }
    removed
}
use std::io::Cursor;
use varuint::*;
use std::collections::*;
pub fn vb_encode(v:HashSet<u32>) -> std::io::Cursor<std::vec::Vec<u8>> {
let mut v:Vec<&u32> = v.iter().collect();
let mut cursor = Cursor::new(vec![]);
v.sort();
cursor.write_varint(v[0].clone()).unwrap();
for i in 1..v.len() {
let _ = cursor.write_varint(v[i]-v[i-1]).unwrap();
}
cursor.set_position(0);
cursor
} |
pub mod decompressor;
pub mod stream;
pub mod video;
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
// Test that `&PrinterSupport`, which is really short for `&'a
// PrinterSupport<'b>`, gets properly expanded when it appears in a
// closure type. This used to result in messed up De Bruijn indices.
// pretty-expanded FIXME #23616
/// Minimal trait with a lifetime parameter; the default `ast_map` simply
/// reports "no AST available". Exists only so the compile test below can
/// form a `&PrinterSupport` trait object.
trait PrinterSupport<'ast> {
    fn ast_map(&self) -> Option<&'ast usize> { None }
}
/// Trivial `PrinterSupport` carrier; the field exists only to tie the struct
/// to the `'ast` lifetime.
struct NoAnn<'ast> {
    f: Option<&'ast usize>
}
// Relies entirely on the trait's default `ast_map` implementation.
impl<'ast> PrinterSupport<'ast> for NoAnn<'ast> {
}
/// Exercises the elided-lifetime trait object inside the
/// `FnOnce(&PrinterSupport)` bound — the construct whose De Bruijn index
/// expansion this compile test is about.
fn foo<'ast, G>(f: Option<&'ast usize>, g: G) where G: FnOnce(&PrinterSupport) {
    let annotation = NoAnn { f: f };
    g(&annotation)
}
fn main() {}
|
#[macro_use] extern crate lazy_static;
extern crate reactive_net;
mod __authentic_execution;
pub mod __run;
#[allow(unused_imports)] use __authentic_execution::authentic_execution;
#[allow(unused_imports)] use __authentic_execution::authentic_execution::{MODULE_NAME, success, failure, handle_output, handle_request, Error};
#[allow(unused_imports)] use reactive_net::{ResultCode, ResultMessage};
// Imports and other stuff
//@ sm_output(button_pressed)
/// Output `button_pressed`: hands `data` to the authentic-execution runtime
/// under this output's connection id.
pub fn button_pressed(data : &[u8]) {
    debug!("OUTPUT: button_pressed");
    // Connection id assigned to this output by the deployment descriptor.
    let id : u16 = 16384;
    handle_output(id, data);
}
//@ sm_output(output1)
/// Output `output1`: hands `data` to the authentic-execution runtime under
/// this output's connection id.
pub fn output1(data : &[u8]) {
    debug!("OUTPUT: output1");
    // Connection id assigned to this output by the deployment descriptor.
    let id : u16 = 16385;
    handle_output(id, data);
}
//@ sm_request(get_value)
/// Request `get_value`: issues a request through the runtime under this
/// request's connection id and returns the responder's payload (or an
/// `Error` from the runtime).
pub fn get_value(data : &[u8]) -> Result<Vec<u8>, Error> {
    debug!("REQUEST: get_value");
    // Connection id assigned to this request by the deployment descriptor.
    let id : u16 = 32768;
    handle_request(id, data)
}
//@ sm_entry
/// Entry point `press_button`: fires the `button_pressed` output with an
/// empty payload and reports success. The incoming payload is ignored.
pub fn press_button(_data : &[u8]) -> ResultMessage {
    debug!("ENTRYPOINT: press_button");
    button_pressed(&[]);
    success(None)
}
//@ sm_input
/// Input handler `input1`: forwards the received payload unchanged to the
/// `output1` output.
pub fn input1(data : &[u8]) {
    info!("INPUT: input1");
    output1(data);
}
//@ sm_handler
/// Handler `handler_value`: ignores its input and always answers with the
/// fixed payload `[1, 2, 3, 4]`.
pub fn handler_value(_data : &[u8]) -> Vec<u8> {
    debug!("HANDLER: handler_value");
    vec![1, 2, 3, 4]
}
// User-defined functions and other stuff
|
use input_i_scanner::{scan_with, InputIScanner};
use mod_int::ModInt998244353;
fn main() {
    // Reads `n` and `d` from stdin and prints an accumulated count modulo
    // 998244353.
    // NOTE(review): the combinatorial meaning of the formula below is not
    // recoverable from this file alone; the comments describe only the
    // arithmetic structure — confirm against the original problem statement.
    let stdin = std::io::stdin();
    let mut _i_i = InputIScanner::from(stdin.lock());
    let (n, d) = scan_with!(_i_i, (usize, usize));
    type Mint = ModInt998244353;
    let two = Mint::new(2);
    let mut ans = Mint::new(0);
    for i in 1..n {
        // h: count of positions to the right of i (n - i).
        let h = n - i;
        let mut a = Mint::new(0);
        if h >= d {
            a += two.pow(d) * 2;
        }
        if d >= 2 {
            if d <= h {
                a += two.pow(d - 2) * (d - 1) * 2;
            } else if d <= h * 2 {
                a += two.pow(d - 2) * (h * 2 + 1 - d) * 2;
            }
        }
        // Scale this position's contribution by 2^(i-1) before accumulating.
        a *= two.pow(i - 1);
        ans += a;
    }
    println!("{}", ans.val());
}
|
use ropey::Rope;
use std::ops::Range;
/// Errors produced by `TextBuffer` editing operations.
#[derive(Debug)]
pub enum EditError {
    /// The supplied line/column does not exist in the buffer.
    InvalidPosition,
}
/// A caret position in the buffer; `line` and `column` are one-based indices.
#[derive(Debug, Clone)]
pub struct TextPosition {
    pub line: usize,
    pub column: usize,
}
impl TextPosition {
    /// Builds a position from one-based `line` and `column` indices.
    pub fn new(line: usize, column: usize) -> Self {
        TextPosition { line, column }
    }
}
/// A text document backed by a `ropey::Rope` for efficient random edits.
pub struct TextBuffer {
    buffer: Rope,
}
impl TextBuffer {
    /// Creates an empty buffer.
    pub fn new() -> Self {
        Self {
            buffer: Rope::new(),
        }
    }
    /// Returns the entire buffer contents as an owned `String`.
    ///
    /// Fix: ropey's `chunks()` already yields `&str`, so the previous
    /// `unsafe { String::from_utf8_unchecked(chunk.as_bytes().to_vec()) }`
    /// round trip through a byte vector was needless (and needlessly
    /// `unsafe`). Plain `push_str` is equivalent and safe.
    pub fn text(&self) -> String {
        let mut s = String::with_capacity(self.buffer.len_bytes());
        for chunk in self.buffer.chunks() {
            s.push_str(chunk);
        }
        s
    }
    /// Replaces the whole buffer contents with `text`.
    pub fn set_text(&mut self, text: &str) {
        self.buffer.remove(..);
        self.buffer.insert(0, text);
    }
    /// Replaces the (possibly empty) range `start..end` with `text`.
    ///
    /// An equal start/end performs a pure insertion; an empty `text`
    /// performs a pure deletion. Fails with `InvalidPosition` when either
    /// position is out of range.
    pub fn edit(
        &mut self,
        start: &TextPosition,
        end: &TextPosition,
        text: &str,
    ) -> Result<(), EditError> {
        let char_range = self.get_char_range(start, end)?;
        if char_range.start != char_range.end {
            self.buffer.remove(char_range.clone());
        }
        if !text.is_empty() {
            self.buffer.insert(char_range.start, text);
        }
        Ok(())
    }
    /// Converts a one-based line/column position into a char index,
    /// validating both coordinates.
    ///
    /// `line == total_lines + 1` is accepted and treated as an empty line
    /// just past the end; `column == line_len + 1` addresses the position
    /// one past the line's last char.
    fn get_char_index(&self, pos: &TextPosition) -> Result<usize, EditError> {
        let total_lines = self.buffer.len_lines();
        if pos.line < 1 || pos.line > total_lines + 1 {
            warn!(
                "edit position line out of range: {:?}, total_lines = {}",
                pos, total_lines
            );
            return Err(EditError::InvalidPosition);
        }
        let line_idx = pos.line - 1;
        // A position on the virtual line past the end has length zero.
        let line_len = if line_idx < total_lines {
            self.buffer.line(line_idx).len_chars()
        } else {
            0
        };
        if pos.column < 1 || pos.column > line_len + 1 {
            warn!(
                "edit position column out of range: {:?}, total_columns = {}",
                pos, line_len
            );
            return Err(EditError::InvalidPosition);
        }
        let line_start = self.buffer.line_to_char(line_idx);
        Ok(line_start + pos.column - 1)
    }
    /// Converts a pair of positions into a half-open char range.
    fn get_char_range(
        &self,
        start: &TextPosition,
        end: &TextPosition,
    ) -> Result<Range<usize>, EditError> {
        let char_start = self.get_char_index(start)?;
        let char_end = self.get_char_index(end)?;
        Ok(char_start..char_end)
    }
}
|
use core::{
cmp,
fmt::{self, Debug, Formatter},
ops::Range,
};
/// A half-open range which indicates the location of something in a body of
/// text.
///
/// Note that the custom `PartialEq` below treats [`Span::PLACEHOLDER`] as
/// equal to every span.
#[derive(Copy, Clone, Eq)]
#[cfg_attr(
    feature = "serde-1",
    derive(serde_derive::Serialize, serde_derive::Deserialize)
)]
#[repr(C)]
pub struct Span {
    /// The byte index corresponding to the item's start.
    pub start: usize,
    /// The index one byte past the item's end.
    pub end: usize,
    /// The (zero-based) line number.
    pub line: usize,
}
impl Span {
    /// A placeholder [`Span`] — all fields are `usize::max_value()` — which
    /// is ignored by [`Span::merge()`] and by equality checks.
    pub const PLACEHOLDER: Span =
        Span::new(usize::max_value(), usize::max_value(), usize::max_value());
    /// Create a new [`Span`] from its raw parts.
    pub const fn new(start: usize, end: usize, line: usize) -> Self {
        Span { start, end, line }
    }
    /// Get the string this [`Span`] corresponds to.
    ///
    /// Passing in a different string will probably lead to... strange...
    /// results.
    pub fn get_text<'input>(&self, src: &'input str) -> Option<&'input str> {
        src.get(self.start..self.end)
    }
    /// Merge two [`Span`]s: a [`Span::PLACEHOLDER`] on either side is
    /// discarded in favour of the other span; otherwise the result covers
    /// both inputs (earliest start/line, latest end).
    pub fn merge(self, other: Span) -> Span {
        match (self.is_placeholder(), other.is_placeholder()) {
            (true, _) => other,
            (_, true) => self,
            _ => Span::new(
                cmp::min(self.start, other.start),
                cmp::max(self.end, other.end),
                cmp::min(self.line, other.line),
            ),
        }
    }
    /// Is this a [`Span::PLACEHOLDER`]? Compares all three fields directly
    /// (cannot use `==`, whose semantics are placeholder-aware).
    pub fn is_placeholder(self) -> bool {
        let placeholder = Span::PLACEHOLDER;
        self.start == placeholder.start
            && self.end == placeholder.end
            && self.line == placeholder.line
    }
}
impl PartialEq for Span {
    /// Spans compare equal when either side is the placeholder, otherwise
    /// field-by-field.
    ///
    /// NOTE(review): this makes equality non-transitive (two distinct real
    /// spans each equal the placeholder but not each other) even though `Eq`
    /// is derived — intentional per the placeholder's docs, but worth knowing.
    fn eq(&self, other: &Span) -> bool {
        let Span { start, end, line } = *other;
        self.is_placeholder()
            || other.is_placeholder()
            || (self.start == start && self.end == end && self.line == line)
    }
}
impl From<Span> for Range<usize> {
    /// Converts to the equivalent half-open byte range, dropping line info.
    fn from(other: Span) -> Range<usize> { other.start..other.end }
}
impl Debug for Span {
    /// Renders the placeholder distinctly (`<placeholder>`) so debug output
    /// never shows its `usize::max_value()` sentinel fields as real offsets.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        if self.is_placeholder() {
            write!(f, "<placeholder>")
        } else {
            let Span { start, end, line } = self;
            f.debug_struct("Span")
                .field("start", start)
                .field("end", end)
                .field("line", line)
                .finish()
        }
    }
}
impl Default for Span {
    /// The default span is the placeholder, so "no location" merges away.
    fn default() -> Span { Span::PLACEHOLDER }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Reflexivity sanity check for the custom `PartialEq`.
    #[test]
    fn a_span_is_equal_to_itself() {
        let span = Span::new(1, 2, 3);
        assert_eq!(span, span);
    }
    // Every span — including real, non-placeholder ones — must compare equal
    // to the placeholder, since `PartialEq` short-circuits when either side
    // is the placeholder.
    #[test]
    fn all_spans_are_equal_to_the_placeholder() {
        let inputs = vec![
            Span::default(),
            Span::PLACEHOLDER,
            Span::new(42, 0, 0),
            Span::new(0, 42, 0),
            Span::new(0, 0, 42),
        ];
        for input in inputs {
            assert_eq!(input, Span::PLACEHOLDER);
        }
    }
}
|
use crate::auth;
use crate::diesel::QueryDsl;
use crate::diesel::RunQueryDsl;
use crate::handlers::types::*;
use crate::helpers::{email, email_template, encrypt::decrypt};
use crate::model::{MailList, NewUserMail, Space, SpaceEmail, SpaceUser, User, UserMail};
use crate::schema::maillists::dsl::*;
use crate::schema::spaces::dsl::*;
use crate::schema::spaces_email::dsl::space_id as space_email_id;
use crate::schema::spaces_email::dsl::*;
use crate::schema::spaces_users::dsl::space_id;
use crate::schema::spaces_users::dsl::user_id as space_user_id;
use crate::schema::spaces_users::dsl::*;
use crate::schema::usermails::dsl::user_id as mail_user_id;
use crate::schema::usermails::dsl::*;
use crate::schema::users::dsl::*;
use crate::Pool;
use tokio::task;
use actix_web::web;
use diesel::dsl::{delete, insert_into};
use diesel::prelude::*;
pub fn send_email_to_general_db(
db: web::Data<Pool>,
space_name: web::Path<PathInfo>,
token: String,
item: web::Json<SendMail>,
) -> Result<Response<String>, diesel::result::Error> {
let conn = db.get().unwrap();
let decoded_token = auth::decode_token(&token);
let space: Space = spaces
.filter(spaces_name.ilike(&space_name.info))
.first::<Space>(&conn)?;
let user = users
.find(decoded_token.parse::<i32>().unwrap())
.first::<User>(&conn)?;
let _spaces_user: SpaceUser = spaces_users
.filter(space_id.eq(space.id))
.filter(space_user_id.eq(user.id))
.first::<SpaceUser>(&conn)?;
//get space email cred
let email_cred = spaces_email
.filter(space_email_id.eq(space.id))
.first::<SpaceEmail>(&conn);
match email_cred {
Ok(cred_details) => {
let user_spaces: Vec<(SpaceUser, User)> = SpaceUser::belonging_to(&space)
.inner_join(users)
.load::<(SpaceUser, User)>(&conn)?;
let pass = decrypt(&cred_details.email_password);
for a in user_spaces.iter() {
let template = email_template::notify_folder(&"General".to_string(), &item.body);
let credentials = cred_details.clone();
let copy_pass = pass.clone();
let reciever_email = a.1.email.clone();
let receiever_username = a.1.username.clone();
let title = item.title.clone();
task::spawn(async move {
email::send_email(
&reciever_email,
&receiever_username,
&title,
&template,
&credentials.email_address,
©_pass,
&credentials.email_provider,
);
});
}
Ok(Response::new(
true,
"Email sent successfully to all members".to_string(),
))
}
Err(diesel::result::Error::NotFound) => {
return Ok(Response::new(
false,
"Please provide email credentials before you use this service".to_string(),
))
}
_ => return Ok(Response::new(false, "error sending email".to_string())),
}
}
pub fn send_mail_to_folder_db(
db: web::Data<Pool>,
folder_id: web::Path<AddUserToFolderPath>,
token: String,
item: web::Json<SendMail>,
) -> Result<Response<String>, diesel::result::Error> {
let conn = db.get().unwrap();
let decoded_token = auth::decode_token(&token);
let space: Space = spaces
.filter(spaces_name.ilike(&folder_id.info))
.first::<Space>(&conn)?;
let user = users
.find(decoded_token.parse::<i32>().unwrap())
.first::<User>(&conn)?;
let _spaces_user: SpaceUser = spaces_users
.filter(space_id.eq(space.id))
.filter(space_user_id.eq(user.id))
.first::<SpaceUser>(&conn)?;
let mail_list: MailList = maillists.find(folder_id.id).first::<MailList>(&conn)?;
//get space email cred
let email_cred = spaces_email
.filter(space_email_id.eq(space.id))
.first::<SpaceEmail>(&conn);
match email_cred {
Ok(cred_details) => {
let user_mail: Vec<(UserMail, User)> = UserMail::belonging_to(&mail_list)
.inner_join(users)
.load::<(UserMail, User)>(&conn)?;
let pass = decrypt(&cred_details.email_password);
for send_user in user_mail.iter() {
let template = email_template::notify_folder(&mail_list.folder_name, &item.body);
let credentials = cred_details.clone();
let copy_pass = pass.clone();
let reciever_email = send_user.1.email.clone();
let receiever_username = send_user.1.username.clone();
let title = item.title.clone();
task::spawn(async move {
email::send_email(
&reciever_email,
&receiever_username,
&title,
&template,
&credentials.email_address,
©_pass,
&credentials.email_provider,
);
});
}
Ok(Response::new(
true,
"Email sent successfully to all members".to_string(),
))
}
Err(diesel::result::Error::NotFound) => {
return Ok(Response::new(
false,
"Please provide email credentials before you use this service".to_string(),
))
}
_ => return Ok(Response::new(false, "error sending email".to_string())),
}
}
/// Removes the user given in `item` from mail folder `folder.id` inside the
/// space named by `folder.info`. Only space admins may do this.
///
/// Fix: the permission-denied message previously said "add users to folder"
/// even though this endpoint removes them.
pub fn remove_user_folder_db(
    db: web::Data<Pool>,
    token: String,
    folder: web::Path<AddUserToFolderPath>,
    item: web::Json<DeleteMailList>,
) -> Result<Response<String>, diesel::result::Error> {
    let conn = db.get().unwrap();
    let decoded_token = auth::decode_token(&token);
    let user = users
        .find(decoded_token.parse::<i32>().unwrap())
        .first::<User>(&conn)?;
    let space: Space = spaces
        .filter(spaces_name.ilike(&folder.info))
        .first::<Space>(&conn)?;
    // Membership + admin check.
    let spaces_user: SpaceUser = spaces_users
        .filter(space_id.eq(space.id))
        .filter(space_user_id.eq(user.id))
        .first::<SpaceUser>(&conn)?;
    if !spaces_user.admin_status {
        return Ok(Response::new(
            false,
            "only admin allowed to remove users from folder".to_string(),
        ));
    }
    let folder: MailList = maillists.find(folder.id).first::<MailList>(&conn)?;
    let _count = delete(
        usermails
            .filter(mail_list_id.eq(folder.id))
            .filter(mail_user_id.eq(&item.id)),
    )
    .execute(&conn)?;
    Ok(Response::new(true, "user removed successfully".to_string()))
}
/// Adds every user id in `item.id` to mail folder `folder.id` inside the
/// space named by `folder.info`. Only space admins may do this; users
/// already in the folder are skipped.
///
/// Fix: the catch-all match arm swallowed unexpected database errors with a
/// `println!` and carried on; such errors are now propagated to the caller,
/// matching the `?`-based handling used everywhere else in this module.
pub fn add_user_folder_db(
    db: web::Data<Pool>,
    token: String,
    folder: web::Path<AddUserToFolderPath>,
    item: web::Json<AddUserToFoldr>,
) -> Result<Response<String>, diesel::result::Error> {
    let conn = db.get().unwrap();
    let decoded_token = auth::decode_token(&token);
    let user = users
        .find(decoded_token.parse::<i32>().unwrap())
        .first::<User>(&conn)?;
    let space: Space = spaces
        .filter(spaces_name.ilike(&folder.info))
        .first::<Space>(&conn)?;
    // Membership + admin check.
    let spaces_user: SpaceUser = spaces_users
        .filter(space_id.eq(space.id))
        .filter(space_user_id.eq(user.id))
        .first::<SpaceUser>(&conn)?;
    if !spaces_user.admin_status {
        return Ok(Response::new(
            false,
            "only admin allowed to add users to folder".to_string(),
        ));
    }
    let folder: MailList = maillists.find(folder.id).first::<MailList>(&conn)?;
    for new_user_id in item.id.iter() {
        let user_in_folder = usermails
            .filter(mail_user_id.eq(&new_user_id))
            .filter(mail_list_id.eq(folder.id))
            .first::<UserMail>(&conn);
        match user_in_folder {
            Ok(_user) => {
                //do nothing for user already in folder
            }
            Err(diesel::result::Error::NotFound) => {
                //if user not found, insert the membership row
                let new_user = NewUserMail {
                    mail_list_id: &folder.id,
                    user_id: &new_user_id,
                };
                let _res = insert_into(usermails).values(&new_user).execute(&conn)?;
            }
            // Any other DB failure aborts the whole operation.
            Err(e) => return Err(e),
        }
    }
    Ok(Response::new(true, "Users added successfully".to_string()))
}
|
use std::string::String;
/// Parsed components of an underscore-delimited mesh-part name of the form
/// `<renderGroup>_<meshName>[_<specular>_<bump1>_<bump2>[_<camera>[_<target>...]]]`.
#[derive(Default)]
pub struct MeshNameParser {
    pub full_name: String,
    // Canonical rebuilt form of the full name (see `normalize_full_name`).
    pub full_name_normalized: String,
    pub render_parameter_normalized: String,
    pub camera_targets_normalized: String,
    // The raw name split on '_'.
    pub tokens: Vec<String>,
    pub mesh_name: String,
    pub item_name: String,
    pub item_mesh_part: String,
    // Token 0; "0" when no render-group prefix was present.
    pub render_group_number: String,
    pub specular_amount: f32,
    pub bump1_uv_scale: f32,
    pub bump2_uv_scale: f32,
    pub camera_name: String,
    pub camera_targets: Vec<String>,
    // has_* flags record which optional components were actually parsed.
    pub has_render_group: bool,
    pub has_parameter: bool,
    pub has_camera_targets: bool,
    pub has_specular_amount: bool,
    pub has_bump1_uv_scale: bool,
    pub has_bump2_uv_scale: bool,
    pub has_optional_items: bool,
    pub is_visible_by_default: bool,
}
impl MeshNameParser {
    /// Parses `mesh_part_name` into its components. An empty name is treated
    /// as "null".
    ///
    /// Fixes relative to the previous version:
    /// - `get_param` no longer panics when fewer than five tokens are
    ///   present (it used to index `tokens[3]`/`tokens[4]` unconditionally);
    /// - the optional-item arms used `[..1]`, which kept only the leading
    ///   +/- sign; the inline `substring(1)` comment shows the intent was to
    ///   *drop* the sign, i.e. `[1..]`.
    pub fn new(mesh_part_name: &String) -> MeshNameParser {
        let mut mpn = MeshNameParser::default();
        mpn.clear();
        let mut mesh_part_name = mesh_part_name.to_string();
        if mesh_part_name.is_empty() {
            mesh_part_name = String::from("null");
        }
        mpn.full_name = mesh_part_name.clone();
        mpn.tokens = mesh_part_name.split('_').map(|x| x.to_string()).collect();
        if mpn.tokens.len() < 2 {
            // No render-group prefix: the whole string is the mesh name.
            mpn.has_render_group = false;
            mpn.mesh_name = mesh_part_name.clone();
            mpn.item_name = mesh_part_name.clone();
            mpn.item_mesh_part = mesh_part_name;
            mpn.is_visible_by_default = false;
            mpn.render_group_number = String::from("0");
        } else {
            mpn.has_render_group = true;
            mpn.item_mesh_part = mpn.tokens[1].clone();
            mpn.mesh_name = mpn.tokens[1].clone();
            mpn.item_name = mpn.tokens[1].clone();
            // Render group "0" means hidden by default.
            mpn.is_visible_by_default = !(mpn.tokens[0] == "0");
            mpn.render_group_number = mpn.tokens[0].clone();
        }
        if mpn.tokens.len() > 2 {
            // Numeric render parameters; get_param falls back to defaults
            // (and clears the has_* flag) for missing/unparsable tokens.
            mpn.has_bump2_uv_scale = true;
            mpn.has_specular_amount = true;
            mpn.has_bump1_uv_scale = true;
            mpn.specular_amount = mpn.get_param(2);
            mpn.bump1_uv_scale = mpn.get_param(3);
            mpn.bump2_uv_scale = mpn.get_param(4);
        }
        if mpn.tokens.len() > 5 {
            mpn.get_camera();
        }
        if mpn.mesh_name.starts_with('+') || mpn.mesh_name.starts_with('-') {
            // A +/- prefix marks an optional item; '+' = visible by default.
            mpn.has_optional_items = true;
            mpn.is_visible_by_default = mpn.mesh_name.starts_with('+');
            let string_array: Vec<String> =
                mpn.mesh_name.split('.').map(|x| x.to_string()).collect();
            match string_array.len() {
                0 => {
                    // Unreachable: `split` always yields at least one piece.
                    mpn.item_mesh_part = String::from(&mpn.mesh_name[1..]); // mpn.meshName.substring(1);
                    mpn.item_name = mpn.item_mesh_part.clone();
                }
                1 => {
                    // No '.': the whole name (minus the sign) is both item
                    // name and mesh part.
                    mpn.item_mesh_part = String::from(&string_array[0][1..]);
                    mpn.item_name = mpn.item_mesh_part.clone();
                }
                2 => {
                    mpn.item_name = String::from(&string_array[0][1..]);
                    mpn.item_mesh_part = string_array[1].clone();
                }
                _ => {
                    mpn.item_name = String::from(&string_array[0][1..]);
                    // NOTE(review): this keeps the prefix up to the first '.'
                    // rather than the remainder after it, which looks
                    // suspicious for a multi-dot name — preserved as-is
                    // pending confirmation of the intended behavior.
                    mpn.item_mesh_part = String::from(&mpn.mesh_name[..string_array[0].len()]);
                }
            }
        }
        mpn.normalize_full_name();
        mpn
    }
    /// Rebuilds `full_name_normalized` from the parsed components:
    /// `<group>_<meshPart>_<params>` plus camera name/targets when present.
    fn normalize_full_name(&mut self) {
        self.normalize_render_parameter();
        self.normalize_camera_targets();
        let normalized_mesh_part_name = self.normalize_mesh_part_name();
        self.full_name_normalized = format!(
            "{}_{}_{}",
            self.render_group_number, normalized_mesh_part_name, self.render_parameter_normalized
        );
        if !self.has_camera_targets {
            return;
        }
        self.full_name_normalized = format!(
            "{}_{}{}",
            self.full_name_normalized, self.camera_name, self.camera_targets_normalized
        );
    }
    /// Canonical mesh-part name: `<sign><itemName>.<itemMeshPart>` for
    /// optional items, the plain mesh name otherwise.
    fn normalize_mesh_part_name(&mut self) -> String {
        let mut s = self.mesh_name.clone();
        if self.has_optional_items {
            s = format!(
                "{}{}.{}",
                {
                    if self.is_visible_by_default {
                        "+"
                    } else {
                        "-"
                    }
                },
                self.item_name,
                self.item_mesh_part
            );
        }
        return s;
    }
    /// Joins the camera targets as `_t1_t2...` into
    /// `camera_targets_normalized` (empty when there are no targets).
    fn normalize_camera_targets(&mut self) {
        self.camera_targets_normalized = String::new();
        if !self.has_camera_targets {
            return;
        }
        for index in 0..self.camera_targets.len() {
            self.camera_targets_normalized = format!(
                "{}_{}",
                self.camera_targets_normalized, self.camera_targets[index]
            );
        }
    }
    /// Canonical `<specular>_<bump1>_<bump2>` parameter string.
    fn normalize_render_parameter(&mut self) {
        self.render_parameter_normalized = format!(
            "{}_{}_{}",
            self.specular_amount, self.bump1_uv_scale, self.bump2_uv_scale
        );
    }
    /// Resets the fields whose defaults differ from `Default::default()`.
    fn clear(&mut self) {
        self.is_visible_by_default = true;
        self.has_optional_items = false;
        self.specular_amount = 0.1_f32;
        self.render_group_number = String::from("0");
    }
    /// Parses the camera name (token 5) and its targets (tokens 6+, or
    /// "root" when none are given).
    fn get_camera(&mut self) {
        if self.tokens.len() <= 5 {
            return;
        }
        self.has_camera_targets = true;
        self.camera_name = self.tokens[5].clone();
        if self.tokens.len() == 6 {
            self.camera_targets = vec![String::from("root")];
        } else {
            self.camera_targets = vec![String::new(); self.tokens.len() - 6];
            for index in 0..self.tokens.len() - 6 {
                self.camera_targets[index] = self.tokens[index + 6].clone();
            }
        }
    }
    /// Parses numeric token `index` as f32. On a missing or unparsable
    /// token, clears the corresponding `has_*` flag and returns that
    /// parameter's default.
    fn get_param(&mut self, index: i32) -> f32 {
        // Fix: `get` instead of direct indexing, so a short token list no
        // longer panics with an out-of-bounds access.
        if let Some(v) = self.tokens.get(index as usize).and_then(|t| t.parse().ok()) {
            return v;
        }
        match index {
            2 => {
                self.has_specular_amount = false;
                self.specular_amount = 0.1;
                0.1
            }
            3 => {
                self.has_bump1_uv_scale = false;
                self.bump1_uv_scale = 1_f32;
                // Fix: previously returned 0.0 while setting the field to
                // 1.0; the caller immediately overwrote the field with the
                // return value, so the default is now returned consistently.
                1.0
            }
            4 => {
                self.has_bump2_uv_scale = false;
                self.bump2_uv_scale = 1_f32;
                1.0
            }
            _ => 0.0,
        }
    }
    /// The canonical (normalized) full name.
    pub fn get_full_name(&self) -> String {
        self.full_name_normalized.clone()
    }
    /// The render group as an integer; 0 when the token isn't numeric.
    pub fn get_render_group_number(&self) -> i32 {
        if let Ok(x) = self.render_group_number.parse() {
            x
        } else {
            0
        }
    }
}
|
#![recursion_limit = "128"]
#[macro_use] extern crate quote;
extern crate proc_macro;
extern crate syn;
extern crate validator;
use std::collections::HashMap;
use proc_macro::TokenStream;
use quote::ToTokens;
use validator::{Validator};
// Rust types on which a `range` validator is accepted.
// Fix: the `Option<...>` entries had lost their scalar type names
// ("Option<8>", "Option<16>", ...); they now mirror the plain numeric types
// listed above (`Option<u8>`, `Option<i16>`, `Option<f32>`, ...).
static RANGE_TYPES: [&'static str; 24] = [
    "usize", "u8", "u16", "u32", "u64",
    "isize", "i8", "i16", "i32", "i64",
    "f32", "f64",
    "Option<usize>", "Option<u8>", "Option<u16>", "Option<u32>", "Option<u64>",
    "Option<isize>", "Option<i8>", "Option<i16>", "Option<i32>", "Option<i64>",
    "Option<f32>", "Option<f64>",
];
/// Struct-level (`schema`) validation parsed from `#[validate(schema(...))]`.
#[derive(Debug)]
struct SchemaValidation {
    // Name of the user-supplied function to call with `&self`.
    function: String,
    // When true, the schema function only runs if no field-level errors.
    skip_on_field_errors: bool,
}
/// Derive entry point for `#[derive(Validate)]`.
///
/// syn 0.x pipeline: stringify the incoming token stream, parse it back into
/// an AST, expand the impl, then re-parse the generated source into tokens.
#[proc_macro_derive(Validate, attributes(validate))]
pub fn derive_validation(input: TokenStream) -> TokenStream {
    let source = input.to_string();
    // Parse the string representation to an AST
    let ast = syn::parse_macro_input(&source).unwrap();
    let expanded = expand_validation(&ast);
    expanded.parse().unwrap()
}
/// Builds the `impl Validate` block for the derive input: one validation
/// snippet per `#[validate(...)]` field attribute, plus an optional
/// struct-level `schema` validation. `Option<...>` fields are wrapped in
/// `if let Some(...)` so a `None` value skips the check.
///
/// Panics (i.e. a compile error for the user) on enums, tuple structs, or
/// structs with unnamed fields.
fn expand_validation(ast: &syn::MacroInput) -> quote::Tokens {
    let fields = match ast.body {
        syn::Body::Struct(syn::VariantData::Struct(ref fields)) => {
            if fields.iter().any(|field| field.ident.is_none()) {
                panic!("struct has unnamed fields");
            }
            fields
        },
        _ => panic!("#[derive(Validate)] can only be used with structs"),
    };
    let mut validations = vec![];
    // Field name -> stringified type; decides borrowing and Option handling.
    let field_types = find_fields_type(&fields);
    for field in fields {
        let field_ident = match field.ident {
            Some(ref i) => i,
            None => unreachable!()
        };
        let (name, validators) = find_validators_for_field(field, &field_types);
        let field_name = field_types.get(&field_ident.to_string()).unwrap();
        // Don't put a & in front a pointer
        let validator_param = if field_name.starts_with("&") {
            quote!(self.#field_ident)
        } else {
            quote!(&self.#field_ident)
        };
        // same but for the ident used in a if let block
        let optional_validator_param = quote!(#field_ident);
        // same but for the ident used in a if let Some variable
        let optional_pattern_matched = if field_name.starts_with("Option<&") {
            quote!(#field_ident)
        } else {
            quote!(ref #field_ident)
        };
        // Emit one generated code snippet per validator on this field.
        for validator in &validators {
            validations.push(match validator {
                &Validator::Length {min, max, equal} => {
                    // Can't interpolate None
                    let min_tokens = option_u64_to_tokens(min);
                    let max_tokens = option_u64_to_tokens(max);
                    let equal_tokens = option_u64_to_tokens(equal);
                    // wrap in if-let if we have an option
                    if field_name.starts_with("Option<") {
                        quote!(
                            if let Some(#optional_pattern_matched) = self.#field_ident {
                                if !::validator::validate_length(
                                    ::validator::Validator::Length {
                                        min: #min_tokens,
                                        max: #max_tokens,
                                        equal: #equal_tokens
                                    },
                                    #optional_validator_param
                                ) {
                                    errors.add(#name, "length");
                                }
                            }
                        )
                    } else {
                        quote!(
                            if !::validator::validate_length(
                                ::validator::Validator::Length {
                                    min: #min_tokens,
                                    max: #max_tokens,
                                    equal: #equal_tokens
                                },
                                #validator_param
                            ) {
                                errors.add(#name, "length");
                            }
                        )
                    }
                },
                &Validator::Range {min, max} => {
                    // wrap in if-let if we have an option
                    if field_name.starts_with("Option<") {
                        quote!(
                            if let Some(#field_ident) = self.#field_ident {
                                if !::validator::validate_range(
                                    ::validator::Validator::Range {min: #min, max: #max},
                                    #field_ident as f64
                                ) {
                                    errors.add(#name, "range");
                                }
                            }
                        )
                    } else {
                        quote!(
                            if !::validator::validate_range(
                                ::validator::Validator::Range {min: #min, max: #max},
                                self.#field_ident as f64
                            ) {
                                errors.add(#name, "range");
                            }
                        )
                    }
                },
                &Validator::Email => {
                    // wrap in if-let if we have an option
                    if field_name.starts_with("Option<") {
                        quote!(
                            if let Some(#optional_pattern_matched) = self.#field_ident {
                                if !::validator::validate_email(#optional_validator_param) {
                                    errors.add(#name, "email");
                                }
                            }
                        )
                    } else {
                        quote!(
                            if !::validator::validate_email(#validator_param) {
                                errors.add(#name, "email");
                            }
                        )
                    }
                }
                &Validator::Url => {
                    // wrap in if-let if we have an option
                    if field_name.starts_with("Option<") {
                        quote!(
                            if let Some(#optional_pattern_matched) = self.#field_ident {
                                if !::validator::validate_url(#optional_validator_param) {
                                    errors.add(#name, "url");
                                }
                            }
                        )
                    } else {
                        quote!(
                            if !::validator::validate_url(#validator_param) {
                                errors.add(#name, "url");
                            }
                        )
                    }
                },
                &Validator::MustMatch(ref f) => {
                    let other_ident = syn::Ident::new(f.clone());
                    quote!(
                        if !::validator::validate_must_match(&self.#field_ident, &self.#other_ident) {
                            errors.add(#name, "no_match");
                        }
                    )
                },
                &Validator::Custom(ref f) => {
                    let fn_ident = syn::Ident::new(f.clone());
                    // wrap in if-let if we have an option
                    if field_name.starts_with("Option<") {
                        quote!(
                            if let Some(#optional_pattern_matched) = self.#field_ident {
                                match #fn_ident(#optional_validator_param) {
                                    ::std::option::Option::Some(s) => {
                                        errors.add(#name, &s);
                                    },
                                    ::std::option::Option::None => (),
                                };
                            }
                        )
                    } else {
                        quote!(
                            match #fn_ident(#validator_param) {
                                ::std::option::Option::Some(s) => {
                                    errors.add(#name, &s);
                                },
                                ::std::option::Option::None => (),
                            };
                        )
                    }
                },
                &Validator::Contains(ref n) => {
                    // wrap in if-let if we have an option
                    if field_name.starts_with("Option<") {
                        quote!(
                            if let Some(#optional_pattern_matched) = self.#field_ident {
                                if !::validator::validate_contains(#optional_validator_param, &#n) {
                                    errors.add(#name, "contains");
                                }
                            }
                        )
                    } else {
                        quote!(
                            if !::validator::validate_contains(#validator_param, &#n) {
                                errors.add(#name, "contains");
                            }
                        )
                    }
                },
                &Validator::Regex(ref re) => {
                    let re_ident = syn::Ident::new(re.clone());
                    // wrap in if-let if we have an option
                    if field_name.starts_with("Option<") {
                        quote!(
                            if let Some(#optional_pattern_matched) = self.#field_ident {
                                if !#re_ident.is_match(#optional_validator_param) {
                                    errors.add(#name, "regex");
                                }
                            }
                        )
                    } else {
                        quote!(
                            if !#re_ident.is_match(#validator_param) {
                                errors.add(#name, "regex");
                            }
                        )
                    }
                },
            });
        }
    }
    // Optional struct-level schema validation, possibly gated on the field
    // validations having produced no errors.
    let struct_validation = find_struct_validation(&ast.attrs);
    let struct_validation_tokens = match struct_validation {
        Some(s) => {
            let fn_ident = syn::Ident::new(s.function);
            if s.skip_on_field_errors {
                quote!(
                    if errors.is_empty() {
                        match #fn_ident(self) {
                            ::std::option::Option::Some((key, val)) => {
                                errors.add(&key, &val);
                            },
                            ::std::option::Option::None => (),
                        }
                    }
                )
            } else {
                quote!(
                    match #fn_ident(self) {
                        ::std::option::Option::Some((key, val)) => {
                            errors.add(&key, &val);
                        },
                        ::std::option::Option::None => (),
                    }
                )
            }
        },
        None => quote!()
    };
    let ident = &ast.ident;
    // Helper is provided for handling complex generic types correctly and effortlessly
    let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl();
    let impl_ast = quote!(
        impl #impl_generics Validate for #ident #ty_generics #where_clause {
            fn validate(&self) -> ::std::result::Result<(), ::validator::Errors> {
                let mut errors = ::validator::Errors::new();
                #(#validations)*
                #struct_validation_tokens
                if errors.is_empty() {
                    ::std::result::Result::Ok(())
                } else {
                    ::std::result::Result::Err(errors)
                }
            }
        }
    );
    // println!("{}", impl_ast.to_string());
    impl_ast
}
/// Find if a struct has some schema validation and returns the info if so.
///
/// Looks for `#[validate(schema(function = "...", skip_on_field_errors = ...))]`
/// among the struct-level attributes; any malformed variant panics with a
/// descriptive compile error. Returns `None` when no `validate` attribute
/// is present.
fn find_struct_validation(struct_attrs: &Vec<syn::Attribute>) -> Option<SchemaValidation> {
    let error = |msg: &str| -> ! {
        panic!("Invalid schema level validation: {}", msg);
    };
    for attr in struct_attrs {
        if attr.value.name() != "validate" {
            continue;
        }
        match attr.value {
            syn::MetaItem::List(_, ref meta_items) => {
                // Only the first nested item is inspected: `schema(...)`.
                match meta_items[0] {
                    syn::NestedMetaItem::MetaItem(ref item) => match item {
                        &syn::MetaItem::List(ref ident2, ref args) => {
                            if ident2 != "schema" {
                                error("Only `schema` is allowed as validator on a struct")
                            }
                            let mut function = "".to_string();
                            let mut skip_on_field_errors = true;
                            for arg in args {
                                match *arg {
                                    syn::NestedMetaItem::MetaItem(ref item) => match *item {
                                        syn::MetaItem::NameValue(ref name, ref val) => {
                                            match name.to_string().as_ref() {
                                                "function" => {
                                                    function = match lit_to_string(val) {
                                                        Some(s) => s,
                                                        None => error("invalid argument type for `function` \
: only a string is allowed"),
                                                    };
                                                },
                                                "skip_on_field_errors" => {
                                                    skip_on_field_errors = match lit_to_bool(val) {
                                                        Some(s) => s,
                                                        None => error("invalid argument type for `skip_on_field_errors` \
: only a bool is allowed"),
                                                    };
                                                },
                                                _ => error("Unknown argument")
                                            }
                                        },
                                        _ => error("Unexpected args")
                                    },
                                    _ => error("Unexpected args")
                                }
                            }
                            if function == "" {
                                error("`function` is required");
                            }
                            return Some(SchemaValidation {
                                function: function,
                                skip_on_field_errors: skip_on_field_errors
                            });
                        },
                        _ => error("Unexpected struct validator")
                    },
                    _ => error("Unexpected struct validator")
                }
            },
            _ => error("Unexpected struct validator")
        }
    }
    None
}
// Find all the types (as string) for each field of the struct
// Needed for the `must_match` filter
fn find_fields_type(fields: &Vec<syn::Field>) -> HashMap<String, String> {
    let mut field_types = HashMap::new();

    for field in fields {
        let name = match field.ident {
            Some(ref ident) => ident.to_string(),
            None => unreachable!(),
        };

        // Render the type to a token string with whitespace stripped,
        // e.g. `Option < String >` becomes `Option<String>`.
        let ty_repr = match field.ty {
            syn::Ty::Path(_, ref path) => {
                let mut rendered = quote::Tokens::new();
                path.to_tokens(&mut rendered);
                rendered.to_string().replace(' ', "")
            },
            syn::Ty::Rptr(ref lifetime, ref inner) => {
                let mut rendered = quote::Tokens::new();
                inner.ty.to_tokens(&mut rendered);
                let mut repr = rendered.to_string().replace(' ', "");
                // A lifetime marks this as a reference type, so keep the `&`.
                if lifetime.is_some() {
                    repr.insert(0, '&');
                }
                repr
            },
            _ => panic!("Type `{:?}` of field `{}` not supported", field.ty, name),
        };

        field_types.insert(name, ty_repr);
    }

    field_types
}
/// Find everything we need to know about a Field.
///
/// Returns the (possibly serde-renamed) field name together with every
/// `Validator` parsed from its `#[validate(...)]` attributes. Aborts the
/// derive with a panic on any malformed or type-incompatible validator.
fn find_validators_for_field(field: &syn::Field, field_types: &HashMap<String, String>) -> (String, Vec<Validator>) {
    // Name under which validation errors are reported; may be replaced below
    // by a `#[serde(rename = "...")]` value.
    let mut field_name = match field.ident {
        Some(ref s) => s.to_string(),
        None => unreachable!(),
    };
    // Abort the derive with a field-specific message (compile-time error for the user).
    let error = |msg: &str| -> ! {
        panic!("Invalid attribute #[validate] on field `{}`: {}", field.ident.clone().unwrap().to_string(), msg);
    };
    let field_type = field_types.get(&field_name).unwrap();
    let mut validators = vec![];
    // Tracks whether a `#[validate]` attribute was seen at all, so an empty
    // one can be rejected after the loop.
    let mut has_validate = false;

    // Parses the list-style validators (`length(...)`, `range(...)`),
    // which carry named arguments.
    let find_struct_validator = |name: String, meta_items: &Vec<syn::NestedMetaItem>| -> Validator {
        match name.as_ref() {
            "length" => {
                let mut min = None;
                let mut max = None;
                let mut equal = None;
                for meta_item in meta_items {
                    match *meta_item {
                        syn::NestedMetaItem::MetaItem(ref item) => match *item {
                            syn::MetaItem::NameValue(ref name, ref val) => {
                                match name.to_string().as_ref() {
                                    "min" => {
                                        min = match lit_to_int(val) {
                                            Some(s) => Some(s),
                                            None => error("invalid argument type for `min` of `length` validator: only integers are allowed"),
                                        };
                                    },
                                    "max" => {
                                        max = match lit_to_int(val) {
                                            Some(s) => Some(s),
                                            None => error("invalid argument type for `max` of `length` validator: only integers are allowed"),
                                        };
                                    },
                                    "equal" => {
                                        equal = match lit_to_int(val) {
                                            Some(s) => Some(s),
                                            None => error("invalid argument type for `equal` of `length` validator: only integers are allowed"),
                                        };
                                    },
                                    _ => error(&format!(
                                        "unknown argument `{}` for validator `length` (it only has `min`, `max`, `equal`)",
                                        name.to_string()
                                    ))
                                }
                            },
                            _ => panic!("unexpected item {:?} while parsing `length` validator", item)
                        },
                        _=> unreachable!()
                    }
                }
                // `equal` is exclusive with the min/max bounds.
                if equal.is_some() && (min.is_some() || max.is_some()) {
                    error("both `equal` and `min` or `max` have been set in `length` validator: probably a mistake");
                }
                Validator::Length { min: min, max: max, equal: equal }
            },
            "range" => {
                let mut min = 0.0;
                let mut max = 0.0;
                for meta_item in meta_items {
                    match *meta_item {
                        syn::NestedMetaItem::MetaItem(ref item) => match *item {
                            syn::MetaItem::NameValue(ref name, ref val) => {
                                match name.to_string().as_ref() {
                                    "min" => {
                                        min = match lit_to_float(val) {
                                            Some(s) => s,
                                            None => error("invalid argument type for `min` of `range` validator: only integers are allowed")
                                        };
                                    },
                                    "max" => {
                                        max = match lit_to_float(val) {
                                            Some(s) => s,
                                            None => error("invalid argument type for `max` of `range` validator: only integers are allowed")
                                        };
                                    },
                                    _ => error(&format!(
                                        "unknown argument `{}` for validator `range` (it only has `min`, `max`)",
                                        name.to_string()
                                    ))
                                }
                            },
                            _ => panic!("unexpected item {:?} while parsing `range` validator", item)
                        },
                        _=> unreachable!()
                    }
                }
                Validator::Range { min: min, max: max}
            }
            _ => panic!("unexpected list validator: {:?}", name)
        }
    };

    for attr in &field.attrs {
        // `serde` attributes are scanned only for `rename`; everything else
        // of interest lives in `validate`.
        if attr.name() != "validate" && attr.name() != "serde" {
            continue;
        }
        if attr.name() == "validate" {
            has_validate = true;
        }

        match attr.value {
            syn::MetaItem::List(_, ref meta_items) => {
                if attr.name() == "serde" {
                    match find_original_field_name(meta_items) {
                        Some(s) => { field_name = s },
                        None => ()
                    };
                    continue;
                }
                // only validation from there on
                for meta_item in meta_items {
                    match *meta_item {
                        syn::NestedMetaItem::MetaItem(ref item) => match *item {
                            // email, url
                            syn::MetaItem::Word(ref name) => match name.to_string().as_ref() {
                                "email" => {
                                    // Accept String, &str and their Option<..> wrappers.
                                    if field_type != "String"
                                        && field_type != "&str"
                                        && field_type != "Option<String>"
                                        && !(field_type.starts_with("Option<") && field_type.ends_with("str>")) {
                                        panic!("`email` validator can only be used on String or &str");
                                    }
                                    validators.push(Validator::Email);
                                },
                                "url" => {
                                    if field_type != "String"
                                        && field_type != "&str"
                                        && field_type != "Option<String>"
                                        && !(field_type.starts_with("Option<") && field_type.ends_with("str>")) {
                                        panic!("`url` validator can only be used on String or &str");
                                    }
                                    validators.push(Validator::Url);
                                },
                                _ => panic!("Unexpected word validator: {}", name)
                            },
                            // custom, contains, must_match
                            syn::MetaItem::NameValue(ref name, ref val) => {
                                match name.to_string().as_ref() {
                                    "custom" => {
                                        match lit_to_string(val) {
                                            Some(s) => validators.push(Validator::Custom(s)),
                                            None => error("invalid argument for `custom` validator: only strings are allowed"),
                                        };
                                    },
                                    "contains" => {
                                        match lit_to_string(val) {
                                            Some(s) => validators.push(Validator::Contains(s)),
                                            None => error("invalid argument for `contains` validator: only strings are allowed"),
                                        };
                                    },
                                    "regex" => {
                                        match lit_to_string(val) {
                                            Some(s) => validators.push(Validator::Regex(s)),
                                            None => error("invalid argument for `regex` validator: only strings are allowed"),
                                        };
                                    }
                                    "must_match" => {
                                        match lit_to_string(val) {
                                            Some(s) => {
                                                // The referenced field must exist and share this field's type.
                                                if let Some(t2) = field_types.get(&s) {
                                                    if field_type == t2 {
                                                        validators.push(Validator::MustMatch(s));
                                                    } else {
                                                        error("invalid argument for `must_match` validator: types of field can't match");
                                                    }
                                                } else {
                                                    error("invalid argument for `must_match` validator: field doesn't exist in struct");
                                                }
                                            },
                                            None => error("invalid argument for `must_match` validator: only strings are allowed"),
                                        };
                                    },
                                    _ => panic!("unexpected name value validator: {:?}", name),
                                };
                            },
                            // validators with args: length for example
                            syn::MetaItem::List(ref name, ref meta_items) => {
                                // Some sanity checking first
                                if name == "length" {
                                    if field_type != "String"
                                        && !field_type.starts_with("Vec<")
                                        && !field_type.starts_with("Option<Vec<")
                                        && field_type != "Option<String>"
                                        // a bit ugly
                                        && !(field_type.starts_with("Option<") && field_type.ends_with("str>"))
                                        && field_type != "&str" {
                                        error(&format!(
                                            "Validator `length` can only be used on types `String`, `&str` or `Vec` but found `{}`",
                                            field_type
                                        ));
                                    }
                                    if meta_items.len() == 0 {
                                        error("Validator `length` requires at least 1 argument out of `min`, `max` and `equal`");
                                    }
                                }
                                if name == "range" {
                                    if !RANGE_TYPES.contains(&field_type.as_ref()) {
                                        error(&format!(
                                            "Validator `range` can only be used on number types but found `{}`",
                                            field_type
                                        ));
                                    }
                                    if meta_items.len() != 2 {
                                        error("Validator `range` requires 2 arguments: `min` and `max`");
                                    }
                                }
                                validators.push(find_struct_validator(name.to_string(), meta_items));
                            },
                        },
                        _ => unreachable!("Found a non MetaItem while looking for validators")
                    };
                }
            },
            _ => unreachable!("Got something other than a list of attributes while checking field `{}`", field_name),
        }
    }

    // A bare `#[validate]` (or one that produced nothing) is rejected.
    if has_validate && validators.is_empty() {
        error("it needs at least one validator");
    }

    (field_name, validators)
}
/// Serde can be used to rename fields on deserialization but most of the times
/// we want the error on the original field.
///
/// For example a JS frontend might send camelCase fields and Rust converts them to snake_case
/// but we want to send the errors back to the frontend with the original name
fn find_original_field_name(meta_items: &Vec<syn::NestedMetaItem>) -> Option<String> {
    let mut original_name = None;

    for meta_item in meta_items {
        match *meta_item {
            syn::NestedMetaItem::MetaItem(ref item) => match *item {
                syn::MetaItem::Word(_) => continue,
                syn::MetaItem::NameValue(ref name, ref val) => {
                    if name == "rename" {
                        // Assumes `rename` always carries a string literal;
                        // the unwrap panics otherwise — TODO confirm.
                        original_name = Some(lit_to_string(val).unwrap());
                    }
                },
                // length
                // NOTE(review): the recursive result is returned immediately,
                // so any meta items *after* the first nested list are never
                // examined — confirm that is intended for serde attrs such as
                // `rename(serialize = ..., deserialize = ...)`.
                syn::MetaItem::List(_, ref meta_items) => {
                    return find_original_field_name(meta_items);
                }
            },
            _ => unreachable!()
        };
        // Stop at the first rename found.
        if original_name.is_some() {
            return original_name;
        }
    }
    original_name
}
/// Extract the contents of a string literal, or `None` for any other
/// literal kind.
fn lit_to_string(lit: &syn::Lit) -> Option<String> {
    if let syn::Lit::Str(ref value, _) = *lit {
        Some(value.to_string())
    } else {
        None
    }
}
/// Extract an unsigned integer from a literal.
///
/// Accepts a real integer literal or, while `attr_literals` is unstable,
/// an integer spelled inside a string literal (e.g. `min = "10"`).
/// Returns `None` for any other literal kind or an unparseable string so
/// callers can report their own validator error.
fn lit_to_int(lit: &syn::Lit) -> Option<u64> {
    match *lit {
        syn::Lit::Int(ref s, _) => Some(*s),
        // TODO: remove when attr_literals is stable
        // `.ok()` instead of `.unwrap()`: a non-numeric string (e.g. `min = "abc"`)
        // should surface the caller's friendly error, not a raw parse panic.
        syn::Lit::Str(ref s, _) => s.parse::<u64>().ok(),
        _ => None,
    }
}
/// Extract a float from a literal.
///
/// Accepts float and integer literals, or (while `attr_literals` is
/// unstable) a number spelled inside a string literal. Returns `None` for
/// any other literal kind or an unparseable string so callers can report
/// their own validator error.
fn lit_to_float(lit: &syn::Lit) -> Option<f64> {
    match *lit {
        syn::Lit::Float(ref s, _) => s.parse::<f64>().ok(),
        // Integer literals are widened; values above 2^53 lose precision.
        syn::Lit::Int(ref s, _) => Some(*s as f64),
        // TODO: remove when attr_literals is stable
        // `.ok()` instead of `.unwrap()`: a non-numeric string should surface
        // the caller's friendly error, not a raw parse panic.
        syn::Lit::Str(ref s, _) => s.parse::<f64>().ok(),
        _ => None,
    }
}
/// Extract a bool from a literal.
///
/// Accepts a real bool literal or (while `attr_literals` is unstable) the
/// exact strings `"true"` / `"false"`. Returns `None` for anything else so
/// callers can report their own validator error.
fn lit_to_bool(lit: &syn::Lit) -> Option<bool> {
    match *lit {
        syn::Lit::Bool(ref s) => Some(*s),
        // TODO: remove when attr_literals is stable
        // Previously any string other than "true" silently became `false`
        // (so `"True"` or a typo passed as false); only the two exact
        // spellings are accepted now, everything else is an error upstream.
        syn::Lit::Str(ref s, _) => match s.as_ref() {
            "true" => Some(true),
            "false" => Some(false),
            _ => None,
        },
        _ => None,
    }
}
/// Render an `Option<u64>` as fully-qualified tokens
/// (`::std::option::Option::Some(n)` / `::std::option::Option::None`)
/// for splicing into generated code.
fn option_u64_to_tokens(opt: Option<u64>) -> quote::Tokens {
    let mut out = quote::Tokens::new();

    // Fully qualified path so the generated code never depends on imports.
    for part in &["::", "std", "::", "option", "::", "Option", "::"] {
        out.append(*part);
    }

    if let Some(ref value) = opt {
        out.append("Some");
        out.append("(");
        value.to_tokens(&mut out);
        out.append(")");
    } else {
        out.append("None");
    }

    out
}
|
use iron::{Chain, Iron};
use persistent::Read;
use router::router;
use serde_derive::{Deserialize, Serialize};
use serenity::model::id::{ChannelId, GuildId};
use serenity::prelude::*;
use std::collections::HashSet;
use std::fs::DirBuilder;
use std::sync::Arc;
use std::thread;
mod store;
use store::StatsStore;
mod scan;
mod api;
mod event_handler;
use event_handler::OneshotData;
mod error;
use error::ConfigError;
/// On-disk application configuration, persisted as TOML.
///
/// `Default` is derived instead of hand-written: the derived impl produces
/// the same empty `String` / empty `Vec` the manual one did.
#[derive(Serialize, Deserialize, Default)]
struct Config {
    // Discord user token used to authenticate the client.
    discord_token: String,
    // Channels to track: either "<channel_id>" or "<guild_id>|<channel_id>".
    tracked_channels: Vec<String>,
}
impl Config {
    /// Load the configuration from disk, creating a default config file
    /// (and its parent directory) on first run.
    pub fn load() -> Result<Config, ConfigError> {
        let config_path = Config::config_path().ok_or(ConfigError::NoHome)?;
        Ok(if !config_path.exists() {
            DirBuilder::new()
                .recursive(true)
                .create(config_path.parent().ok_or(ConfigError::NoParent)?)?;
            let conf = Config::default();
            std::fs::write(
                &config_path,
                toml::to_string(&conf).expect("configuration is serializable"),
            )?;
            conf
        } else {
            let config_str = std::fs::read_to_string(config_path)?;
            toml::from_str(&config_str)?
        })
    }

    /// Persist the current configuration to the config file as TOML.
    pub fn save(&self) -> Result<(), ConfigError> {
        let config_path = Config::config_path().ok_or(ConfigError::NoHome)?;
        std::fs::write(
            config_path,
            toml::to_string(self).expect("configuration is serializable"),
        )?;
        Ok(())
    }

    /// Parse the `tracked_channels` strings into ids.
    ///
    /// Each entry is either "<channel_id>" (a private channel, no guild) or
    /// "<guild_id>|<channel_id>" (a guild channel).
    pub fn tracked_channels(&self) -> Result<Vec<(Option<GuildId>, ChannelId)>, ConfigError> {
        let mut out = Vec::new();
        for channel in &self.tracked_channels {
            let chan = if channel.contains('|') {
                let mut split_item = channel.split('|');
                let guild = split_item.next().ok_or(ConfigError::InvalidGuildFormat)?;
                let channel = split_item.next().ok_or(ConfigError::InvalidChannelFormat)?;
                (
                    Some(GuildId(
                        guild
                            .parse::<u64>()
                            // Bug fix: a malformed *guild* id previously reported
                            // `InvalidChannelFormat` instead of `InvalidGuildFormat`.
                            .map_err(|_| ConfigError::InvalidGuildFormat)?,
                    )),
                    channel
                        .parse()
                        .map_err(|_| ConfigError::InvalidChannelFormat)?,
                )
            } else {
                (
                    None,
                    channel
                        .parse()
                        .map_err(|_| ConfigError::InvalidChannelFormat)?,
                )
            };
            out.push(chan);
        }
        Ok(out)
    }

    /// Full path of the TOML config file inside the data root.
    pub fn config_path() -> Option<std::path::PathBuf> {
        Config::data_root().map(|h| h.join("config.toml"))
    }

    /// Application data directory (macOS deliberately uses ~/.config here
    /// rather than the platform config dir).
    #[cfg(target_os = "macos")]
    pub fn data_root() -> Option<std::path::PathBuf> {
        dirs::home_dir().map(|h| h.join(".config/discord-statistics/"))
    }

    /// Application data directory, derived from the platform config dir.
    #[cfg(not(target_os = "macos"))]
    pub fn data_root() -> Option<std::path::PathBuf> {
        dirs::config_dir().map(|h| h.join("discord-statistics/"))
    }
}
/// Entry point: dispatches the CLI subcommands (`token`, `track`,
/// `fetch-history`) and otherwise runs the web dashboard thread plus the
/// Discord client until the connection ends.
fn main() {
    let mut config = match Config::load() {
        Ok(config) => config,
        Err(e) => {
            eprintln!("Error loading configuration:\n{:?}", e);
            std::process::exit(1)
        }
    };

    let db_path = match Config::data_root() {
        Some(data) => data.join("store.sqlite3"),
        None => {
            eprintln!("Unable to get users config dir");
            std::process::exit(2)
        }
    };

    use clap::{App, Arg, SubCommand};
    let matches = App::new("Discord statistics")
        .author("Noskcaj19")
        .subcommand(
            SubCommand::with_name("token")
                .about("Store your Discord token")
                .arg(
                    Arg::with_name("token")
                        .required(true)
                        .help("Discord user token"),
                ),
        )
        .subcommand(
            SubCommand::with_name("track")
                .about("Start tracking a channel")
                .arg(
                    Arg::with_name("group-name")
                        .required(true)
                        .help("Guild name or private channel user"),
                )
                .arg(
                    Arg::with_name("channel-name")
                        .required(false)
                        .help("Channel name if guild is provided"),
                ),
        )
        .subcommand(
            SubCommand::with_name("fetch-history")
                .about("Add previously sent messages to the log")
                .arg(
                    Arg::with_name("max-count")
                        .help("Maximum amount of messages to search")
                        .long("max-count")
                        .default_value("500")
                        .takes_value(true)
                        .short("c"),
                ),
        )
        .get_matches();

    // Bug fix: the subcommand is registered as "token" above (and the usage
    // hint below says `discord-statistics token ...`), but it was previously
    // looked up as "store-token", so it could never match.
    if let Some(store_token) = matches.subcommand_matches("token") {
        let token = store_token
            .value_of("token")
            .expect("token is a required field");
        config.discord_token = token.to_owned();
        if let Err(e) = config.save() {
            eprintln!("An error occurred saving the configuration file:\n{:?}", e);
        } else {
            println!("Successfully saved discord token");
        }
        return;
    }

    // Environment variable takes precedence over the stored token; the
    // closure form avoids cloning the stored token when the env var is set.
    let token = std::env::var("DISCORD_TOKEN").unwrap_or_else(|_| config.discord_token.clone());
    if token.is_empty() || serenity::client::validate_token(&token).is_err() {
        eprintln!("Empty or invalid token, please set it by running `discord-statistics token $DISCORD_TOKEN`\nexiting");
        return;
    }

    if let Some(track) = matches.subcommand_matches("track") {
        // Name of the private channel user or guild
        let group_name = track
            .value_of("group-name")
            .expect("group-name is a required field");
        let data = get_oneshot_data(&token);

        // Guild channels are stored as "guild_id|channel_id"; private
        // channels as a bare channel id.
        let id_str = if let Some(channel_name) = track.value_of("channel-name") {
            resolve_guild_channel_names(&data, group_name, channel_name)
                .map(|(gid, cid)| format!("{}|{}", gid.0, cid.0))
        } else {
            resolve_private_channel(&data, group_name).map(|id| id.0.to_string())
        };

        match id_str {
            Some(id_str) => {
                config.tracked_channels.push(id_str);
                println!("Added channel to tracking list");
                if let Err(e) = config.save() {
                    eprintln!("An error occurred saving the configuration file:\n{:?}", e);
                }
            }
            None => eprintln!("Unable to find a matching channel"),
        }
        return;
    }

    let stats = match StatsStore::new(&db_path) {
        Ok(conn) => Arc::new(conn),
        Err(_) => {
            eprintln!("Unable to construct tables. aborting");
            // Bug fix: this is a failure path, so exit non-zero (it
            // previously exited with 0, signalling success).
            std::process::exit(1);
        }
    };

    if let Some(fetch) = matches.subcommand_matches("fetch-history") {
        let max_count: u64 = match fetch.value_of("max-count").unwrap_or("500").parse() {
            Ok(c) => c,
            Err(_) => {
                eprintln!("max-count must be an integer");
                return;
            }
        };
        let data = get_oneshot_data(&token);

        // Scan both the channels configured for tracking and any channels
        // already present in the message log.
        let mut channels_to_scan = HashSet::new();
        let tracked_channels = match config.tracked_channels() {
            Ok(channels) => channels,
            Err(e) => {
                eprintln!("Error loading channels:\n{:?}", e);
                std::process::exit(2);
            }
        };
        for (guild_id, channel_id) in tracked_channels {
            channels_to_scan.insert(store::Channel {
                guild_id,
                channel_id,
            });
        }
        if let Ok(logged_channels) = stats.get_channels() {
            channels_to_scan.extend(logged_channels)
        }
        println!("Scanning:");
        scan::MessageScanner { data, store: stats }.scan_messages(&channels_to_scan, max_count);
        return;
    }

    // start web server
    let http_stats = stats.clone();
    thread::spawn(|| {
        println!("Starting webserver");
        let router = router! {
            api_total_msg_count_per_day: get "/api/total_msg_count_per_day" => api::total_msg_count_per_day,
            api_user_msg_count_per_day: get "/api/user_msg_count_per_day" => api::msg_count_per_day,
            api_total_msg_count: get "/api/total_msg_count" => api::total_msg_count,
            api_edit_count: get "/api/edit_count" => api::edit_count,
            api_channels: get "/api/channels" => api::get_channels,
            api_msg_count: get "/api/msg_count" => api::msg_count,
            dashboard_js: get "/index.js" => api::dashboard_js,
            api_guilds: get "/api/guilds" => api::get_guilds,
            dashboard_g: get "/*" => api::dashboard,
            dashboard: get "/" => api::dashboard,
        };
        let mut chain = Chain::new(router);
        chain.link(Read::<api::Stats>::both(http_stats));
        let server = Iron::new(chain).http("localhost:8080");
        if let Err(e) = server {
            eprintln!("Unable to create http server on port 8080: {:?}", e)
        }
    });

    let tracked_channels = match config.tracked_channels() {
        Ok(channels) => channels,
        Err(e) => {
            eprintln!("Config contains invalid tracked channels:\n{:?}", e);
            std::process::exit(2)
        }
    };

    // start discord client
    let handler = event_handler::Handler::new(stats.clone(), tracked_channels);
    let mut client = match Client::new(&token, handler) {
        Ok(client) => client,
        Err(e) => {
            eprintln!("Error starting discord client: {:#?}", e);
            std::process::exit(3)
        }
    };
    if let Err(why) = client.start() {
        eprintln!("Unable to connect to discord: {:?}", why);
    }
}
/// Resolve a guild name plus channel name (both matched case-insensitively)
/// to their ids, using the guild list from the `Ready` payload.
fn resolve_guild_channel_names(
    data: &OneshotData,
    guild_name: &str,
    channel_name: &str,
) -> Option<(GuildId, ChannelId)> {
    for guild in &data.ready.guilds {
        use serenity::model::guild::GuildStatus::*;
        let (guild_id, name) = match guild {
            OnlinePartialGuild(g) => (g.id, g.name.clone()),
            OnlineGuild(g) => (g.id, g.name.clone()),
            // Offline guilds carry no name here, so fetch the partial
            // guild over HTTP to learn it.
            Offline(g) => (
                g.id,
                g.id.to_partial_guild(&data.context.http)
                    .expect("Unable to fetch guild data")
                    .name
                    .clone(),
            ),
            // Catch-all for any other GuildStatus variant — presumably
            // guards against new variants; verify against serenity's enum.
            _ => panic!("Unknown guild state"),
        };
        if guild_name.to_lowercase() == name.to_lowercase() {
            let channels = guild_id
                .channels(&data.context.http)
                .expect("Unable to fetch guild channels");
            for (&channel_id, channel) in channels.iter() {
                use serenity::model::channel::ChannelType::*;
                // Only message-bearing channel kinds are considered;
                // voice/category/etc. are skipped.
                match channel.kind {
                    Text | Private | Group | News => {}
                    _ => continue,
                }
                if channel_name.to_lowercase() == channel.name.to_lowercase() {
                    return Some((guild_id, channel_id));
                }
            }
        }
    }
    None
}
/// Find the private (DM) channel whose display name matches `user_name`,
/// case-insensitively.
fn resolve_private_channel(data: &OneshotData, user_name: &str) -> Option<ChannelId> {
    let wanted = user_name.to_lowercase();
    data.ready
        .private_channels
        .iter()
        .find(|&(_, channel)| format!("{}", channel).to_lowercase() == wanted)
        .map(|(&id, _)| id)
}
/// Connect to Discord just long enough to capture the first `OneshotData`
/// (which carries the `Ready` payload used by the resolvers above).
///
/// The client runs on a background thread; this function blocks on the
/// channel until the handler delivers the data. Exits the process if the
/// client cannot be constructed.
fn get_oneshot_data(token: &str) -> OneshotData {
    let (rx, handler) = event_handler::OneshotHandler::new();
    let mut client = match Client::new(&token, handler) {
        Ok(client) => client,
        Err(e) => {
            eprintln!("Error starting discord client: {:#?}", e);
            std::process::exit(3);
        }
    };
    // The connection stays alive on its own thread; we only need the first
    // event the handler sends back.
    thread::spawn(move || {
        if let Err(e) = client.start() {
            eprintln!("Unable to connect to discord: {:#?}", e);
        }
    });
    rx.recv().expect("event handler should not panic")
}
|
// ---------------------------------------------------------------------------
// Machine-generated windows-sys FFI bindings for XmlLite (xmllite.dll).
// Do not hand-edit; these follow the windows-sys code-generation layout.
// ---------------------------------------------------------------------------

// extern "system" entry points into xmllite.dll. All take a COM `IMalloc`,
// hence the `Win32_System_Com` feature gate.
#[cfg(feature = "Win32_System_Com")]
::windows_sys::core::link ! ( "xmllite.dll""system" #[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`, `\"Win32_System_Com\"`*"] fn CreateXmlReader ( riid : *const :: windows_sys::core::GUID , ppvobject : *mut *mut ::core::ffi::c_void , pmalloc : super::super::super::System::Com:: IMalloc ) -> :: windows_sys::core::HRESULT );
#[cfg(all(feature = "Win32_Foundation", feature = "Win32_System_Com"))]
::windows_sys::core::link ! ( "xmllite.dll""system" #[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`, `\"Win32_Foundation\"`, `\"Win32_System_Com\"`*"] fn CreateXmlReaderInputWithEncodingCodePage ( pinputstream : :: windows_sys::core::IUnknown , pmalloc : super::super::super::System::Com:: IMalloc , nencodingcodepage : u32 , fencodinghint : super::super::super::Foundation:: BOOL , pwszbaseuri : :: windows_sys::core::PCWSTR , ppinput : *mut :: windows_sys::core::IUnknown ) -> :: windows_sys::core::HRESULT );
#[cfg(all(feature = "Win32_Foundation", feature = "Win32_System_Com"))]
::windows_sys::core::link ! ( "xmllite.dll""system" #[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`, `\"Win32_Foundation\"`, `\"Win32_System_Com\"`*"] fn CreateXmlReaderInputWithEncodingName ( pinputstream : :: windows_sys::core::IUnknown , pmalloc : super::super::super::System::Com:: IMalloc , pwszencodingname : :: windows_sys::core::PCWSTR , fencodinghint : super::super::super::Foundation:: BOOL , pwszbaseuri : :: windows_sys::core::PCWSTR , ppinput : *mut :: windows_sys::core::IUnknown ) -> :: windows_sys::core::HRESULT );
#[cfg(feature = "Win32_System_Com")]
::windows_sys::core::link ! ( "xmllite.dll""system" #[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`, `\"Win32_System_Com\"`*"] fn CreateXmlWriter ( riid : *const :: windows_sys::core::GUID , ppvobject : *mut *mut ::core::ffi::c_void , pmalloc : super::super::super::System::Com:: IMalloc ) -> :: windows_sys::core::HRESULT );
#[cfg(feature = "Win32_System_Com")]
::windows_sys::core::link ! ( "xmllite.dll""system" #[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`, `\"Win32_System_Com\"`*"] fn CreateXmlWriterOutputWithEncodingCodePage ( poutputstream : :: windows_sys::core::IUnknown , pmalloc : super::super::super::System::Com:: IMalloc , nencodingcodepage : u32 , ppoutput : *mut :: windows_sys::core::IUnknown ) -> :: windows_sys::core::HRESULT );
#[cfg(feature = "Win32_System_Com")]
::windows_sys::core::link ! ( "xmllite.dll""system" #[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`, `\"Win32_System_Com\"`*"] fn CreateXmlWriterOutputWithEncodingName ( poutputstream : :: windows_sys::core::IUnknown , pmalloc : super::super::super::System::Com:: IMalloc , pwszencodingname : :: windows_sys::core::PCWSTR , ppoutput : *mut :: windows_sys::core::IUnknown ) -> :: windows_sys::core::HRESULT );

// COM interfaces are represented as raw opaque pointers in windows-sys.
pub type IXmlReader = *mut ::core::ffi::c_void;
pub type IXmlResolver = *mut ::core::ffi::c_void;
pub type IXmlWriter = *mut ::core::ffi::c_void;
pub type IXmlWriterLite = *mut ::core::ffi::c_void;

// Interface GUIDs for the above.
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const _IID_IXmlReader: ::windows_sys::core::GUID = ::windows_sys::core::GUID::from_u128(0x7279fc81_709d_4095_b63d_69fe4b0d9030);
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const _IID_IXmlResolver: ::windows_sys::core::GUID = ::windows_sys::core::GUID::from_u128(0x7279fc82_709d_4095_b63d_69fe4b0d9030);
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const _IID_IXmlWriter: ::windows_sys::core::GUID = ::windows_sys::core::GUID::from_u128(0x7279fc88_709d_4095_b63d_69fe4b0d9030);

// DtdProcessing enumeration (C-style: type alias + const variants).
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub type DtdProcessing = i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const DtdProcessing_Prohibit: DtdProcessing = 0i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const DtdProcessing_Parse: DtdProcessing = 1i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const _DtdProcessing_Last: DtdProcessing = 1i32;

// XmlConformanceLevel enumeration.
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub type XmlConformanceLevel = i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlConformanceLevel_Auto: XmlConformanceLevel = 0i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlConformanceLevel_Fragment: XmlConformanceLevel = 1i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlConformanceLevel_Document: XmlConformanceLevel = 2i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const _XmlConformanceLevel_Last: XmlConformanceLevel = 2i32;

// XmlError: HRESULT-valued error codes, grouped by prefix
// (MX_ = input, WC_ = well-formedness, NC_ = namespaces,
//  SC_ = security limits, WR_ = writer, XML_ = character values).
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub type XmlError = i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const MX_E_MX: XmlError = -1072894464i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const MX_E_INPUTEND: XmlError = -1072894463i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const MX_E_ENCODING: XmlError = -1072894462i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const MX_E_ENCODINGSWITCH: XmlError = -1072894461i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const MX_E_ENCODINGSIGNATURE: XmlError = -1072894460i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_WC: XmlError = -1072894432i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_WHITESPACE: XmlError = -1072894431i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_SEMICOLON: XmlError = -1072894430i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_GREATERTHAN: XmlError = -1072894429i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_QUOTE: XmlError = -1072894428i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_EQUAL: XmlError = -1072894427i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_LESSTHAN: XmlError = -1072894426i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_HEXDIGIT: XmlError = -1072894425i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_DIGIT: XmlError = -1072894424i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_LEFTBRACKET: XmlError = -1072894423i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_LEFTPAREN: XmlError = -1072894422i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_XMLCHARACTER: XmlError = -1072894421i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_NAMECHARACTER: XmlError = -1072894420i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_SYNTAX: XmlError = -1072894419i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_CDSECT: XmlError = -1072894418i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_COMMENT: XmlError = -1072894417i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_CONDSECT: XmlError = -1072894416i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_DECLATTLIST: XmlError = -1072894415i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_DECLDOCTYPE: XmlError = -1072894414i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_DECLELEMENT: XmlError = -1072894413i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_DECLENTITY: XmlError = -1072894412i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_DECLNOTATION: XmlError = -1072894411i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_NDATA: XmlError = -1072894410i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_PUBLIC: XmlError = -1072894409i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_SYSTEM: XmlError = -1072894408i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_NAME: XmlError = -1072894407i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_ROOTELEMENT: XmlError = -1072894406i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_ELEMENTMATCH: XmlError = -1072894405i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_UNIQUEATTRIBUTE: XmlError = -1072894404i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_TEXTXMLDECL: XmlError = -1072894403i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_LEADINGXML: XmlError = -1072894402i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_TEXTDECL: XmlError = -1072894401i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_XMLDECL: XmlError = -1072894400i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_ENCNAME: XmlError = -1072894399i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_PUBLICID: XmlError = -1072894398i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_PESINTERNALSUBSET: XmlError = -1072894397i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_PESBETWEENDECLS: XmlError = -1072894396i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_NORECURSION: XmlError = -1072894395i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_ENTITYCONTENT: XmlError = -1072894394i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_UNDECLAREDENTITY: XmlError = -1072894393i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_PARSEDENTITY: XmlError = -1072894392i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_NOEXTERNALENTITYREF: XmlError = -1072894391i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_PI: XmlError = -1072894390i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_SYSTEMID: XmlError = -1072894389i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_QUESTIONMARK: XmlError = -1072894388i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_CDSECTEND: XmlError = -1072894387i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_MOREDATA: XmlError = -1072894386i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_DTDPROHIBITED: XmlError = -1072894385i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WC_E_INVALIDXMLSPACE: XmlError = -1072894384i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const NC_E_NC: XmlError = -1072894368i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const NC_E_QNAMECHARACTER: XmlError = -1072894367i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const NC_E_QNAMECOLON: XmlError = -1072894366i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const NC_E_NAMECOLON: XmlError = -1072894365i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const NC_E_DECLAREDPREFIX: XmlError = -1072894364i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const NC_E_UNDECLAREDPREFIX: XmlError = -1072894363i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const NC_E_EMPTYURI: XmlError = -1072894362i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const NC_E_XMLPREFIXRESERVED: XmlError = -1072894361i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const NC_E_XMLNSPREFIXRESERVED: XmlError = -1072894360i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const NC_E_XMLURIRESERVED: XmlError = -1072894359i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const NC_E_XMLNSURIRESERVED: XmlError = -1072894358i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const SC_E_SC: XmlError = -1072894336i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const SC_E_MAXELEMENTDEPTH: XmlError = -1072894335i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const SC_E_MAXENTITYEXPANSION: XmlError = -1072894334i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WR_E_WR: XmlError = -1072894208i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WR_E_NONWHITESPACE: XmlError = -1072894207i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WR_E_NSPREFIXDECLARED: XmlError = -1072894206i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WR_E_NSPREFIXWITHEMPTYNSURI: XmlError = -1072894205i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WR_E_DUPLICATEATTRIBUTE: XmlError = -1072894204i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WR_E_XMLNSPREFIXDECLARATION: XmlError = -1072894203i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WR_E_XMLPREFIXDECLARATION: XmlError = -1072894202i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WR_E_XMLURIDECLARATION: XmlError = -1072894201i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WR_E_XMLNSURIDECLARATION: XmlError = -1072894200i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WR_E_NAMESPACEUNDECLARED: XmlError = -1072894199i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WR_E_INVALIDXMLSPACE: XmlError = -1072894198i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WR_E_INVALIDACTION: XmlError = -1072894197i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const WR_E_INVALIDSURROGATEPAIR: XmlError = -1072894196i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XML_E_INVALID_DECIMAL: XmlError = -1072898019i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XML_E_INVALID_HEXIDECIMAL: XmlError = -1072898018i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XML_E_INVALID_UNICODE: XmlError = -1072898017i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XML_E_INVALIDENCODING: XmlError = -1072897938i32;

// XmlNodeType enumeration (continues beyond this file chunk).
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub type XmlNodeType = i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlNodeType_None: XmlNodeType = 0i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlNodeType_Element: XmlNodeType = 1i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlNodeType_Attribute: XmlNodeType = 2i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlNodeType_Text: XmlNodeType = 3i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlNodeType_CDATA: XmlNodeType = 4i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlNodeType_ProcessingInstruction: XmlNodeType = 7i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlNodeType_Comment: XmlNodeType = 8i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlNodeType_DocumentType: XmlNodeType = 10i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlNodeType_Whitespace: XmlNodeType = 13i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlNodeType_EndElement: XmlNodeType = 15i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlNodeType_XmlDeclaration: XmlNodeType = 17i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const _XmlNodeType_Last: XmlNodeType = 17i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub type XmlReadState = i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlReadState_Initial: XmlReadState = 0i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlReadState_Interactive: XmlReadState = 1i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlReadState_Error: XmlReadState = 2i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlReadState_EndOfFile: XmlReadState = 3i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlReadState_Closed: XmlReadState = 4i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub type XmlReaderProperty = i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlReaderProperty_MultiLanguage: XmlReaderProperty = 0i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlReaderProperty_ConformanceLevel: XmlReaderProperty = 1i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlReaderProperty_RandomAccess: XmlReaderProperty = 2i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlReaderProperty_XmlResolver: XmlReaderProperty = 3i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlReaderProperty_DtdProcessing: XmlReaderProperty = 4i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlReaderProperty_ReadState: XmlReaderProperty = 5i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlReaderProperty_MaxElementDepth: XmlReaderProperty = 6i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlReaderProperty_MaxEntityExpansion: XmlReaderProperty = 7i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const _XmlReaderProperty_Last: XmlReaderProperty = 7i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub type XmlStandalone = i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlStandalone_Omit: XmlStandalone = 0i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlStandalone_Yes: XmlStandalone = 1i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlStandalone_No: XmlStandalone = 2i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const _XmlStandalone_Last: XmlStandalone = 2i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub type XmlWriterProperty = i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlWriterProperty_MultiLanguage: XmlWriterProperty = 0i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlWriterProperty_Indent: XmlWriterProperty = 1i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlWriterProperty_ByteOrderMark: XmlWriterProperty = 2i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlWriterProperty_OmitXmlDeclaration: XmlWriterProperty = 3i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlWriterProperty_ConformanceLevel: XmlWriterProperty = 4i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const XmlWriterProperty_CompactEmptyElement: XmlWriterProperty = 5i32;
#[doc = "*Required features: `\"Win32_Data_Xml_XmlLite\"`*"]
pub const _XmlWriterProperty_Last: XmlWriterProperty = 5i32;
|
extern crate log;
use log::*;
/// Minimal logger that prints timestamped records to stdout.
pub struct SimpleLogger;

impl log::Log for SimpleLogger {
    /// Only records at `Info` level or more severe pass the filter.
    fn enabled(&self, metadata: &LogMetadata) -> bool {
        metadata.level() <= LogLevel::Info
    }

    /// Print `TIMESTAMP: LEVEL - MESSAGE` for enabled records.
    fn log(&self, record: &LogRecord) {
        use chrono::Local;
        if self.enabled(record.metadata()) {
            // `DelayedFormat` implements `Display`, so the timestamp is
            // formatted straight into the output — the previous `.to_string()`
            // allocated an intermediate String for no benefit.
            println!(
                "{}: {} - {}",
                Local::now().format("%Y-%m-%d %H:%M:%S"),
                record.level(),
                record.args()
            );
        }
    }
}
|
// Reader/writer aliases and reset value for the OPAMP1_CSR register
// (svd2rust-generated).
#[doc = "Reader of register OPAMP1_CSR"]
pub type R = crate::R<u32, super::OPAMP1_CSR>;
#[doc = "Writer for register OPAMP1_CSR"]
pub type W = crate::W<u32, super::OPAMP1_CSR>;
#[doc = "Register OPAMP1_CSR `reset()`'s with value 0"]
impl crate::ResetValue for super::OPAMP1_CSR {
    type Type = u32;
    /// Documented hardware reset value: all fields zero.
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
#[doc = "Reader of field `OPAEN`"]
pub type OPAEN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `OPAEN`"]
pub struct OPAEN_W<'a> {
    w: &'a mut W,
}
impl<'a> OPAEN_W<'a> {
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Bit 0: clear it in the register image, then merge in `value`.
        let cleared = self.w.bits & !0x01;
        self.w.bits = cleared | (u32::from(value) & 0x01);
        self.w
    }
}
#[doc = "Reader of field `FORCE_VP`"]
pub type FORCE_VP_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `FORCE_VP`"]
pub struct FORCE_VP_W<'a> {
    w: &'a mut W,
}
impl<'a> FORCE_VP_W<'a> {
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Bit 1: clear it in the register image, then merge in `value`.
        let cleared = self.w.bits & !(0x01 << 1);
        self.w.bits = cleared | ((u32::from(value) & 0x01) << 1);
        self.w
    }
}
#[doc = "Reader of field `VP_SEL`"]
pub type VP_SEL_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `VP_SEL`"]
pub struct VP_SEL_W<'a> {
    w: &'a mut W,
}
impl<'a> VP_SEL_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Two-bit field at offset 2: clear it, then merge in `value`.
        let cleared = self.w.bits & !(0x03 << 2);
        self.w.bits = cleared | ((u32::from(value) & 0x03) << 2);
        self.w
    }
}
#[doc = "Reader of field `VM_SEL`"]
pub type VM_SEL_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `VM_SEL`"]
pub struct VM_SEL_W<'a> {
    w: &'a mut W,
}
impl<'a> VM_SEL_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Two-bit field at offset 5: clear it, then merge in `value`.
        let cleared = self.w.bits & !(0x03 << 5);
        self.w.bits = cleared | ((u32::from(value) & 0x03) << 5);
        self.w
    }
}
#[doc = "Reader of field `OPAHSM`"]
pub type OPAHSM_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `OPAHSM`"]
pub struct OPAHSM_W<'a> {
    w: &'a mut W,
}
impl<'a> OPAHSM_W<'a> {
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Bit 8: clear it in the register image, then merge in `value`.
        let cleared = self.w.bits & !(0x01 << 8);
        self.w.bits = cleared | ((u32::from(value) & 0x01) << 8);
        self.w
    }
}
#[doc = "Reader of field `CALON`"]
pub type CALON_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `CALON`"]
pub struct CALON_W<'a> {
    w: &'a mut W,
}
impl<'a> CALON_W<'a> {
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Bit 11: clear it in the register image, then merge in `value`.
        let cleared = self.w.bits & !(0x01 << 11);
        self.w.bits = cleared | ((u32::from(value) & 0x01) << 11);
        self.w
    }
}
#[doc = "Reader of field `CALSEL`"]
pub type CALSEL_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `CALSEL`"]
pub struct CALSEL_W<'a> {
    w: &'a mut W,
}
impl<'a> CALSEL_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Two-bit field at offset 12: clear it, then merge in `value`.
        let cleared = self.w.bits & !(0x03 << 12);
        self.w.bits = cleared | ((u32::from(value) & 0x03) << 12);
        self.w
    }
}
#[doc = "Reader of field `PGA_GAIN`"]
pub type PGA_GAIN_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `PGA_GAIN`"]
pub struct PGA_GAIN_W<'a> {
    w: &'a mut W,
}
impl<'a> PGA_GAIN_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Four-bit field at offset 14: clear it, then merge in `value`.
        let cleared = self.w.bits & !(0x0f << 14);
        self.w.bits = cleared | ((u32::from(value) & 0x0f) << 14);
        self.w
    }
}
#[doc = "Reader of field `USERTRIM`"]
pub type USERTRIM_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `USERTRIM`"]
pub struct USERTRIM_W<'a> {
    w: &'a mut W,
}
impl<'a> USERTRIM_W<'a> {
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Bit 18: clear it in the register image, then merge in `value`.
        let cleared = self.w.bits & !(0x01 << 18);
        self.w.bits = cleared | ((u32::from(value) & 0x01) << 18);
        self.w
    }
}
#[doc = "Reader of field `TSTREF`"]
pub type TSTREF_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TSTREF`"]
pub struct TSTREF_W<'a> {
    w: &'a mut W,
}
impl<'a> TSTREF_W<'a> {
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Bit 29: clear it in the register image, then merge in `value`.
        let cleared = self.w.bits & !(0x01 << 29);
        self.w.bits = cleared | ((u32::from(value) & 0x01) << 29);
        self.w
    }
}
#[doc = "Reader of field `CALOUT`"]
pub type CALOUT_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `CALOUT`"]
pub struct CALOUT_W<'a> {
    w: &'a mut W,
}
impl<'a> CALOUT_W<'a> {
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Bit 30: clear it in the register image, then merge in `value`.
        let cleared = self.w.bits & !(0x01 << 30);
        self.w.bits = cleared | ((u32::from(value) & 0x01) << 30);
        self.w
    }
}
// Read-side accessors: each extracts its field's raw bits and wraps them in
// the field's typed reader.
impl R {
    #[doc = "Bit 0 - Operational amplifier Enable"]
    #[inline(always)]
    pub fn opaen(&self) -> OPAEN_R {
        let raw = self.bits & 0x01;
        OPAEN_R::new(raw != 0)
    }
    #[doc = "Bit 1 - Force internal reference on VP (reserved for test"]
    #[inline(always)]
    pub fn force_vp(&self) -> FORCE_VP_R {
        let raw = (self.bits >> 1) & 0x01;
        FORCE_VP_R::new(raw != 0)
    }
    #[doc = "Bits 2:3 - Operational amplifier PGA mode"]
    #[inline(always)]
    pub fn vp_sel(&self) -> VP_SEL_R {
        let raw = (self.bits >> 2) & 0x03;
        VP_SEL_R::new(raw as u8)
    }
    #[doc = "Bits 5:6 - Inverting input selection"]
    #[inline(always)]
    pub fn vm_sel(&self) -> VM_SEL_R {
        let raw = (self.bits >> 5) & 0x03;
        VM_SEL_R::new(raw as u8)
    }
    #[doc = "Bit 8 - Operational amplifier high-speed mode"]
    #[inline(always)]
    pub fn opahsm(&self) -> OPAHSM_R {
        let raw = (self.bits >> 8) & 0x01;
        OPAHSM_R::new(raw != 0)
    }
    #[doc = "Bit 11 - Calibration mode enabled"]
    #[inline(always)]
    pub fn calon(&self) -> CALON_R {
        let raw = (self.bits >> 11) & 0x01;
        CALON_R::new(raw != 0)
    }
    #[doc = "Bits 12:13 - Calibration selection"]
    #[inline(always)]
    pub fn calsel(&self) -> CALSEL_R {
        let raw = (self.bits >> 12) & 0x03;
        CALSEL_R::new(raw as u8)
    }
    #[doc = "Bits 14:17 - allows to switch from AOP offset trimmed values to AOP offset"]
    #[inline(always)]
    pub fn pga_gain(&self) -> PGA_GAIN_R {
        let raw = (self.bits >> 14) & 0x0f;
        PGA_GAIN_R::new(raw as u8)
    }
    #[doc = "Bit 18 - User trimming enable"]
    #[inline(always)]
    pub fn usertrim(&self) -> USERTRIM_R {
        let raw = (self.bits >> 18) & 0x01;
        USERTRIM_R::new(raw != 0)
    }
    #[doc = "Bit 29 - OPAMP calibration reference voltage output control (reserved for test)"]
    #[inline(always)]
    pub fn tstref(&self) -> TSTREF_R {
        let raw = (self.bits >> 29) & 0x01;
        TSTREF_R::new(raw != 0)
    }
    #[doc = "Bit 30 - Operational amplifier calibration output"]
    #[inline(always)]
    pub fn calout(&self) -> CALOUT_R {
        let raw = (self.bits >> 30) & 0x01;
        CALOUT_R::new(raw != 0)
    }
}
// Write-side accessors: each hands out a one-shot proxy that mutably borrows
// this `W` and patches only its field's bits.
impl W {
    #[doc = "Bit 0 - Operational amplifier Enable"]
    #[inline(always)]
    pub fn opaen(&mut self) -> OPAEN_W {
        OPAEN_W { w: self }
    }
    #[doc = "Bit 1 - Force internal reference on VP (reserved for test"]
    #[inline(always)]
    pub fn force_vp(&mut self) -> FORCE_VP_W {
        FORCE_VP_W { w: self }
    }
    #[doc = "Bits 2:3 - Operational amplifier PGA mode"]
    #[inline(always)]
    pub fn vp_sel(&mut self) -> VP_SEL_W {
        VP_SEL_W { w: self }
    }
    #[doc = "Bits 5:6 - Inverting input selection"]
    #[inline(always)]
    pub fn vm_sel(&mut self) -> VM_SEL_W {
        VM_SEL_W { w: self }
    }
    #[doc = "Bit 8 - Operational amplifier high-speed mode"]
    #[inline(always)]
    pub fn opahsm(&mut self) -> OPAHSM_W {
        OPAHSM_W { w: self }
    }
    #[doc = "Bit 11 - Calibration mode enabled"]
    #[inline(always)]
    pub fn calon(&mut self) -> CALON_W {
        CALON_W { w: self }
    }
    #[doc = "Bits 12:13 - Calibration selection"]
    #[inline(always)]
    pub fn calsel(&mut self) -> CALSEL_W {
        CALSEL_W { w: self }
    }
    #[doc = "Bits 14:17 - allows to switch from AOP offset trimmed values to AOP offset"]
    #[inline(always)]
    pub fn pga_gain(&mut self) -> PGA_GAIN_W {
        PGA_GAIN_W { w: self }
    }
    #[doc = "Bit 18 - User trimming enable"]
    #[inline(always)]
    pub fn usertrim(&mut self) -> USERTRIM_W {
        USERTRIM_W { w: self }
    }
    #[doc = "Bit 29 - OPAMP calibration reference voltage output control (reserved for test)"]
    #[inline(always)]
    pub fn tstref(&mut self) -> TSTREF_W {
        TSTREF_W { w: self }
    }
    #[doc = "Bit 30 - Operational amplifier calibration output"]
    #[inline(always)]
    pub fn calout(&mut self) -> CALOUT_W {
        CALOUT_W { w: self }
    }
}
|
use std::borrow::Cow;
use tabwriter::TabWriter;
use CliResult;
use config::{Config, Delimiter};
use util;
static USAGE: &'static str = "
Outputs CSV data as a table with columns in alignment.
This will not work well if the CSV data contains large fields.
Note that formatting a table requires buffering all CSV data into memory.
Therefore, you should use the 'sample' or 'slice' command to trim down large
CSV data before formatting it with this command.
Usage:
xsv table [options] [<input>]
table options:
-w, --width <arg> The minimum width of each column.
[default: 2]
-p, --pad <arg> The minimum number of spaces between each column.
[default: 2]
-c, --condense <arg> Limits the length of each field to the value
specified. If the field is UTF-8 encoded, then
<arg> refers to the number of code points.
Otherwise, it refers to the number of bytes.
Common options:
-h, --help Display this message
-o, --output <file> Write output to <file> instead of stdout.
-d, --delimiter <arg> The field delimiter for reading CSV data.
Must be a single character. (default: ,)
";
/// Command-line arguments decoded from `USAGE` by docopt (field names must
/// match the `arg_`/`flag_` naming convention docopt expects).
#[derive(RustcDecodable)]
struct Args {
    /// Positional `<input>` path; `None` reads from stdin.
    arg_input: Option<String>,
    /// `--width`: minimum column width.
    flag_width: usize,
    /// `--pad`: minimum spaces between columns.
    flag_pad: usize,
    /// `--output`: destination path; `None` writes to stdout.
    flag_output: Option<String>,
    /// `--delimiter`: input field delimiter.
    flag_delimiter: Option<Delimiter>,
    /// `--condense`: maximum field length, if set.
    flag_condense: Option<usize>,
}
pub fn run(argv: &[&str]) -> CliResult<()> {
let args: Args = try!(util::get_args(USAGE, argv));
let rconfig = Config::new(&args.arg_input)
.delimiter(args.flag_delimiter)
.no_headers(true);
let wconfig = Config::new(&args.flag_output)
.delimiter(Some(Delimiter(b'\t')));
let tw = TabWriter::new(try!(wconfig.io_writer()))
.minwidth(args.flag_width)
.padding(args.flag_pad);
let mut wtr = wconfig.from_writer(tw);
let mut rdr = try!(rconfig.reader());
for r in rdr.byte_records() {
let r = try!(r);
let row = r.iter().map(|f| util::condense(Cow::Borrowed(&**f),
args.flag_condense));
try!(wtr.write(row));
}
try!(wtr.flush());
Ok(())
}
|
use log::debug;
use mkit::{
cbor::{FromCbor, IntoCbor},
thread,
};
use std::{
borrow::BorrowMut,
mem,
sync::{
atomic::{AtomicU64, Ordering::SeqCst},
Arc, RwLock,
},
};
use crate::{entry, journal::Journal, state, wral, wral::Config, Error, Result};
/// Requests accepted by the writer thread.
#[derive(Debug)]
pub enum Req {
    /// Append an opaque operation payload to the log.
    AddEntry { op: Vec<u8> },
}
/// Responses sent back by the writer thread.
#[derive(Debug)]
pub enum Res {
    /// Sequence number assigned to the appended entry.
    Seqno(u64),
}
/// Writer state: the active journal plus all rotated-out journals.
pub struct Writer<S> {
    config: Config,
    // Monotonic seqno generator, shared with the writer thread's MainLoop.
    seqno: Arc<AtomicU64>,
    pub journals: Vec<Journal<S>>,
    pub journal: Journal<S>,
}
/// Bundle returned by `Writer::start`: shared writer state, the spawned
/// writer thread, and the channel used to send it requests.
type SpawnWriter<S> = (
    Arc<RwLock<Writer<S>>>,
    thread::Thread<Req, Res, Result<u64>>,
    thread::Tx<Req, Res>,
);
impl<S> Writer<S> {
    /// Wrap the writer state in an `Arc<RwLock<..>>` and spawn the writer
    /// thread (`MainLoop`) that services `Req`s; returns the shared state,
    /// the thread handle, and the request channel.
    pub(crate) fn start(
        config: Config,
        journals: Vec<Journal<S>>,
        journal: Journal<S>,
        seqno: u64,
    ) -> SpawnWriter<S>
    where
        S: state::State,
    {
        let seqno = Arc::new(AtomicU64::new(seqno));
        let w = Arc::new(RwLock::new(Writer {
            config: config.clone(),
            seqno: Arc::clone(&seqno),
            journals,
            journal,
        }));
        let name = format!("wral-writer-{}", config.name);
        let thread_w = Arc::clone(&w);
        let (t, tx) = thread::Thread::new_sync(
            &name,
            wral::SYNC_BUFFER,
            move |rx: thread::Rx<Req, Res>| MainLoop {
                config,
                seqno,
                w: thread_w,
                rx,
            },
        );
        (w, t, tx)
    }

    /// Log a close summary and return the last issued seqno.
    pub fn close(&self) -> Result<u64> {
        let n_batches: usize = self.journals.iter().map(|j| j.len_batches()).sum();
        // n = journal count, m = total batch count; count the active journal
        // only when it actually holds batches.
        let (n, m) = match self.journal.len_batches() {
            0 => (self.journals.len(), n_batches),
            b => (self.journals.len() + 1, n_batches + b),
        };
        let seqno = self.seqno.load(SeqCst);
        // BUGFIX: arguments were previously passed as `m, n`, swapping the
        // journal and batch counts in the log line.
        debug!(
            target: "wral",
            "{:?}/{} closed at seqno {}, with {} journals and {} batches",
            self.config.dir, self.config.name, seqno, n, m
        );
        Ok(self.seqno.load(SeqCst).saturating_sub(1))
    }

    /// Close the log, then delete every journal file; returns the last
    /// issued seqno.
    pub fn purge(mut self) -> Result<u64> {
        self.close()?;
        for j in self.journals.drain(..) {
            j.purge()?
        }
        self.journal.purge()?;
        Ok(self.seqno.load(SeqCst).saturating_sub(1))
    }
}
/// State for the writer thread's event loop; executed through its `FnOnce`
/// implementation.
struct MainLoop<S> {
    config: Config,
    // Shared with `Writer`; incremented once per appended entry.
    seqno: Arc<AtomicU64>,
    // Shared handle to the writer state (journals).
    w: Arc<RwLock<Writer<S>>>,
    // Incoming requests, paired with an optional reply channel.
    rx: thread::Rx<Req, Res>,
}
// NOTE(review): a hand-written `FnOnce` impl requires the nightly
// `fn_traits`/`unboxed_closures` features — presumably enabled at the crate
// root; confirm there.
impl<S> FnOnce<()> for MainLoop<S>
where
    S: Clone + IntoCbor + FromCbor + state::State,
{
    type Output = Result<u64>;
    // Event loop: batch up pending requests, append them to the journal,
    // flush once per batch, ack each request, and rotate the journal when it
    // grows past the configured size limit.
    extern "rust-call" fn call_once(self, _args: ()) -> Self::Output {
        use std::sync::mpsc::TryRecvError;
        // block for the first request.
        'a: while let Ok(req) = self.rx.recv() {
            // then get as many outstanding requests as possible from
            // the channel.
            let mut reqs = vec![req];
            loop {
                match self.rx.try_recv() {
                    Ok(req) => reqs.push(req),
                    Err(TryRecvError::Empty) => break,
                    // Sender side gone: exit the outer loop and return.
                    Err(TryRecvError::Disconnected) => break 'a,
                }
            }
            // and then start processing it in batch.
            let mut w = err_at!(Fatal, self.w.write())?;
            let mut items = vec![];
            for req in reqs.into_iter() {
                match req {
                    (Req::AddEntry { op }, tx) => {
                        // Seqno is allocated here, but the reply is only sent
                        // after the whole batch is flushed below.
                        let seqno = self.seqno.fetch_add(1, SeqCst);
                        w.journal.add_entry(entry::Entry::new(seqno, op))?;
                        items.push((seqno, tx))
                    }
                }
            }
            w.journal.flush()?;
            // Acknowledge each request (if the caller supplied a reply channel).
            for (seqno, tx) in items.into_iter() {
                if let Some(tx) = tx {
                    err_at!(IPCFail, tx.send(Res::Seqno(seqno)))?;
                }
            }
            if w.journal.file_size()? > self.config.journal_limit {
                Self::rotate(w.borrow_mut())?;
            }
        }
        // Return the last issued seqno (fetch_add leaves seqno one past it).
        Ok(self.seqno.load(SeqCst).saturating_sub(1))
    }
}
impl<S> MainLoop<S>
where
    S: Clone,
{
    /// Swap in a fresh journal and archive the current one.
    fn rotate(w: &mut Writer<S>) -> Result<()> {
        // Open the successor journal, carrying the journal number and state
        // forward from the active one.
        let next_num = w.journal.to_journal_number().saturating_add(1);
        let next = Journal::start(&w.config.name, &w.config.dir, next_num, w.journal.to_state())?;
        // Make the successor active; archive the journal it displaced.
        let old = mem::replace(&mut w.journal, next);
        let (old, entries, _) = old.into_archive();
        // A rotated journal must have been flushed already.
        if !entries.is_empty() {
            err_at!(Fatal, msg: "unflushed entries {}", entries.len())?
        }
        w.journals.push(old);
        Ok(())
    }
}
|
#[cfg(not(target_arch = "wasm32"))]
use crate::MockedBlockchain;
/// A low-level interface of either real or mocked blockchain that contract interacts with.
///
/// Every method is `unsafe`: implementations exchange data through host-side
/// registers (addressed by `register_id`) and raw pointer/length pairs
/// (`*_ptr`/`*_len`), and callers must guarantee those are valid.
/// NOTE(review): the exact pointer/register contract is defined by the host
/// runtime and is not visible from this file — confirm against its spec.
pub trait BlockchainInterface {
    // #############
    // # Registers #
    // #############
    /// Copy the contents of a register to the memory at `ptr`.
    unsafe fn read_register(&self, register_id: u64, ptr: u64);
    /// Length in bytes of the given register's contents.
    unsafe fn register_len(&self, register_id: u64) -> u64;
    // ###############
    // # Context API #
    // ###############
    unsafe fn current_account_id(&self, register_id: u64);
    unsafe fn signer_account_id(&self, register_id: u64);
    unsafe fn signer_account_pk(&self, register_id: u64);
    unsafe fn predecessor_account_id(&self, register_id: u64);
    unsafe fn input(&self, register_id: u64);
    unsafe fn block_index(&self) -> u64;
    unsafe fn block_timestamp(&self) -> u64;
    unsafe fn storage_usage(&self) -> u64;
    // #################
    // # Economics API #
    // #################
    unsafe fn account_balance(&self, balance_ptr: u64);
    unsafe fn attached_deposit(&self, balance_ptr: u64);
    unsafe fn prepaid_gas(&self) -> u64;
    unsafe fn used_gas(&self) -> u64;
    // ############
    // # Math API #
    // ############
    unsafe fn random_seed(&self, register_id: u64);
    unsafe fn sha256(&self, value_len: u64, value_ptr: u64, register_id: u64);
    // #####################
    // # Miscellaneous API #
    // #####################
    unsafe fn value_return(&self, value_len: u64, value_ptr: u64);
    unsafe fn panic(&self);
    unsafe fn panic_utf8(&self, len: u64, ptr: u64);
    unsafe fn log_utf8(&self, len: u64, ptr: u64);
    unsafe fn log_utf16(&self, len: u64, ptr: u64);
    // ################
    // # Promises API #
    // ################
    unsafe fn promise_create(
        &self,
        account_id_len: u64,
        account_id_ptr: u64,
        method_name_len: u64,
        method_name_ptr: u64,
        arguments_len: u64,
        arguments_ptr: u64,
        amount_ptr: u64,
        gas: u64,
    ) -> u64;
    unsafe fn promise_then(
        &self,
        promise_index: u64,
        account_id_len: u64,
        account_id_ptr: u64,
        method_name_len: u64,
        method_name_ptr: u64,
        arguments_len: u64,
        arguments_ptr: u64,
        amount_ptr: u64,
        gas: u64,
    ) -> u64;
    unsafe fn promise_and(&self, promise_idx_ptr: u64, promise_idx_count: u64) -> u64;
    unsafe fn promise_batch_create(&self, account_id_len: u64, account_id_ptr: u64) -> u64;
    unsafe fn promise_batch_then(
        &self,
        promise_index: u64,
        account_id_len: u64,
        account_id_ptr: u64,
    ) -> u64;
    // #######################
    // # Promise API actions #
    // #######################
    unsafe fn promise_batch_action_create_account(&self, promise_index: u64);
    unsafe fn promise_batch_action_deploy_contract(
        &self,
        promise_index: u64,
        code_len: u64,
        code_ptr: u64,
    );
    unsafe fn promise_batch_action_function_call(
        &self,
        promise_index: u64,
        method_name_len: u64,
        method_name_ptr: u64,
        arguments_len: u64,
        arguments_ptr: u64,
        amount_ptr: u64,
        gas: u64,
    );
    unsafe fn promise_batch_action_transfer(&self, promise_index: u64, amount_ptr: u64);
    unsafe fn promise_batch_action_stake(
        &self,
        promise_index: u64,
        amount_ptr: u64,
        public_key_len: u64,
        public_key_ptr: u64,
    );
    unsafe fn promise_batch_action_add_key_with_full_access(
        &self,
        promise_index: u64,
        public_key_len: u64,
        public_key_ptr: u64,
        nonce: u64,
    );
    unsafe fn promise_batch_action_add_key_with_function_call(
        &self,
        promise_index: u64,
        public_key_len: u64,
        public_key_ptr: u64,
        nonce: u64,
        allowance_ptr: u64,
        receiver_id_len: u64,
        receiver_id_ptr: u64,
        method_names_len: u64,
        method_names_ptr: u64,
    );
    unsafe fn promise_batch_action_delete_key(
        &self,
        promise_index: u64,
        public_key_len: u64,
        public_key_ptr: u64,
    );
    unsafe fn promise_batch_action_delete_account(
        &self,
        promise_index: u64,
        beneficiary_id_len: u64,
        beneficiary_id_ptr: u64,
    );
    // #######################
    // # Promise API results #
    // #######################
    unsafe fn promise_results_count(&self) -> u64;
    unsafe fn promise_result(&self, result_idx: u64, register_id: u64) -> u64;
    unsafe fn promise_return(&self, promise_id: u64);
    // ###############
    // # Storage API #
    // ###############
    unsafe fn storage_write(
        &self,
        key_len: u64,
        key_ptr: u64,
        value_len: u64,
        value_ptr: u64,
        register_id: u64,
    ) -> u64;
    unsafe fn storage_read(&self, key_len: u64, key_ptr: u64, register_id: u64) -> u64;
    unsafe fn storage_remove(&self, key_len: u64, key_ptr: u64, register_id: u64) -> u64;
    unsafe fn storage_has_key(&self, key_len: u64, key_ptr: u64) -> u64;
    unsafe fn storage_iter_prefix(&self, prefix_len: u64, prefix_ptr: u64) -> u64;
    unsafe fn storage_iter_range(
        &self,
        start_len: u64,
        start_ptr: u64,
        end_len: u64,
        end_ptr: u64,
    ) -> u64;
    unsafe fn storage_iter_next(
        &self,
        iterator_id: u64,
        key_register_id: u64,
        value_register_id: u64,
    ) -> u64;
    /// Test hook: returns the mocked blockchain if this is one (presumably
    /// overridden by `MockedBlockchain`); the default — a real chain —
    /// returns `None`. Not compiled for wasm targets.
    #[cfg(not(target_arch = "wasm32"))]
    fn as_mut_mocked_blockchain(&mut self) -> Option<&mut MockedBlockchain> {
        None
    }
}
|
//! This module implements the `skipped-compactions` CLI command
use comfy_table::{Cell, Table};
use influxdb_iox_client::{
compactor::{self, generated_types::SkippedCompaction},
connection::Connection,
};
use iox_time::Time;
use thiserror::Error;
/// Errors produced by the `skipped-compactions` subcommands.
#[derive(Debug, Error)]
pub enum Error {
    /// JSON serialization failed.
    #[error("JSON Serialization error: {0}")]
    Serde(#[from] serde_json::Error),
    /// The IOx client call failed.
    #[error("Client error: {0}")]
    Client(#[from] influxdb_iox_client::error::Error),
    /// The record's `skipped_at` seconds value could not be converted to a
    /// timestamp.
    #[error("Invalid `skipped_at` timestamp: {0}s")]
    InvalidTimestamp(i64),
}
/// Various commands for skipped compaction inspection
#[derive(Debug, clap::Parser)]
pub struct Config {
    /// Which subcommand to run.
    #[clap(subcommand)]
    command: Command,
}
/// All possible subcommands for skipped compaction
#[derive(Debug, clap::Parser)]
enum Command {
    /// List all skipped compactions
    List,
    /// Delete the requested skipped compaction
    // `partition_id` becomes a positional argument via the clap derive.
    Delete { partition_id: i64 },
}
/// Dispatch a `skipped-compactions` subcommand against the connected compactor.
pub async fn command(connection: Connection, config: Config) -> Result<(), Error> {
    let mut client = compactor::Client::new(connection);
    match config.command {
        Command::List => {
            let all = client.skipped_compactions().await?;
            println!("{}", create_table(&all)?);
        }
        Command::Delete { partition_id } => {
            let deleted = client.delete_skipped_compactions(partition_id).await?;
            // Render the zero-or-one deleted record as a (possibly empty) slice.
            let rows = deleted.as_ref().map(std::slice::from_ref).unwrap_or_default();
            println!("{}", create_table(rows)?);
        } // Deliberately not adding _ => so the compiler will direct people here to impl new
          // commands
    }
    Ok(())
}
/// Turn skipped compaction records into a table
#[allow(clippy::result_large_err)]
fn create_table(skipped_compactions: &[SkippedCompaction]) -> Result<Table, Error> {
let mut table = Table::new();
table.load_preset("||--+-++| ++++++");
let headers: Vec<_> = [
"partition_id",
"reason",
"skipped_at",
"estimated_bytes",
"limit_bytes",
"num_files",
"limit_num_files",
]
.into_iter()
.map(Cell::new)
.collect();
table.set_header(headers);
for skipped_compaction in skipped_compactions {
let timestamp = Time::from_timestamp(skipped_compaction.skipped_at, 0)
.ok_or(Error::InvalidTimestamp(skipped_compaction.skipped_at))?;
table.add_row(vec![
Cell::new(skipped_compaction.partition_id.to_string()),
Cell::new(&skipped_compaction.reason),
Cell::new(timestamp.to_rfc3339()),
Cell::new(skipped_compaction.estimated_bytes.to_string()),
Cell::new(skipped_compaction.limit_bytes.to_string()),
Cell::new(skipped_compaction.num_files.to_string()),
Cell::new(skipped_compaction.limit_num_files.to_string()),
]);
}
Ok(table)
}
|
extern crate gunship;
use gunship::engine::EngineBuilder;
fn main() {
    // Build the engine with an empty setup closure.
    // NOTE(review): assumes `max_workers(1)` caps the engine's worker-thread
    // pool at one — confirm against gunship's `EngineBuilder` documentation.
    let mut builder = EngineBuilder::new();
    builder.max_workers(1);
    builder.build(|| {});
}
|
use chrono::{DateTime, Duration, Utc};
use iox_catalog::interface::{Catalog, ParquetFileRepo};
use object_store::ObjectMeta;
use observability_deps::tracing::*;
use snafu::prelude::*;
use std::collections::HashSet;
use std::sync::Arc;
use tokio::sync::mpsc;
use tokio::time::timeout;
use uuid::Uuid;
/// Failure modes of the catalog-checking stage in this module.
#[derive(Debug, Snafu)]
#[allow(missing_docs)]
pub enum Error {
    /// A file name was expected but not present.
    #[snafu(display("Expected a file name"))]
    FileNameMissing,
    /// The item channel closed while input was still expected.
    #[snafu(display("Channel closed unexpectedly"))]
    ChannelClosed,
    /// Catalog lookup for a single object-store id failed.
    #[snafu(display("The catalog could not be queried for {object_store_id}"))]
    GetFile {
        source: iox_catalog::interface::Error,
        object_store_id: uuid::Uuid,
    },
    /// Catalog lookup for a whole batch of ids failed.
    #[snafu(display("The catalog could not be queried for the batch"))]
    FileExists {
        source: iox_catalog::interface::Error,
    },
    /// The downstream deleter hung up, so deletions can no longer be sent.
    #[snafu(display("The deleter task exited unexpectedly"))]
    DeleterExited {
        source: tokio::sync::mpsc::error::SendError<ObjectMeta>,
    },
}
/// The number of parquet files we will ask the catalog to look for at once.
// todo(pjb): I have no idea what's a good value here to amortize the request. More than 1 is a start.
// Here's the idea: group everything you can for 100ms `RECEIVE_TIMEOUT`, because that's not so much
// of a delay that it would cause issues, but if you manage to get a huge number of file ids, stop
// accumulating at 100 `CATALOG_BATCH_SIZE`.
const CATALOG_BATCH_SIZE: usize = 100;
/// How long to wait for another incoming item before flushing a partial batch.
const RECEIVE_TIMEOUT: core::time::Duration = core::time::Duration::from_millis(100); // This may not be long enough to collect many objects.
/// Module-local result alias defaulting to this module's [`Error`].
pub(crate) type Result<T, E = Error> = std::result::Result<T, E>;
/// Resolve the catalog's parquet-file repository, then run the checker loop.
pub(crate) async fn perform(
    catalog: Arc<dyn Catalog>,
    cutoff: Duration,
    items: mpsc::Receiver<ObjectMeta>,
    deleter: mpsc::Sender<ObjectMeta>,
) -> Result<()> {
    let mut repos = catalog.repositories().await;
    perform_inner(repos.parquet_files(), cutoff, items, deleter).await
}
/// Allows easier mocking of just `ParquetFileRepo` in tests.
async fn perform_inner(
parquet_files: &mut dyn ParquetFileRepo,
cutoff: Duration,
mut items: mpsc::Receiver<ObjectMeta>,
deleter: mpsc::Sender<ObjectMeta>,
) -> Result<()> {
let mut batch = Vec::with_capacity(CATALOG_BATCH_SIZE);
loop {
let maybe_item = timeout(RECEIVE_TIMEOUT, items.recv()).await;
// if we have an error, we timed out.
let timedout = maybe_item.is_err();
if let Ok(res) = maybe_item {
match res {
Some(item) => {
batch.push(item);
}
None => {
// The channel has been closed unexpectedly
return Err(Error::ChannelClosed);
}
}
};
if batch.len() >= CATALOG_BATCH_SIZE || timedout {
let older_than = chrono::offset::Utc::now() - cutoff;
for item in should_delete(batch, older_than, parquet_files).await {
deleter.send(item).await.context(DeleterExitedSnafu)?;
}
batch = Vec::with_capacity(100);
}
}
}
/// [should_delete] processes a list of object store file information to see if the object for this
/// [ObjectMeta] can be deleted.
/// It can be deleted if it is old enough AND there isn't a reference in the catalog for it anymore (or ever)
/// It will also say the file can be deleted if it isn't a parquet file or the uuid isn't valid.
/// [should_delete] returns a subset of the input, which are the items that "should" be deleted.
// It first processes the easy checks, age, uuid, file suffix, and other parse/data input errors. This
// checking is cheap. For the files that need to be checked against the catalog, it batches them to
// reduce the number of requests on the wire and amortize the catalog overhead. Setting the batch size
// to 1 will return this method to its previous behavior (1 request per file) and resource usage.
async fn should_delete(
    items: Vec<ObjectMeta>,
    cutoff: DateTime<Utc>,
    parquet_files: &mut dyn ParquetFileRepo,
) -> Vec<ObjectMeta> {
    // to_delete is the vector we will return to the caller containing ObjectMeta we think should be deleted.
    // it is never longer than `items`
    let mut to_delete = Vec::with_capacity(items.len());
    // After filtering out potential errors and non-parquet files, this vector accumulates the
    // (uuid, ObjectMeta) pairs that need to be checked against the catalog to see if we can delete them.
    let mut to_check_in_catalog = Vec::with_capacity(items.len());
    for candidate in items {
        if cutoff < candidate.last_modified {
            // expected to be a common reason to skip a file
            debug!(
                location = %candidate.location,
                deleting = false,
                reason = "too new",
                cutoff = %cutoff,
                last_modified = %candidate.last_modified,
                "Ignoring object",
            );
            // Not old enough; do not delete
            continue;
        }
        let Some(file_name) = candidate.location.parts().last() else {
            // missing file name entirely! likely not a valid object store file entry; skip it.
            // NOTE: this previously logged `deleting = true`, contradicting the
            // `continue` below — the object is skipped, not deleted.
            warn!(
                location = %candidate.location,
                deleting = false,
                reason = "bad location",
                "Ignoring object",
            );
            continue;
        };
        // extract the file suffix, delete it if it isn't a parquet file
        if let Some(uuid) = file_name.as_ref().strip_suffix(".parquet") {
            if let Ok(object_store_id) = uuid.parse::<Uuid>() {
                // add it to the list to check against the catalog
                // push a tuple that maps the uuid to the object meta struct so we don't have generate the uuid again
                to_check_in_catalog.push((object_store_id, candidate))
            } else {
                // expected to be a rare situation so warn.
                warn!(
                    location = %candidate.location,
                    deleting = true,
                    uuid,
                    reason = "not a valid UUID",
                    "Scheduling file for deletion",
                );
                to_delete.push(candidate)
            }
        } else {
            // expected to be a rare situation so warn.
            warn!(
                location = %candidate.location,
                deleting = true,
                reason = "not a .parquet file",
                "Scheduling file for deletion",
            );
            to_delete.push(candidate)
        }
    }
    // do_not_delete contains the items that are present in the catalog
    let mut do_not_delete: HashSet<Uuid> = HashSet::with_capacity(to_check_in_catalog.len());
    for batch in to_check_in_catalog.chunks(CATALOG_BATCH_SIZE) {
        // Uuid is Copy; build the batch without cloning a spare Vec.
        let just_uuids: Vec<_> = batch.iter().map(|(uuid, _)| *uuid).collect();
        match check_ids_exists_in_catalog(just_uuids, parquet_files).await {
            Ok(present_uuids) => {
                do_not_delete.extend(present_uuids);
            }
            Err(e) => {
                // on error assume all the uuids in this batch are present in the catalog
                do_not_delete.extend(batch.iter().map(|(uuid, _)| *uuid));
                warn!(
                    error = %e,
                    reason = "error querying catalog",
                    "Ignoring batch and continuing",
                );
            }
        }
    }
    if enabled!(Level::DEBUG) {
        do_not_delete.iter().for_each(|uuid| {
            debug!(
                deleting = false,
                uuid = %uuid,
                reason = "Object is present in catalog, not deleting",
                "Ignoring object",
            )
        });
    }
    // we have a set of uuids for the files we _do not_ want to delete (present in the catalog);
    // everything we checked that is NOT in that set gets scheduled for deletion.
    // `into_iter` moves the ObjectMeta out instead of cloning each survivor.
    to_delete.extend(
        to_check_in_catalog
            .into_iter()
            .filter(|(uuid, _)| !do_not_delete.contains(uuid))
            .map(|(_, meta)| meta),
    );
    to_delete
}
/// helper to check a batch of ids for presence in the catalog.
/// returns a list of the ids (from the original batch) that exist (or catalog error).
async fn check_ids_exists_in_catalog(
    candidates: Vec<Uuid>,
    parquet_files: &mut dyn ParquetFileRepo,
) -> Result<Vec<Uuid>> {
    let existing = parquet_files
        .exists_by_object_store_id_batch(candidates)
        .await;
    existing.context(FileExistsSnafu)
}
#[cfg(test)]
mod tests {
    use super::*;
    use async_trait::async_trait;
    use chrono::TimeZone;
    use data_types::{
        ColumnId, ColumnSet, CompactionLevel, NamespaceId, ParquetFile, ParquetFileId,
        ParquetFileParams, PartitionId, TableId, Timestamp, TransitionPartitionId,
    };
    use iox_catalog::{
        interface::Catalog,
        mem::MemCatalog,
        test_helpers::{arbitrary_namespace, arbitrary_table},
    };
    use object_store::path::Path;
    use once_cell::sync::Lazy;
    use parquet_file::ParquetFilePath;
    use std::{assert_eq, vec};
    use uuid::Uuid;

    // Two fixed instants; tests pick which is the cutoff and which is the
    // file's mtime to exercise "too new" vs "old enough" paths.
    static OLDER_TIME: Lazy<DateTime<Utc>> =
        Lazy::new(|| Utc.datetime_from_str("2022-01-01T00:00:00z", "%+").unwrap());
    static NEWER_TIME: Lazy<DateTime<Utc>> =
        Lazy::new(|| Utc.datetime_from_str("2022-02-02T00:00:00z", "%+").unwrap());

    /// Build an in-memory catalog pre-populated with one parquet file.
    async fn create_catalog_and_file() -> (Arc<dyn Catalog>, ParquetFile) {
        let metric_registry = Arc::new(metric::Registry::new());
        let catalog = Arc::new(MemCatalog::new(Arc::clone(&metric_registry)));
        create_schema_and_file(catalog).await
    }

    /// Create a namespace/table/partition and register one parquet file in `catalog`.
    async fn create_schema_and_file(catalog: Arc<dyn Catalog>) -> (Arc<dyn Catalog>, ParquetFile) {
        let mut repos = catalog.repositories().await;
        let namespace = arbitrary_namespace(&mut *repos, "namespace_parquet_file_test").await;
        let table = arbitrary_table(&mut *repos, "test_table", &namespace).await;
        let partition = repos
            .partitions()
            .create_or_get("one".into(), table.id)
            .await
            .unwrap();
        let parquet_file_params = ParquetFileParams {
            namespace_id: namespace.id,
            table_id: partition.table_id,
            partition_id: partition.transition_partition_id(),
            object_store_id: Uuid::new_v4(),
            min_time: Timestamp::new(1),
            max_time: Timestamp::new(10),
            file_size_bytes: 1337,
            row_count: 0,
            compaction_level: CompactionLevel::Initial,
            created_at: Timestamp::new(1),
            column_set: ColumnSet::new([ColumnId::new(1), ColumnId::new(2)]),
            max_l0_created_at: Timestamp::new(1),
        };
        let parquet_file = repos
            .parquet_files()
            .create(parquet_file_params)
            .await
            .unwrap();
        (catalog, parquet_file)
    }

    #[tokio::test]
    async fn dont_delete_new_file_in_catalog() {
        let (catalog, file_in_catalog) = create_catalog_and_file().await;
        let mut repositories = catalog.repositories().await;
        let parquet_files = repositories.parquet_files();
        let location = ParquetFilePath::new(
            file_in_catalog.namespace_id,
            file_in_catalog.table_id,
            &file_in_catalog.partition_id.clone(),
            file_in_catalog.object_store_id,
        )
        .object_store_path();
        let cutoff = *OLDER_TIME;
        let last_modified = *NEWER_TIME;
        let item = ObjectMeta {
            location,
            last_modified,
            size: 0,
            e_tag: None,
        };
        let results = should_delete(vec![item], cutoff, parquet_files).await;
        assert_eq!(results.len(), 0);
    }

    #[tokio::test]
    async fn dont_delete_new_file_not_in_catalog() {
        let metric_registry = Arc::new(metric::Registry::new());
        let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(Arc::clone(&metric_registry)));
        let mut repositories = catalog.repositories().await;
        let parquet_files = repositories.parquet_files();
        let location = ParquetFilePath::new(
            NamespaceId::new(1),
            TableId::new(2),
            &TransitionPartitionId::Deprecated(PartitionId::new(4)),
            Uuid::new_v4(),
        )
        .object_store_path();
        let cutoff = *OLDER_TIME;
        let last_modified = *NEWER_TIME;
        let item = ObjectMeta {
            location,
            last_modified,
            size: 0,
            e_tag: None,
        };
        let results = should_delete(vec![item], cutoff, parquet_files).await;
        assert_eq!(results.len(), 0);
    }

    #[tokio::test]
    async fn dont_delete_new_file_with_unparseable_path() {
        let metric_registry = Arc::new(metric::Registry::new());
        let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(Arc::clone(&metric_registry)));
        let mut repositories = catalog.repositories().await;
        let parquet_files = repositories.parquet_files();
        let cutoff = *OLDER_TIME;
        let last_modified = *NEWER_TIME;
        let item = ObjectMeta {
            location: Path::from("not-a-uuid.parquet"),
            last_modified,
            size: 0,
            e_tag: None,
        };
        let results = should_delete(vec![item], cutoff, parquet_files).await;
        assert_eq!(results.len(), 0);
    }

    #[tokio::test]
    async fn dont_delete_old_file_in_catalog() {
        let (catalog, file_in_catalog) = create_catalog_and_file().await;
        let mut repositories = catalog.repositories().await;
        let parquet_files = repositories.parquet_files();
        let location = ParquetFilePath::new(
            file_in_catalog.namespace_id,
            file_in_catalog.table_id,
            &file_in_catalog.partition_id.clone(),
            file_in_catalog.object_store_id,
        )
        .object_store_path();
        let cutoff = *NEWER_TIME;
        let last_modified = *OLDER_TIME;
        let item = ObjectMeta {
            location,
            last_modified,
            size: 0,
            e_tag: None,
        };
        let results = should_delete(vec![item], cutoff, parquet_files).await;
        assert_eq!(results.len(), 0);
    }

    #[tokio::test]
    async fn delete_old_file_not_in_catalog() {
        let metric_registry = Arc::new(metric::Registry::new());
        let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(Arc::clone(&metric_registry)));
        let mut repositories = catalog.repositories().await;
        let parquet_files = repositories.parquet_files();
        let location = ParquetFilePath::new(
            NamespaceId::new(1),
            TableId::new(2),
            &TransitionPartitionId::Deprecated(PartitionId::new(4)),
            Uuid::new_v4(),
        )
        .object_store_path();
        let cutoff = *NEWER_TIME;
        let last_modified = *OLDER_TIME;
        let item = ObjectMeta {
            location,
            last_modified,
            size: 0,
            e_tag: None,
        };
        let results = should_delete(vec![item.clone()], cutoff, parquet_files).await;
        assert_eq!(results.len(), 1);
        assert_eq!(results[0], item);
    }

    #[tokio::test]
    async fn delete_old_file_with_unparseable_path() {
        let metric_registry = Arc::new(metric::Registry::new());
        let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(Arc::clone(&metric_registry)));
        let mut repositories = catalog.repositories().await;
        let parquet_files = repositories.parquet_files();
        let cutoff = *NEWER_TIME;
        let last_modified = *OLDER_TIME;
        let item = ObjectMeta {
            location: Path::from("not-a-uuid.parquet"),
            last_modified,
            size: 0,
            e_tag: None,
        };
        let results = should_delete(vec![item.clone()], cutoff, parquet_files).await;
        assert_eq!(results.len(), 1);
        assert_eq!(results[0], item);
    }

    /// The garbage collector checks the catalog for files it _should not delete_. If we can't reach
    /// the catalog (some error), assume we are keeping all the files we are checking.
    /// [do_not_delete_on_catalog_error] tests that.
    #[tokio::test]
    async fn do_not_delete_on_catalog_error() {
        let metric_registry = Arc::new(metric::Registry::new());
        let catalog: Arc<dyn Catalog> = Arc::new(MemCatalog::new(Arc::clone(&metric_registry)));
        let (catalog, file_in_catalog) = create_schema_and_file(catalog).await;
        let mut repositories = catalog.repositories().await;
        let parquet_files = repositories.parquet_files();
        // A ParquetFileRepo that returns an error in the one method [should_delete] uses.
        let mut mocked_parquet_files = MockParquetFileRepo {
            inner: parquet_files,
        };
        let cutoff = *NEWER_TIME;
        let last_modified = *OLDER_TIME;
        let loc = ParquetFilePath::new(
            file_in_catalog.namespace_id,
            file_in_catalog.table_id,
            &file_in_catalog.partition_id.clone(),
            file_in_catalog.object_store_id,
        )
        .object_store_path();
        let item = ObjectMeta {
            location: loc,
            last_modified,
            size: 0,
            e_tag: None,
        };
        // check precondition, file exists in catalog
        let pf = mocked_parquet_files
            .get_by_object_store_id(file_in_catalog.object_store_id)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(pf, file_in_catalog);
        // because of the db error, there should be no results
        let results = should_delete(vec![item.clone()], cutoff, &mut mocked_parquet_files).await;
        assert_eq!(results.len(), 0);
    }

    /// Wraps a real repo, failing only `exists_by_object_store_id_batch`.
    struct MockParquetFileRepo<'a> {
        inner: &'a mut dyn ParquetFileRepo,
    }

    #[async_trait]
    impl ParquetFileRepo for MockParquetFileRepo<'_> {
        async fn create(
            &mut self,
            parquet_file_params: ParquetFileParams,
        ) -> iox_catalog::interface::Result<ParquetFile> {
            self.inner.create(parquet_file_params).await
        }

        async fn list_all(&mut self) -> iox_catalog::interface::Result<Vec<ParquetFile>> {
            self.inner.list_all().await
        }

        async fn flag_for_delete_by_retention(
            &mut self,
        ) -> iox_catalog::interface::Result<Vec<ParquetFileId>> {
            self.inner.flag_for_delete_by_retention().await
        }

        async fn list_by_namespace_not_to_delete(
            &mut self,
            namespace_id: NamespaceId,
        ) -> iox_catalog::interface::Result<Vec<ParquetFile>> {
            self.inner
                .list_by_namespace_not_to_delete(namespace_id)
                .await
        }

        async fn list_by_table_not_to_delete(
            &mut self,
            table_id: TableId,
        ) -> iox_catalog::interface::Result<Vec<ParquetFile>> {
            self.inner.list_by_table_not_to_delete(table_id).await
        }

        async fn delete_old_ids_only(
            &mut self,
            older_than: Timestamp,
        ) -> iox_catalog::interface::Result<Vec<ParquetFileId>> {
            self.inner.delete_old_ids_only(older_than).await
        }

        async fn list_by_partition_not_to_delete(
            &mut self,
            partition_id: &TransitionPartitionId,
        ) -> iox_catalog::interface::Result<Vec<ParquetFile>> {
            self.inner
                .list_by_partition_not_to_delete(partition_id)
                .await
        }

        async fn get_by_object_store_id(
            &mut self,
            object_store_id: Uuid,
        ) -> iox_catalog::interface::Result<Option<ParquetFile>> {
            self.inner.get_by_object_store_id(object_store_id).await
        }

        // The one deliberately-failing method: simulates an unreachable catalog.
        async fn exists_by_object_store_id_batch(
            &mut self,
            _object_store_ids: Vec<Uuid>,
        ) -> iox_catalog::interface::Result<Vec<Uuid>> {
            Err(iox_catalog::interface::Error::SqlxError {
                source: sqlx::Error::WorkerCrashed,
            })
        }

        async fn create_upgrade_delete(
            &mut self,
            delete: &[ParquetFileId],
            upgrade: &[ParquetFileId],
            create: &[ParquetFileParams],
            target_level: CompactionLevel,
        ) -> iox_catalog::interface::Result<Vec<ParquetFileId>> {
            // BUG FIX: this previously called `self.create_upgrade_delete(...)`,
            // recursing infinitely instead of delegating to the wrapped repo.
            self.inner
                .create_upgrade_delete(delete, upgrade, create, target_level)
                .await
        }
    }
}
|
use std::collections::HashMap;
use std::rc::Rc;
/// An incoming HTTP-like request routed by [BasicRouter].
pub struct Request {
    // HTTP method, e.g. "GET" or "POST".
    method: String,
    // Request path; used as the routing key.
    url: String,
    headers: HashMap<String, String>,
    // Raw request payload.
    body: Vec<u8>,
}
/// The response produced by a route callback (or the 404 fallback).
pub struct Response {
    // HTTP status code, e.g. 200 or 404.
    code: u32,
    headers: HashMap<String, String>,
    // Raw response payload.
    body: Vec<u8>,
}
type BoxedCallback = Box<Fn(&Request) -> Response>;
/// A minimal exact-match URL router mapping a path to its handler.
pub struct BasicRouter {
    // Exact URL string -> boxed handler closure.
    routes: HashMap<String, BoxedCallback>,
}
impl BasicRouter {
    /// Create an empty router
    pub fn new() -> BasicRouter {
        let routes = HashMap::new();
        BasicRouter { routes }
    }

    /// Add a route to the router
    pub fn add_route<C>(&mut self, url: &str, callback: C)
    where
        C: Fn(&Request) -> Response + 'static,
    {
        self.routes.insert(url.to_owned(), Box::new(callback));
    }

    /// Dispatch to the registered handler, or the 404 fallback.
    fn handle_request(&self, request: &Request) -> Response {
        if let Some(callback) = self.routes.get(&request.url) {
            callback(request)
        } else {
            not_found_response(request)
        }
    }
}
/// Build the 404 response used when no route matches, logging the miss.
fn not_found_response(request: &Request) -> Response {
    println!(
        "The request with url {} returned 404 Not Found",
        &request.url
    );
    Response {
        code: 404,
        headers: HashMap::new(),
        body: "Not found".as_bytes().to_vec(),
    }
}
#[test]
fn test_router() {
    let mut router = BasicRouter::new();
    use std::cell::RefCell;
    // Shared flag so the test can observe that the closure actually ran.
    // (`mut` was needless: the Rc itself is never reassigned.)
    let callback_called = Rc::new(RefCell::new(false));
    let assertion = callback_called.clone();
    router.add_route("/hello", move |request| {
        println!(
            "Handler working {} request {} with content {}",
            request.method,
            request.url,
            String::from_utf8_lossy(&request.body[..])
        );
        *callback_called.borrow_mut() = true;
        let mut response = String::from("Hello ");
        response.push_str(&String::from_utf8_lossy(&request.body[..]));
        Response {
            code: 200,
            headers: HashMap::new(),
            body: response.as_bytes().to_vec(),
        }
    });
    let resp = router.handle_request(&Request {
        url: "/hello".to_string(),
        method: "POST".to_string(),
        body: "This is superman!".as_bytes().to_vec(),
        headers: HashMap::new(),
    });
    // `assert!` instead of the `assert_eq!(x, true)` anti-pattern.
    assert!(*assertion.borrow());
    assert_eq!(
        String::from_utf8(resp.body).unwrap(),
        "Hello This is superman!"
    );
}
|
// auto generated, do not modify.
// created: Mon Feb 22 23:57:02 2016
// src-file: /QtGui/qimageiohandler.h
// dst-file: /src/gui/qimageiohandler.rs
//
// header block begin =>
#![feature(libc)]
#![feature(core)]
#![feature(collections)]
extern crate libc;
use self::libc::*;
// <= header block end
// main block begin =>
// <= main block end
// use block begin =>
use std::ops::Deref;
use super::super::core::qrect::*; // 771
use super::super::core::qbytearray::*; // 771
use super::qimage::*; // 773
use super::super::core::qvariant::*; // 771
use super::super::core::qiodevice::*; // 771
use super::super::core::qobject::*; // 771
use super::super::core::qobjectdefs::*; // 771
// use super::qimageiohandler::QImageIOHandler; // 773
// <= use block end
// ext block begin =>
// #[link(name = "Qt5Core")]
// #[link(name = "Qt5Gui")]
// #[link(name = "Qt5Widgets")]
// #[link(name = "QtInline")]
// Raw FFI declarations for the C wrapper functions around the C++
// QImageIOHandler / QImageIOPlugin methods. `qthis` is the C++ `this`
// pointer carried as a u64; bool results come back as c_char.
extern {
    fn QImageIOHandler_Class_Size() -> c_int;
    // proto: int QImageIOHandler::imageCount();
    fn C_ZNK15QImageIOHandler10imageCountEv(qthis: u64 /* *mut c_void*/) -> c_int;
    // proto: QRect QImageIOHandler::currentImageRect();
    fn C_ZNK15QImageIOHandler16currentImageRectEv(qthis: u64 /* *mut c_void*/) -> *mut c_void;
    // proto: bool QImageIOHandler::jumpToImage(int imageNumber);
    fn C_ZN15QImageIOHandler11jumpToImageEi(qthis: u64 /* *mut c_void*/, arg0: c_int) -> c_char;
    // proto: int QImageIOHandler::currentImageNumber();
    fn C_ZNK15QImageIOHandler18currentImageNumberEv(qthis: u64 /* *mut c_void*/) -> c_int;
    // proto: void QImageIOHandler::setFormat(const QByteArray & format);
    fn C_ZN15QImageIOHandler9setFormatERK10QByteArray(qthis: u64 /* *mut c_void*/, arg0: *mut c_void);
    // proto: bool QImageIOHandler::jumpToNextImage();
    fn C_ZN15QImageIOHandler15jumpToNextImageEv(qthis: u64 /* *mut c_void*/) -> c_char;
    // proto: void QImageIOHandler::~QImageIOHandler();
    fn C_ZN15QImageIOHandlerD2Ev(qthis: u64 /* *mut c_void*/);
    // proto: int QImageIOHandler::loopCount();
    fn C_ZNK15QImageIOHandler9loopCountEv(qthis: u64 /* *mut c_void*/) -> c_int;
    // proto: void QImageIOHandler::QImageIOHandler();
    fn C_ZN15QImageIOHandlerC2Ev() -> u64;
    // proto: bool QImageIOHandler::read(QImage * image);
    fn C_ZN15QImageIOHandler4readEP6QImage(qthis: u64 /* *mut c_void*/, arg0: *mut c_void) -> c_char;
    // proto: QByteArray QImageIOHandler::name();
    fn C_ZNK15QImageIOHandler4nameEv(qthis: u64 /* *mut c_void*/) -> *mut c_void;
    // proto: QByteArray QImageIOHandler::format();
    fn C_ZNK15QImageIOHandler6formatEv(qthis: u64 /* *mut c_void*/) -> *mut c_void;
    // proto: int QImageIOHandler::nextImageDelay();
    fn C_ZNK15QImageIOHandler14nextImageDelayEv(qthis: u64 /* *mut c_void*/) -> c_int;
    // proto: void QImageIOHandler::setDevice(QIODevice * device);
    fn C_ZN15QImageIOHandler9setDeviceEP9QIODevice(qthis: u64 /* *mut c_void*/, arg0: *mut c_void);
    // proto: bool QImageIOHandler::canRead();
    fn C_ZNK15QImageIOHandler7canReadEv(qthis: u64 /* *mut c_void*/) -> c_char;
    // proto: QIODevice * QImageIOHandler::device();
    fn C_ZNK15QImageIOHandler6deviceEv(qthis: u64 /* *mut c_void*/) -> *mut c_void;
    // proto: bool QImageIOHandler::write(const QImage & image);
    fn C_ZN15QImageIOHandler5writeERK6QImage(qthis: u64 /* *mut c_void*/, arg0: *mut c_void) -> c_char;
    fn QImageIOPlugin_Class_Size() -> c_int;
    // proto: const QMetaObject * QImageIOPlugin::metaObject();
    fn C_ZNK14QImageIOPlugin10metaObjectEv(qthis: u64 /* *mut c_void*/) -> *mut c_void;
    // proto: void QImageIOPlugin::~QImageIOPlugin();
    fn C_ZN14QImageIOPluginD2Ev(qthis: u64 /* *mut c_void*/);
    // proto: QImageIOHandler * QImageIOPlugin::create(QIODevice * device, const QByteArray & format);
    fn C_ZNK14QImageIOPlugin6createEP9QIODeviceRK10QByteArray(qthis: u64 /* *mut c_void*/, arg0: *mut c_void, arg1: *mut c_void) -> *mut c_void;
    // proto: void QImageIOPlugin::QImageIOPlugin(QObject * parent);
    fn C_ZN14QImageIOPluginC2EP7QObject(arg0: *mut c_void) -> u64;
} // <= ext block end
// body block begin =>
// class sizeof(QImageIOHandler)=1
// Rust-side wrapper: holds only the C++ instance pointer (as u64).
#[derive(Default)]
pub struct QImageIOHandler {
    // qbase: None,
    pub qclsinst: u64 /* *mut c_void*/,
}
// class sizeof(QImageIOPlugin)=1
// Wrapper for QImageIOPlugin; `qbase` models the QObject base class.
#[derive(Default)]
pub struct QImageIOPlugin {
    qbase: QObject,
    pub qclsinst: u64 /* *mut c_void*/,
}
impl QImageIOHandler {
    /// Wrap an existing C++ QImageIOHandler instance pointer.
    pub fn inheritFrom(qthis: u64 /* *mut c_void*/) -> QImageIOHandler {
        QImageIOHandler { qclsinst: qthis, ..Default::default() }
    }
}
// proto: int QImageIOHandler::imageCount();
impl /*struct*/ QImageIOHandler {
pub fn imageCount<RetType, T: QImageIOHandler_imageCount<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.imageCount(self);
// return 1;
}
}
pub trait QImageIOHandler_imageCount<RetType> {
fn imageCount(self , rsthis: & QImageIOHandler) -> RetType;
}
// proto: int QImageIOHandler::imageCount();
impl<'a> /*trait*/ QImageIOHandler_imageCount<i32> for () {
fn imageCount(self , rsthis: & QImageIOHandler) -> i32 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZNK15QImageIOHandler10imageCountEv()};
let mut ret = unsafe {C_ZNK15QImageIOHandler10imageCountEv(rsthis.qclsinst)};
return ret as i32; // 1
// return 1;
}
}
// proto: QRect QImageIOHandler::currentImageRect();
impl /*struct*/ QImageIOHandler {
pub fn currentImageRect<RetType, T: QImageIOHandler_currentImageRect<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.currentImageRect(self);
// return 1;
}
}
pub trait QImageIOHandler_currentImageRect<RetType> {
fn currentImageRect(self , rsthis: & QImageIOHandler) -> RetType;
}
// proto: QRect QImageIOHandler::currentImageRect();
impl<'a> /*trait*/ QImageIOHandler_currentImageRect<QRect> for () {
fn currentImageRect(self , rsthis: & QImageIOHandler) -> QRect {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZNK15QImageIOHandler16currentImageRectEv()};
let mut ret = unsafe {C_ZNK15QImageIOHandler16currentImageRectEv(rsthis.qclsinst)};
let mut ret1 = QRect::inheritFrom(ret as u64);
return ret1;
// return 1;
}
}
// proto: bool QImageIOHandler::jumpToImage(int imageNumber);
impl /*struct*/ QImageIOHandler {
pub fn jumpToImage<RetType, T: QImageIOHandler_jumpToImage<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.jumpToImage(self);
// return 1;
}
}
pub trait QImageIOHandler_jumpToImage<RetType> {
fn jumpToImage(self , rsthis: & QImageIOHandler) -> RetType;
}
// proto: bool QImageIOHandler::jumpToImage(int imageNumber);
impl<'a> /*trait*/ QImageIOHandler_jumpToImage<i8> for (i32) {
fn jumpToImage(self , rsthis: & QImageIOHandler) -> i8 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN15QImageIOHandler11jumpToImageEi()};
let arg0 = self as c_int;
let mut ret = unsafe {C_ZN15QImageIOHandler11jumpToImageEi(rsthis.qclsinst, arg0)};
return ret as i8; // 1
// return 1;
}
}
// proto: int QImageIOHandler::currentImageNumber();
impl /*struct*/ QImageIOHandler {
pub fn currentImageNumber<RetType, T: QImageIOHandler_currentImageNumber<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.currentImageNumber(self);
// return 1;
}
}
pub trait QImageIOHandler_currentImageNumber<RetType> {
fn currentImageNumber(self , rsthis: & QImageIOHandler) -> RetType;
}
// proto: int QImageIOHandler::currentImageNumber();
impl<'a> /*trait*/ QImageIOHandler_currentImageNumber<i32> for () {
fn currentImageNumber(self , rsthis: & QImageIOHandler) -> i32 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZNK15QImageIOHandler18currentImageNumberEv()};
let mut ret = unsafe {C_ZNK15QImageIOHandler18currentImageNumberEv(rsthis.qclsinst)};
return ret as i32; // 1
// return 1;
}
}
// proto: void QImageIOHandler::setFormat(const QByteArray & format);
impl /*struct*/ QImageIOHandler {
pub fn setFormat<RetType, T: QImageIOHandler_setFormat<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.setFormat(self);
// return 1;
}
}
pub trait QImageIOHandler_setFormat<RetType> {
fn setFormat(self , rsthis: & QImageIOHandler) -> RetType;
}
// proto: void QImageIOHandler::setFormat(const QByteArray & format);
impl<'a> /*trait*/ QImageIOHandler_setFormat<()> for (&'a QByteArray) {
fn setFormat(self , rsthis: & QImageIOHandler) -> () {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN15QImageIOHandler9setFormatERK10QByteArray()};
let arg0 = self.qclsinst as *mut c_void;
unsafe {C_ZN15QImageIOHandler9setFormatERK10QByteArray(rsthis.qclsinst, arg0)};
// return 1;
}
}
// proto: bool QImageIOHandler::jumpToNextImage();
impl /*struct*/ QImageIOHandler {
pub fn jumpToNextImage<RetType, T: QImageIOHandler_jumpToNextImage<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.jumpToNextImage(self);
// return 1;
}
}
pub trait QImageIOHandler_jumpToNextImage<RetType> {
fn jumpToNextImage(self , rsthis: & QImageIOHandler) -> RetType;
}
// proto: bool QImageIOHandler::jumpToNextImage();
impl<'a> /*trait*/ QImageIOHandler_jumpToNextImage<i8> for () {
fn jumpToNextImage(self , rsthis: & QImageIOHandler) -> i8 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN15QImageIOHandler15jumpToNextImageEv()};
let mut ret = unsafe {C_ZN15QImageIOHandler15jumpToNextImageEv(rsthis.qclsinst)};
return ret as i8; // 1
// return 1;
}
}
// proto: void QImageIOHandler::~QImageIOHandler();
impl QImageIOHandler {
    /// Invoke the C++ destructor on the wrapped instance.
    pub fn free<RetType, T: QImageIOHandler_free<RetType>>(&self, overload_args: T) -> RetType {
        overload_args.free(self)
    }
}
pub trait QImageIOHandler_free<RetType> {
    fn free(self, rsthis: &QImageIOHandler) -> RetType;
}
impl QImageIOHandler_free<()> for () {
    fn free(self, rsthis: &QImageIOHandler) -> () {
        unsafe { C_ZN15QImageIOHandlerD2Ev(rsthis.qclsinst) };
    }
}

// proto: int QImageIOHandler::loopCount();
impl QImageIOHandler {
    pub fn loopCount<RetType, T: QImageIOHandler_loopCount<RetType>>(&self, overload_args: T) -> RetType {
        overload_args.loopCount(self)
    }
}
pub trait QImageIOHandler_loopCount<RetType> {
    fn loopCount(self, rsthis: &QImageIOHandler) -> RetType;
}
impl QImageIOHandler_loopCount<i32> for () {
    fn loopCount(self, rsthis: &QImageIOHandler) -> i32 {
        unsafe { C_ZNK15QImageIOHandler9loopCountEv(rsthis.qclsinst) as i32 }
    }
}

// proto: void QImageIOHandler::QImageIOHandler();
impl QImageIOHandler {
    /// Construct a new C++ QImageIOHandler and wrap it.
    pub fn new<T: QImageIOHandler_new>(value: T) -> QImageIOHandler {
        value.new()
    }
}
pub trait QImageIOHandler_new {
    fn new(self) -> QImageIOHandler;
}
impl QImageIOHandler_new for () {
    fn new(self) -> QImageIOHandler {
        // BUG FIX: the generated code also calloc'd a `QImageIOHandler_Class_Size()`
        // byte placeholder buffer (`qthis_ph`) that was never used or freed — a
        // memory leak on every construction. The C side allocates the real object.
        let qthis: u64 = unsafe { C_ZN15QImageIOHandlerC2Ev() };
        QImageIOHandler { qclsinst: qthis, ..Default::default() }
    }
}
// proto: bool QImageIOHandler::read(QImage * image);
impl /*struct*/ QImageIOHandler {
pub fn read<RetType, T: QImageIOHandler_read<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.read(self);
// return 1;
}
}
pub trait QImageIOHandler_read<RetType> {
fn read(self , rsthis: & QImageIOHandler) -> RetType;
}
// proto: bool QImageIOHandler::read(QImage * image);
impl<'a> /*trait*/ QImageIOHandler_read<i8> for (&'a QImage) {
fn read(self , rsthis: & QImageIOHandler) -> i8 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN15QImageIOHandler4readEP6QImage()};
let arg0 = self.qclsinst as *mut c_void;
let mut ret = unsafe {C_ZN15QImageIOHandler4readEP6QImage(rsthis.qclsinst, arg0)};
return ret as i8; // 1
// return 1;
}
}
// proto: QByteArray QImageIOHandler::name();
impl /*struct*/ QImageIOHandler {
pub fn name<RetType, T: QImageIOHandler_name<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.name(self);
// return 1;
}
}
pub trait QImageIOHandler_name<RetType> {
fn name(self , rsthis: & QImageIOHandler) -> RetType;
}
// proto: QByteArray QImageIOHandler::name();
impl<'a> /*trait*/ QImageIOHandler_name<QByteArray> for () {
fn name(self , rsthis: & QImageIOHandler) -> QByteArray {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZNK15QImageIOHandler4nameEv()};
let mut ret = unsafe {C_ZNK15QImageIOHandler4nameEv(rsthis.qclsinst)};
let mut ret1 = QByteArray::inheritFrom(ret as u64);
return ret1;
// return 1;
}
}
// proto: QByteArray QImageIOHandler::format();
impl /*struct*/ QImageIOHandler {
pub fn format<RetType, T: QImageIOHandler_format<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.format(self);
// return 1;
}
}
pub trait QImageIOHandler_format<RetType> {
fn format(self , rsthis: & QImageIOHandler) -> RetType;
}
// proto: QByteArray QImageIOHandler::format();
impl<'a> /*trait*/ QImageIOHandler_format<QByteArray> for () {
fn format(self , rsthis: & QImageIOHandler) -> QByteArray {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZNK15QImageIOHandler6formatEv()};
let mut ret = unsafe {C_ZNK15QImageIOHandler6formatEv(rsthis.qclsinst)};
let mut ret1 = QByteArray::inheritFrom(ret as u64);
return ret1;
// return 1;
}
}
// proto: int QImageIOHandler::nextImageDelay();
impl /*struct*/ QImageIOHandler {
pub fn nextImageDelay<RetType, T: QImageIOHandler_nextImageDelay<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.nextImageDelay(self);
// return 1;
}
}
pub trait QImageIOHandler_nextImageDelay<RetType> {
fn nextImageDelay(self , rsthis: & QImageIOHandler) -> RetType;
}
// proto: int QImageIOHandler::nextImageDelay();
impl<'a> /*trait*/ QImageIOHandler_nextImageDelay<i32> for () {
fn nextImageDelay(self , rsthis: & QImageIOHandler) -> i32 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZNK15QImageIOHandler14nextImageDelayEv()};
let mut ret = unsafe {C_ZNK15QImageIOHandler14nextImageDelayEv(rsthis.qclsinst)};
return ret as i32; // 1
// return 1;
}
}
// proto: void QImageIOHandler::setDevice(QIODevice * device);
impl QImageIOHandler {
    pub fn setDevice<RetType, T: QImageIOHandler_setDevice<RetType>>(&self, overload_args: T) -> RetType {
        overload_args.setDevice(self)
    }
}
pub trait QImageIOHandler_setDevice<RetType> {
    fn setDevice(self, rsthis: &QImageIOHandler) -> RetType;
}
impl<'a> QImageIOHandler_setDevice<()> for &'a QIODevice {
    fn setDevice(self, rsthis: &QImageIOHandler) -> () {
        let device = self.qclsinst as *mut c_void;
        unsafe { C_ZN15QImageIOHandler9setDeviceEP9QIODevice(rsthis.qclsinst, device) };
    }
}

// proto: bool QImageIOHandler::canRead();
impl QImageIOHandler {
    pub fn canRead<RetType, T: QImageIOHandler_canRead<RetType>>(&self, overload_args: T) -> RetType {
        overload_args.canRead(self)
    }
}
pub trait QImageIOHandler_canRead<RetType> {
    fn canRead(self, rsthis: &QImageIOHandler) -> RetType;
}
impl QImageIOHandler_canRead<i8> for () {
    fn canRead(self, rsthis: &QImageIOHandler) -> i8 {
        unsafe { C_ZNK15QImageIOHandler7canReadEv(rsthis.qclsinst) as i8 }
    }
}

// proto: QIODevice * QImageIOHandler::device();
impl QImageIOHandler {
    pub fn device<RetType, T: QImageIOHandler_device<RetType>>(&self, overload_args: T) -> RetType {
        overload_args.device(self)
    }
}
pub trait QImageIOHandler_device<RetType> {
    fn device(self, rsthis: &QImageIOHandler) -> RetType;
}
impl QImageIOHandler_device<QIODevice> for () {
    fn device(self, rsthis: &QImageIOHandler) -> QIODevice {
        let raw = unsafe { C_ZNK15QImageIOHandler6deviceEv(rsthis.qclsinst) };
        QIODevice::inheritFrom(raw as u64)
    }
}

// proto: bool QImageIOHandler::write(const QImage & image);
impl QImageIOHandler {
    pub fn write<RetType, T: QImageIOHandler_write<RetType>>(&self, overload_args: T) -> RetType {
        overload_args.write(self)
    }
}
pub trait QImageIOHandler_write<RetType> {
    fn write(self, rsthis: &QImageIOHandler) -> RetType;
}
impl<'a> QImageIOHandler_write<i8> for &'a QImage {
    fn write(self, rsthis: &QImageIOHandler) -> i8 {
        let image = self.qclsinst as *mut c_void;
        unsafe { C_ZN15QImageIOHandler5writeERK6QImage(rsthis.qclsinst, image) as i8 }
    }
}
// NOTE(review): machine-generated FFI glue for QImageIOPlugin, same pattern as
// the QImageIOHandler bindings above.
impl /*struct*/ QImageIOPlugin {
    /// Wraps an existing C++ object address in a Rust-side QImageIOPlugin,
    /// also wiring up the QObject base wrapper.
    pub fn inheritFrom(qthis: u64 /* *mut c_void*/) -> QImageIOPlugin {
        return QImageIOPlugin { qbase: QObject::inheritFrom(qthis), qclsinst: qthis, ..Default::default() };
    }
}
// Deref to the QObject base so base-class methods are callable directly.
impl Deref for QImageIOPlugin {
    type Target = QObject;
    fn deref(&self) -> &QObject {
        return &self.qbase;
    }
}
impl AsRef<QObject> for QImageIOPlugin {
    fn as_ref(&self) -> &QObject {
        return &self.qbase;
    }
}
// proto: const QMetaObject * QImageIOPlugin::metaObject();
impl /*struct*/ QImageIOPlugin {
    pub fn metaObject<RetType, T: QImageIOPlugin_metaObject<RetType>>(&self, overload_args: T) -> RetType {
        return overload_args.metaObject(self);
        // return 1;
    }
}
pub trait QImageIOPlugin_metaObject<RetType> {
    fn metaObject(self, rsthis: &QImageIOPlugin) -> RetType;
}
// proto: const QMetaObject * QImageIOPlugin::metaObject();
impl<'a> /*trait*/ QImageIOPlugin_metaObject<QMetaObject> for () {
    fn metaObject(self, rsthis: &QImageIOPlugin) -> QMetaObject {
        // let qthis: *mut c_void = unsafe{calloc(1, 32)};
        // unsafe{_ZNK14QImageIOPlugin10metaObjectEv()};
        let mut ret = unsafe { C_ZNK14QImageIOPlugin10metaObjectEv(rsthis.qclsinst) };
        let mut ret1 = QMetaObject::inheritFrom(ret as u64);
        return ret1;
        // return 1;
    }
}
// proto: void QImageIOPlugin::~QImageIOPlugin();
impl /*struct*/ QImageIOPlugin {
    /// Invokes the C++ destructor on the wrapped object.
    pub fn free<RetType, T: QImageIOPlugin_free<RetType>>(&self, overload_args: T) -> RetType {
        return overload_args.free(self);
        // return 1;
    }
}
pub trait QImageIOPlugin_free<RetType> {
    fn free(self, rsthis: &QImageIOPlugin) -> RetType;
}
// proto: void QImageIOPlugin::~QImageIOPlugin();
impl<'a> /*trait*/ QImageIOPlugin_free<()> for () {
    fn free(self, rsthis: &QImageIOPlugin) -> () {
        // let qthis: *mut c_void = unsafe{calloc(1, 32)};
        // unsafe{_ZN14QImageIOPluginD2Ev()};
        unsafe { C_ZN14QImageIOPluginD2Ev(rsthis.qclsinst) };
        // return 1;
    }
}
// proto: QImageIOHandler * QImageIOPlugin::create(QIODevice * device, const QByteArray & format);
impl /*struct*/ QImageIOPlugin {
    pub fn create<RetType, T: QImageIOPlugin_create<RetType>>(&self, overload_args: T) -> RetType {
        return overload_args.create(self);
        // return 1;
    }
}
pub trait QImageIOPlugin_create<RetType> {
    fn create(self, rsthis: &QImageIOPlugin) -> RetType;
}
// proto: QImageIOHandler * QImageIOPlugin::create(QIODevice * device, const QByteArray & format);
impl<'a> /*trait*/ QImageIOPlugin_create<QImageIOHandler> for (&'a QIODevice, Option<&'a QByteArray>) {
    fn create(self, rsthis: &QImageIOPlugin) -> QImageIOHandler {
        // let qthis: *mut c_void = unsafe{calloc(1, 32)};
        // unsafe{_ZNK14QImageIOPlugin6createEP9QIODeviceRK10QByteArray()};
        let arg0 = self.0.qclsinst as *mut c_void;
        // A missing format argument is substituted with a fresh empty QByteArray.
        let arg1 = (if self.1.is_none() { QByteArray::new(()).qclsinst } else { self.1.unwrap().qclsinst }) as *mut c_void;
        let mut ret = unsafe { C_ZNK14QImageIOPlugin6createEP9QIODeviceRK10QByteArray(rsthis.qclsinst, arg0, arg1) };
        let mut ret1 = QImageIOHandler::inheritFrom(ret as u64);
        return ret1;
        // return 1;
    }
}
// proto: void QImageIOPlugin::QImageIOPlugin(QObject * parent);
impl /*struct*/ QImageIOPlugin {
    pub fn new<T: QImageIOPlugin_new>(value: T) -> QImageIOPlugin {
        let rsthis = value.new();
        return rsthis;
        // return 1;
    }
}
pub trait QImageIOPlugin_new {
    fn new(self) -> QImageIOPlugin;
}
// proto: void QImageIOPlugin::QImageIOPlugin(QObject * parent);
impl<'a> /*trait*/ QImageIOPlugin_new for (Option<&'a QObject>) {
    /// Constructs a QImageIOPlugin through the C shim, with an optional
    /// QObject parent (a null parent pointer when `None`).
    fn new(self) -> QImageIOPlugin {
        // unsafe{_ZN14QImageIOPluginC2EP7QObject()};
        let arg0 = (if self.is_none() { 0 } else { self.unwrap().qclsinst }) as *mut c_void;
        // The shim allocates and constructs the C++ object itself and returns
        // its address; the generator's old `calloc(1, QImageIOPlugin_Class_Size())`
        // placeholder here was never used and leaked on every call, so it is removed.
        let qthis: u64 = unsafe { C_ZN14QImageIOPluginC2EP7QObject(arg0) };
        let rsthis = QImageIOPlugin { qbase: QObject::inheritFrom(qthis), qclsinst: qthis, ..Default::default() };
        return rsthis;
        // return 1;
    }
}
// <= body block end
|
extern crate regex;
use std::io::{self, Write};
use regex::RegexSet;
/// Prompts for a phone number, validates it against a set of accepted
/// layouts, and prints it normalized as `(XXX) XXX-XXXX`.
fn main() {
    let stdin = io::stdin();
    let mut number = String::new();
    // One regex per accepted layout; the set matches if any pattern matches.
    // (Was named `_formats`, which wrongly signalled "unused".)
    let formats = RegexSet::new(&[
        r"^\d{3}-\d{4}$",                  // 555-1212
        r"^\d{7}$",                        // 5551212
        r"^\d{10}$",                       // 4345551212
        r"^\d{6}-\d{4}$",                  // 434555-1212
        r"^\d{3}-\d{3}-\d{4}$",            // 434-555-1212
        r"^\d{4}-\d{3}-\d{4}$",            // 1434-555-1212
        r"^\(\d{3}\)\d{7}$",               // (434)5551212
        r"^\+\d\s\(\d{3}\)\s\d{3}-\d{4}$", // +1 (434) 555-1212
        r"^\d{3}\.\d{3}\.\d{4}$",          // 434.555.1212
        r"^\(\d{3}\)\s\d{3}-\d{4}$",       // (434) 555-1212
        r"^\+\d\s\d{3}-\d{3}-\d{4}$",      // +1 434-555-1212
    ]).unwrap();
    print!("Enter a phone number: ");
    io::stdout().flush().expect("Not flushed!");
    stdin.read_line(&mut number)
        .expect("Whoops! Didn't catch that!");
    if formats.is_match(number.trim()) {
        println!("Match found!");
        // Strip punctuation and the "+1" country prefix so only digits remain.
        let mut num = number.trim()
            .replace("+1", "")
            .replace('(', "")
            .replace(')', "")
            .replace('-', "")
            .replace(' ', "");
        // 11-digit forms with a bare leading country digit (e.g. 1434-555-1212)
        // used to fall through to the error branch; normalize them to 10 digits.
        if num.len() == 11 && num.starts_with('1') {
            num.remove(0);
        }
        if num.len() == 10 {
            println!("Your number has been reformatted.");
            // `num` is all ASCII digits here, so byte-range slicing is safe.
            println!("({}) {}-{}", &num[0..3], &num[3..6], &num[6..10]);
        } else if num.len() == 7 {
            println!("Your number has been reformatted.");
            println!("(XXX) {}-{}", &num[0..3], &num[3..7]);
        } else {
            // Was "Not enough digits!", which was wrong for too-long inputs.
            println!("Unexpected number of digits! :(");
        }
    } else {
        println!("Match not found! Try again! :(");
    }
}
|
// Standard library
use std::cell::RefCell;
use std::collections::{hash_map::Entry, HashMap};
use std::mem::take;
use std::sync::{Arc, Mutex, Weak};
// This crate
use crate::stt::{SttPoolItem, SttSet};
// Other crates
use anyhow::{anyhow, Result};
use log::debug;
thread_local! {
    /// Per-thread registry of the capabilities announced by connected clients.
    pub static CAPS_MANAGER: RefCell<CapsManager> = RefCell::new(CapsManager::new());
}
/*** Session*******************************************************************/
/// Tracks the active [`Session`] for each client UUID.
pub struct SessionManager {
    sessions: HashMap<String, Arc<Mutex<Session>>>,
}
// Session
impl SessionManager {
    /// Creates a manager with no active sessions.
    pub fn new() -> Self {
        Self { sessions: HashMap::new() }
    }
    /// Returns a weak handle to the session for `uuid`, creating the session
    /// on first use. Callers upgrade the `Weak` when they need access.
    pub fn session_for(&mut self, uuid: String) -> Weak<Mutex<Session>> {
        let slot = self
            .sessions
            .entry(uuid.clone())
            .or_insert_with(|| Arc::new(Mutex::new(Session::new(uuid))));
        Arc::downgrade(slot)
    }
    /// Drops the session for `uuid`; errors if there was none.
    pub fn end_session(&mut self, uuid: &str) -> Result<()> {
        self.sessions
            .remove(uuid)
            .map(|_| ())
            .ok_or_else(|| anyhow!("{} had no active session", uuid))
    }
}
/// State tracked for one client session.
pub struct Session {
    // Identifier of the owning device/client; used in error messages.
    device: String,
    // The STT instance currently decoding an utterance, if one is in progress.
    curr_utt: Option<SttPoolItem>,
}
impl Session {
    /// Creates a session for `device` with no utterance in progress.
    fn new(device: String) -> Self {
        Self {
            device,
            curr_utt: None,
        }
    }
    /// Returns the STT instance for the current utterance; if none is active,
    /// picks one from `set` based on `audio`, starts decoding, and stores it.
    pub async fn get_stt_or_make(
        &mut self,
        set: &mut SttSet,
        audio: &[i16],
    ) -> Result<&mut SttPoolItem> {
        match self.curr_utt {
            Some(ref mut i) => Ok(i),
            None => {
                let mut stt = set.guess_stt(audio).await?;
                debug!("STT for current session: {}", stt.get_info());
                stt.begin_decoding().await?;
                self.curr_utt = Some(stt);
                // `curr_utt` was set just above, so this unwrap cannot fail.
                Ok(self.curr_utt.as_mut().unwrap())
            }
        }
    }
    /// Ends the current utterance (dropping its STT item); errors if none was active.
    pub fn end_utt(&mut self) -> Result<()> {
        match take(&mut self.curr_utt) {
            Some(_) => Ok(()),
            None => Err(anyhow!("{} had no active session", &self.device)),
        }
    }
}
/*** Capabilities *************************************************************/
/// Tracks, per connected client UUID, the set of capabilities it announced.
pub struct CapsManager {
    // For now just a map of capabilities, which is a map in which if exists is true
    clients_caps: HashMap<String, HashMap<String, ()>>,
}
// Capabilities
impl CapsManager {
    /// Creates a manager with no registered clients.
    fn new() -> Self {
        Self {
            clients_caps: HashMap::new(),
        }
    }
    /// Registers (or replaces) the capability set announced by `uuid`.
    pub fn add_client(&mut self, uuid: &str, caps: Vec<String>) {
        // Collect directly into the unit-valued map instead of a manual loop.
        let caps_map = caps.into_iter().map(|cap| (cap, ())).collect();
        self.clients_caps.insert(uuid.to_owned(), caps_map);
    }
    /// Returns whether client `uuid` announced capability `cap_name`.
    /// Unknown clients report `false`.
    pub fn has_cap(&self, uuid: &str, cap_name: &str) -> bool {
        // `contains_key` replaces the verbose `.get().map(|_| true).unwrap_or(false)`.
        self.clients_caps
            .get(uuid)
            .map_or(false, |client| client.contains_key(cap_name))
    }
    /// Forgets a client's capabilities; errors if it was never registered.
    pub fn disconnected(&mut self, uuid: &str) -> Result<()> {
        // `anyhow!` formats directly; wrapping a `format!` in it was redundant.
        self.clients_caps.remove(uuid).map(|_| ()).ok_or_else(|| {
            anyhow!(
                "Satellite {} asked for a disconnect but was not connected",
                uuid
            )
        })
    }
}
|
// This file is part of linux-epoll. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/linux-epoll/master/COPYRIGHT. No part of linux-epoll, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file.
// Copyright © 2019 The developers of linux-epoll. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/linux-epoll/master/COPYRIGHT.
/// Next secure version 3 (`NSEC3`) record data.
///
/// NOTE(review): field layout mirrors the NSEC3 wire format (presumably
/// RFC 5155) — confirm against the parser that produces this struct.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct NextSecureVersion3<'a>
{
	/// Opt-Out.
	pub opt_out: bool,
	/// Iteration count.
	pub iterations: u16,
	/// Salt.
	pub salt: &'a [u8],
	/// Next owner name, hashed.
	pub next_hashed_owner_name: NextSecureVersion3Hash<'a>,
	/// Type bitmaps.
	pub type_bitmaps: TypeBitmaps,
}
|
/// Configuration for a 1-D Newton root finder with a finite-difference
/// derivative.
#[derive(Debug)]
pub struct NewtonMethod {
    /// Convergence tolerance on |f(x)|.
    pub tol_f: f64,
    /// Maximum number of Newton iterations.
    pub max_iter: i32,
    /// Absolute step used for the forward-difference derivative.
    pub abs_step: f64,
    /// Relative derivative step — currently unused (kept for API compatibility).
    pub rel_step: f64,
    /// Minimum x step — currently unused (see TODO in `solve`).
    pub min_x_step: f64,
    /// Minimum f(x) step — currently unused.
    pub min_f_x_step: f64
}
impl NewtonMethod {
    /// Finds a root of `fun` starting from `x0`.
    ///
    /// Returns `(x, f(x))` for the last iterate, which satisfies
    /// `|f(x)| < tol_f` unless `max_iter` was exhausted first.
    ///
    /// Fixes over the previous version: the callable parameter now has valid
    /// Rust syntax (`impl Fn(f64) -> f64`), the early-convergence branch
    /// actually returns, the error is compared via `.abs()`, the derivative
    /// step reads `self.abs_step`, and the loop evaluates `fun(curr_x)`
    /// (it previously evaluated `fun(curr_fx)`).
    pub fn solve(self, x0: f64, fun: impl Fn(f64) -> f64) -> (f64, f64) {
        // TODO: check if x0 is in admissible area
        let f_x0 = fun(x0);
        // Already converged at the starting point.
        if f_x0.abs() < self.tol_f {
            return (x0, f_x0);
        }
        // Iterative case.
        // TODO: consider min_x_step / min_f_x_step as extra stop criteria.
        let mut curr_x = x0;
        let mut curr_fx = f_x0;
        let mut iter = 0;
        while curr_fx.abs() > self.tol_f && iter < self.max_iter {
            let h = self.compute_h();
            // Forward finite-difference approximation of f'(x).
            let fun_derivative_val = (fun(curr_x + h) - fun(curr_x)) / h;
            let delta_x = -curr_fx / fun_derivative_val;
            curr_x += delta_x;
            curr_fx = fun(curr_x);
            iter += 1;
        }
        (curr_x, curr_fx)
    }
    /// Step size for the finite-difference derivative. For now this is just
    /// the configured absolute step (the body was previously empty, which
    /// did not compile); `rel_step` scaling can be added here later.
    fn compute_h(&self) -> f64 {
        self.abs_step
    }
}
// Test that a covariant struct does not permit the lifetime of a
// reference to be enlarged.
// revisions: base nll
// ignore-compare-mode-nll
//[nll] compile-flags: -Z borrowck=mir
// Compile-fail fixture: a covariant struct must NOT allow the contained
// reference's lifetime to be enlarged from 'min to 'max. The `~^` markers
// below are relative error annotations and must stay adjacent to `v`.
struct SomeStruct<T>(T);
fn foo<'min,'max>(v: SomeStruct<&'min ()>)
                  -> SomeStruct<&'max ()>
    where 'max : 'min
{
    v
    //[base]~^ ERROR mismatched types
    //[nll]~^^ ERROR lifetime may not live long enough
}
fn main() { }
|
// Copyright 2022 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use common_ast::ast::Expr;
use common_ast::ast::Literal;
use common_ast::ast::OrderByExpr;
use common_exception::ErrorCode;
use common_exception::Result;
use super::bind_context::NameResolutionResult;
use crate::binder::scalar::ScalarBinder;
use crate::binder::select::SelectList;
use crate::binder::Binder;
use crate::binder::ColumnBinding;
use crate::normalize_identifier;
use crate::optimizer::SExpr;
use crate::planner::semantic::GroupingChecker;
use crate::plans::AggregateFunction;
use crate::plans::AndExpr;
use crate::plans::BoundColumnRef;
use crate::plans::CastExpr;
use crate::plans::ComparisonExpr;
use crate::plans::EvalScalar;
use crate::plans::FunctionCall;
use crate::plans::NotExpr;
use crate::plans::OrExpr;
use crate::plans::ScalarExpr;
use crate::plans::ScalarItem;
use crate::plans::Sort;
use crate::plans::SortItem;
use crate::BindContext;
use crate::IndexType;
/// The resolved `ORDER BY` list produced by `analyze_order_items`.
#[derive(Debug)]
pub struct OrderItems {
    pub(crate) items: Vec<OrderItem>,
}
/// One resolved `ORDER BY` expression.
#[derive(Debug)]
pub struct OrderItem {
    /// The original AST node for this order item.
    pub expr: OrderByExpr,
    /// Index of the bound column this item sorts on.
    pub index: IndexType,
    /// Display name of the sorted column.
    pub name: String,
    // True if item need to wrap EvalScalar plan.
    pub need_eval_scalar: bool,
}
impl Binder {
    /// Resolves each `ORDER BY` expression to an [`OrderItem`].
    ///
    /// Resolution order: (1) a column reference is matched against the select
    /// list (aliases), then against the FROM clause; (2) an integer literal is
    /// treated as a 1-based select-list position; (3) anything else is bound
    /// as a scalar expression and registered in `scalar_items` so it can be
    /// evaluated before sorting.
    pub(super) async fn analyze_order_items(
        &mut self,
        from_context: &BindContext,
        scalar_items: &mut HashMap<IndexType, ScalarItem>,
        projections: &[ColumnBinding],
        order_by: &[OrderByExpr],
        distinct: bool,
    ) -> Result<OrderItems> {
        let mut order_items = Vec::with_capacity(order_by.len());
        for order in order_by {
            match &order.expr {
                Expr::ColumnRef {
                    database: ref database_name,
                    table: ref table_name,
                    column: ref ident,
                    ..
                } => {
                    // We first search the identifier in select list
                    let mut found = false;
                    let database = database_name
                        .as_ref()
                        .map(|ident| normalize_identifier(ident, &self.name_resolution_ctx).name);
                    let table = table_name
                        .as_ref()
                        .map(|ident| normalize_identifier(ident, &self.name_resolution_ctx).name);
                    let column = normalize_identifier(ident, &self.name_resolution_ctx).name;
                    for item in projections.iter() {
                        if BindContext::match_column_binding(
                            database.as_deref(),
                            table.as_deref(),
                            column.as_str(),
                            item,
                        ) {
                            order_items.push(OrderItem {
                                expr: order.clone(),
                                index: item.index,
                                name: item.column_name.clone(),
                                // Only wrap in EvalScalar when the select item is a
                                // computed expression, not a plain column reference.
                                need_eval_scalar: scalar_items.get(&item.index).map_or(
                                    false,
                                    |scalar_item| {
                                        !matches!(
                                            &scalar_item.scalar,
                                            ScalarExpr::BoundColumnRef(_)
                                        )
                                    },
                                ),
                            });
                            found = true;
                            break;
                        }
                    }
                    if found {
                        continue;
                    }
                    // If there isn't a matched alias in select list, we will fallback to
                    // from clause.
                    let result = from_context.resolve_name(
                        database.as_deref(),
                        table.as_deref(),
                        &column,
                        ident.span,
                        &[])
                        .and_then(|v| {
                            // SELECT DISTINCT may only order by select-list items.
                            if distinct {
                                Err(ErrorCode::SemanticError("for SELECT DISTINCT, ORDER BY expressions must appear in select list".to_string()).set_span(order.expr.span()))
                            } else {
                                Ok(v)
                            }
                        })?;
                    match result {
                        NameResolutionResult::Column(column) => {
                            order_items.push(OrderItem {
                                expr: order.clone(),
                                name: column.column_name.clone(),
                                index: column.index,
                                need_eval_scalar: false,
                            });
                        }
                        NameResolutionResult::InternalColumn(column) => {
                            order_items.push(OrderItem {
                                expr: order.clone(),
                                name: column.internal_column.column_name().clone(),
                                index: column.index,
                                need_eval_scalar: false,
                            });
                        }
                        NameResolutionResult::Alias { .. } => {
                            return Err(ErrorCode::Internal("Invalid name resolution result"));
                        }
                    }
                }
                Expr::Literal {
                    lit: Literal::UInt64(index),
                    ..
                } => {
                    // `ORDER BY n` is a 1-based position into the select list.
                    let index = *index as usize - 1;
                    if index >= projections.len() {
                        return Err(ErrorCode::SemanticError(format!(
                            "ORDER BY position {} is not in select list",
                            index + 1
                        ))
                        .set_span(order.expr.span()));
                    }
                    order_items.push(OrderItem {
                        expr: order.clone(),
                        name: projections[index].column_name.clone(),
                        index: projections[index].index,
                        need_eval_scalar: scalar_items.get(&projections[index].index).map_or(
                            false,
                            |scalar_item| {
                                !matches!(&scalar_item.scalar, ScalarExpr::BoundColumnRef(_))
                            },
                        ),
                    });
                }
                _ => {
                    // Arbitrary expression: bind it against the FROM context
                    // extended with the projection columns.
                    let mut bind_context = from_context.clone();
                    for column_binding in projections.iter() {
                        if bind_context.columns.contains(column_binding) {
                            continue;
                        }
                        bind_context.columns.push(column_binding.clone());
                    }
                    let mut scalar_binder = ScalarBinder::new(
                        &mut bind_context,
                        self.ctx.clone(),
                        &self.name_resolution_ctx,
                        self.metadata.clone(),
                        &[],
                    );
                    let (bound_expr, _) = scalar_binder.bind(&order.expr).await?;
                    // Inline any select-list scalars referenced by the expression.
                    let rewrite_scalar = self
                        .rewrite_scalar_with_replacement(&bound_expr, &|nest_scalar| {
                            if let ScalarExpr::BoundColumnRef(BoundColumnRef { column, .. }) =
                                nest_scalar
                            {
                                if let Some(scalar_item) = scalar_items.get(&column.index) {
                                    return Ok(Some(scalar_item.scalar.clone()));
                                }
                            }
                            Ok(None)
                        })
                        .map_err(|e| ErrorCode::SemanticError(e.message()))?;
                    let column_binding = self.create_column_binding(
                        None,
                        None,
                        format!("{:#}", order.expr),
                        rewrite_scalar.data_type()?,
                    );
                    order_items.push(OrderItem {
                        expr: order.clone(),
                        name: column_binding.column_name.clone(),
                        index: column_binding.index,
                        need_eval_scalar: true,
                    });
                    scalar_items.insert(column_binding.index, ScalarItem {
                        scalar: rewrite_scalar,
                        index: column_binding.index,
                    });
                }
            }
        }
        Ok(OrderItems { items: order_items })
    }
    /// Builds the `Sort` plan (optionally preceded by an `EvalScalar` for
    /// computed order keys) on top of `child` from the analyzed order items.
    pub(super) async fn bind_order_by(
        &mut self,
        from_context: &BindContext,
        order_by: OrderItems,
        select_list: &SelectList<'_>,
        scalar_items: &mut HashMap<IndexType, ScalarItem>,
        child: SExpr,
    ) -> Result<SExpr> {
        let mut order_by_items = Vec::with_capacity(order_by.items.len());
        let mut scalars = vec![];
        for order in order_by.items {
            if from_context.in_grouping {
                let mut group_checker = GroupingChecker::new(from_context);
                // Perform grouping check on original scalar expression if order item is alias.
                if let Some(scalar_item) = select_list
                    .items
                    .iter()
                    .find(|item| item.alias == order.name)
                {
                    group_checker.resolve(&scalar_item.scalar, None)?;
                }
            }
            if let Expr::ColumnRef {
                database: ref database_name,
                table: ref table_name,
                ..
            } = order.expr.expr
            {
                // Fully-qualified column: verify the referenced table exists.
                if let (Some(table_name), Some(database_name)) = (table_name, database_name) {
                    let catalog_name = self.ctx.get_current_catalog();
                    let catalog = self.ctx.get_catalog(catalog_name.as_str())?;
                    catalog
                        .get_table(
                            &self.ctx.get_tenant(),
                            &database_name.name,
                            &table_name.name,
                        )
                        .await?;
                }
            }
            if order.need_eval_scalar {
                // Move the computed order key out of `scalar_items` so it is
                // evaluated (once) by the EvalScalar node below.
                if let Entry::Occupied(entry) = scalar_items.entry(order.index) {
                    let (index, item) = entry.remove_entry();
                    let mut scalar = item.scalar;
                    let mut need_group_check = false;
                    if let ScalarExpr::AggregateFunction(_) = scalar {
                        need_group_check = true;
                    }
                    if from_context.in_grouping || need_group_check {
                        let mut group_checker = GroupingChecker::new(from_context);
                        scalar = group_checker.resolve(&scalar, None)?;
                    }
                    scalars.push(ScalarItem { scalar, index });
                }
            }
            // null is the largest value in databend, smallest in hive
            // todo: rewrite after https://github.com/jorgecarleitao/arrow2/pull/1286 is merged
            let default_nulls_first = !self
                .ctx
                .get_settings()
                .get_sql_dialect()
                .unwrap()
                .is_null_biggest();
            let order_by_item = SortItem {
                index: order.index,
                asc: order.expr.asc.unwrap_or(true),
                nulls_first: order.expr.nulls_first.unwrap_or(default_nulls_first),
            };
            order_by_items.push(order_by_item);
        }
        // Only insert an EvalScalar node when there are computed order keys.
        let mut new_expr = if !scalars.is_empty() {
            let eval_scalar = EvalScalar { items: scalars };
            SExpr::create_unary(eval_scalar.into(), child)
        } else {
            child
        };
        let sort_plan = Sort {
            items: order_by_items,
            limit: None,
        };
        new_expr = SExpr::create_unary(sort_plan.into(), new_expr);
        Ok(new_expr)
    }
    /// Binds `ORDER BY` on top of a set operation (UNION/EXCEPT/…): only plain
    /// column references are allowed, since there is no select list to alias.
    pub(crate) async fn bind_order_by_for_set_operation(
        &mut self,
        bind_context: &mut BindContext,
        child: SExpr,
        order_by: &[OrderByExpr],
    ) -> Result<SExpr> {
        let mut scalar_binder = ScalarBinder::new(
            bind_context,
            self.ctx.clone(),
            &self.name_resolution_ctx,
            self.metadata.clone(),
            &[],
        );
        let mut order_by_items = Vec::with_capacity(order_by.len());
        for order in order_by.iter() {
            match order.expr {
                Expr::ColumnRef { .. } => {
                    let scalar = scalar_binder.bind(&order.expr).await?.0;
                    match scalar {
                        ScalarExpr::BoundColumnRef(BoundColumnRef { column, .. }) => {
                            let order_by_item = SortItem {
                                index: column.index,
                                asc: order.asc.unwrap_or(true),
                                nulls_first: order.nulls_first.unwrap_or(false),
                            };
                            order_by_items.push(order_by_item);
                        }
                        _ => {
                            // A ColumnRef AST node must bind to a column.
                            return Err(ErrorCode::Internal("scalar should be BoundColumnRef")
                                .set_span(order.expr.span()));
                        }
                    }
                }
                _ => {
                    return Err(
                        ErrorCode::SemanticError("can only order by column".to_string())
                            .set_span(order.expr.span()),
                    );
                }
            }
        }
        let sort_plan = Sort {
            items: order_by_items,
            limit: None,
        };
        Ok(SExpr::create_unary(sort_plan.into(), child))
    }
    /// Rewrites a scalar expression bottom-up: `replacement_fn` is offered each
    /// node first; if it returns `Some`, that replacement is used as-is,
    /// otherwise the node's children are rewritten recursively and the node is
    /// rebuilt. Leaf/unhandled variants are cloned unchanged.
    #[allow(clippy::only_used_in_recursion)]
    pub(crate) fn rewrite_scalar_with_replacement<F>(
        &self,
        original_scalar: &ScalarExpr,
        replacement_fn: &F,
    ) -> Result<ScalarExpr>
    where
        F: Fn(&ScalarExpr) -> Result<Option<ScalarExpr>>,
    {
        let replacement_opt = replacement_fn(original_scalar)?;
        match replacement_opt {
            Some(replacement) => Ok(replacement),
            None => match original_scalar {
                ScalarExpr::AndExpr(AndExpr { left, right }) => {
                    let left =
                        Box::new(self.rewrite_scalar_with_replacement(left, replacement_fn)?);
                    let right =
                        Box::new(self.rewrite_scalar_with_replacement(right, replacement_fn)?);
                    Ok(ScalarExpr::AndExpr(AndExpr { left, right }))
                }
                ScalarExpr::OrExpr(OrExpr { left, right }) => {
                    let left =
                        Box::new(self.rewrite_scalar_with_replacement(left, replacement_fn)?);
                    let right =
                        Box::new(self.rewrite_scalar_with_replacement(right, replacement_fn)?);
                    Ok(ScalarExpr::OrExpr(OrExpr { left, right }))
                }
                ScalarExpr::NotExpr(NotExpr { argument }) => {
                    let argument =
                        Box::new(self.rewrite_scalar_with_replacement(argument, replacement_fn)?);
                    Ok(ScalarExpr::NotExpr(NotExpr { argument }))
                }
                ScalarExpr::ComparisonExpr(ComparisonExpr { op, left, right }) => {
                    let left =
                        Box::new(self.rewrite_scalar_with_replacement(left, replacement_fn)?);
                    let right =
                        Box::new(self.rewrite_scalar_with_replacement(right, replacement_fn)?);
                    Ok(ScalarExpr::ComparisonExpr(ComparisonExpr {
                        op: op.clone(),
                        left,
                        right,
                    }))
                }
                ScalarExpr::AggregateFunction(AggregateFunction {
                    display_name,
                    func_name,
                    distinct,
                    params,
                    args,
                    return_type,
                }) => {
                    // Rewrite each argument; the first failure aborts the whole rewrite.
                    let args = args
                        .iter()
                        .map(|arg| self.rewrite_scalar_with_replacement(arg, replacement_fn))
                        .collect::<Result<Vec<_>>>()?;
                    Ok(ScalarExpr::AggregateFunction(AggregateFunction {
                        display_name: display_name.clone(),
                        func_name: func_name.clone(),
                        distinct: *distinct,
                        params: params.clone(),
                        args,
                        return_type: return_type.clone(),
                    }))
                }
                ScalarExpr::FunctionCall(FunctionCall {
                    span,
                    params,
                    arguments,
                    func_name,
                }) => {
                    let arguments = arguments
                        .iter()
                        .map(|arg| self.rewrite_scalar_with_replacement(arg, replacement_fn))
                        .collect::<Result<Vec<_>>>()?;
                    Ok(ScalarExpr::FunctionCall(FunctionCall {
                        span: *span,
                        params: params.clone(),
                        arguments,
                        func_name: func_name.clone(),
                    }))
                }
                ScalarExpr::CastExpr(CastExpr {
                    span,
                    is_try,
                    argument,
                    target_type,
                }) => {
                    let argument =
                        Box::new(self.rewrite_scalar_with_replacement(argument, replacement_fn)?);
                    Ok(ScalarExpr::CastExpr(CastExpr {
                        span: *span,
                        is_try: *is_try,
                        argument,
                        target_type: target_type.clone(),
                    }))
                }
                // Leaves (column refs, constants, …) pass through unchanged.
                _ => Ok(original_scalar.clone()),
            },
        }
    }
}
|
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
//
extern crate clap;
extern crate gltf_variant_meld;
use std::fs;
use gltf_variant_meld::{Result, VariationalAsset};
mod args;
use args::parse_args;
pub use args::{SourceAsset, SourceAssets, WorkOrder};
/// CLI entry point: parses arguments and runs the meld pipeline.
fn main() {
    let work_order = parse_args();
    if let Err(err) = process(work_order) {
        eprintln!("Error: {}", err);
        // Previously the process exited with status 0 even on failure;
        // report the error to the shell with a non-zero exit code.
        std::process::exit(1);
    }
}
/// Melds all source assets in the work order into one variational asset and
/// writes the resulting GLB to the configured output path.
fn process(work_order: WorkOrder) -> Result<()> {
    let base = read_asset(&work_order.source_assets.base)?;
    if work_order.verbose() {
        println!("Base asset:");
        describe_asset(&base);
    }
    // Fold each meld asset into the accumulated result, in argument order.
    let mut result = base;
    for meld in &work_order.source_assets.melds {
        let meld = read_asset(meld)?;
        result = VariationalAsset::meld(&result, &meld)?;
        if work_order.verbose() {
            println!("New melded result:");
            describe_asset(&result);
        }
    }
    fs::write(&work_order.output_path, result.glb())
        .map_err(|e| format!("Couldn't write output file: {}", e))?;
    if !work_order.quiet() {
        println!(
            "Success! {} bytes written to '{}'.",
            result.glb().len(),
            work_order.output_path.to_str().unwrap_or("<error>"),
        );
    }
    Ok(())
}
/// Loads one variational asset from disk, using the asset's tag if present.
fn read_asset(asset: &SourceAsset) -> Result<VariationalAsset> {
    // NOTE(review): the `Ok(...?)` wrap implies an error-type conversion via
    // `?`; if `from_file` already returns this crate's `Result`, it is
    // redundant — confirm against the library signature.
    Ok(VariationalAsset::from_file(
        &asset.path,
        asset.tag.as_ref(),
    )?)
}
/// Prints a short size summary of `asset`: total file size, total texture
/// bytes, and how much of the texture data varies by tag.
fn describe_asset(asset: &VariationalAsset) {
    println!("  Total file size: {}", size(asset.glb().len()));
    let total = asset.metadata().total_sizes().texture_bytes;
    let variational = asset.metadata().variational_sizes().texture_bytes;
    println!("  Total texture data: {}", size(total));
    // Fixed garbled wording (was "Of which is depends on tag").
    println!("  Of which depends on tag: {}", size(variational));
}
/// Formats a byte count as a human-readable "N.N kB" / "N.N MB" string.
fn size(byte_count: usize) -> String {
    // The previous version divided integers (truncating) and then applied
    // `{:.01}` to an integer, where precision is ignored — so 1500 printed as
    // "1 kB". Convert to f64 first so one decimal place is actually shown.
    let bytes = byte_count as f64;
    if byte_count < 1000000 {
        format!("{:.01} kB", bytes / 1000.0)
    } else {
        format!("{:.01} MB", bytes / 1000000.0)
    }
}
|
use std::fs::File;
use std::io::{BufReader, ErrorKind};
use std::io::prelude::*;
use std::{fmt, env};
// (C) Copyright 2020 xkr47@outerspace.dyndns.org
//
// Exercise program in Rust to detect file type of raw audio 16/24 bit PCM files
// Uses algorithm purely invented by xkr47
/// For each file named on the command line, guess the raw-PCM layout and
/// print it; prints usage when no arguments are given.
fn main() {
    let args: Vec<String> = env::args().skip(1).collect();
    if args.is_empty() {
        eprintln!("Usage: {} <file.pcm> [...]", env::args().next().unwrap());
        return;
    }
    for file in args {
        match investigate(&file) {
            Ok(results) => match results.guess_type() {
                Ok(t) => println!("{}: {} {} {}",
                    file,
                    if t.signed { "signed" } else { "unsigned" },
                    if t.bits24 { "24bit" } else { "16bit" },
                    if t.big_endian { "big-endian" } else { "little-endian" },
                ),
                // Ambiguous: show the raw scores instead of a verdict.
                Err(_) => println!("{}: unclear {:?}", file, results),
            },
            Err(e) => println!("{}: error {}", file, e),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    extern crate spectral;
    use spectral::prelude::*;
    // NOTE(review): these tests require the `test-*.pcm` fixture files to be
    // present in the working directory when `cargo test` runs.
    fn test(filename: &str, pcm_type: PcmType) {
        let result = investigate(filename);
        assert_that(&result).is_ok();
        assert_that(&result.unwrap().guess_type()).is_ok().is_equal_to(pcm_type);
    }
    #[test]
    fn s16le() {
        test("test-s16.pcm", PcmType { signed: true, bits24: false, big_endian: false });
    }
    #[test]
    fn s16be() {
        test("test-s16be.pcm", PcmType { signed: true, bits24: false, big_endian: true });
    }
    #[test]
    fn u16le() {
        test("test-u16.pcm", PcmType { signed: false, bits24: false, big_endian: false });
    }
    #[test]
    fn u16be() {
        test("test-u16be.pcm", PcmType { signed: false, bits24: false, big_endian: true });
    }
    #[test]
    fn s24le() {
        test("test-s24.pcm", PcmType { signed: true, bits24: true, big_endian: false });
    }
    #[test]
    fn s24be() {
        test("test-s24be.pcm", PcmType { signed: true, bits24: true, big_endian: true });
    }
    #[test]
    fn u24le() {
        test("test-u24.pcm", PcmType { signed: false, bits24: true, big_endian: false });
    }
    #[test]
    fn u24be() {
        test("test-u24be.pcm", PcmType { signed: false, bits24: true, big_endian: true });
    }
}
/// One inferred layout of a raw PCM stream.
#[derive(Clone,Copy,Debug,PartialEq,Eq)]
struct PcmType {
    signed: bool,
    bits24: bool,
    big_endian: bool,
}
/// Likelihood scores for each of the eight candidate PCM layouts.
#[derive(Debug)]
struct PcmResults {
    s16le: f64,
    s16be: f64,
    u16le: f64,
    u16be: f64,
    s24le: f64,
    s24be: f64,
    u24le: f64,
    u24be: f64,
}
impl PcmResults {
    // how much more sure must we be of the most likely outcome compared to the second most likely
    const THRESHOLD: f64 = 4.0;
    /// Picks the highest-scoring layout, or errors when the winner is not at
    /// least `THRESHOLD` times more likely than the runner-up.
    fn guess_type(&self) -> Result<PcmType, String> {
        let mut candidates = vec![
            (PcmType { signed: true, bits24: false, big_endian: false }, self.s16le),
            (PcmType { signed: true, bits24: false, big_endian: true }, self.s16be),
            (PcmType { signed: false, bits24: false, big_endian: false }, self.u16le),
            (PcmType { signed: false, bits24: false, big_endian: true }, self.u16be),
            (PcmType { signed: true, bits24: true, big_endian: false }, self.s24le),
            (PcmType { signed: true, bits24: true, big_endian: true }, self.s24be),
            (PcmType { signed: false, bits24: true, big_endian: false }, self.u24le),
            (PcmType { signed: false, bits24: true, big_endian: true }, self.u24be),
        ];
        // Highest score first.
        candidates.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap());
        let best = &candidates[0];
        let second = &candidates[1];
        if best.1 / second.1 < PcmResults::THRESHOLD {
            return Err(format!("Below threshold for {:?} vs {:?}", best, second));
        }
        Ok(best.0)
    }
}
/// Running statistics over a stream of signed byte samples: the plain sum and
/// the sum of absolute deltas between consecutive samples ("smoothness").
struct Avg {
    sum: i64,
    diffsum: u64,
    count: u32,
    last: i8,
    debug: bool,
}
impl Avg {
    fn new() -> Avg {
        Avg { sum: 0, diffsum: 0, count: 0, last: 0, debug: false }
    }
    /// Debug variant: prints the first ~10k samples as they arrive.
    fn _newd() -> Avg {
        Avg { sum: 0, diffsum: 0, count: 0, last: 0, debug: true }
    }
    /// Feeds one sample. The very first sample only seeds `last`; sums start
    /// accumulating from the second sample onward.
    fn add(&mut self, value: i8) {
        if self.count > 0 {
            self.sum += i64::from(value);
            self.diffsum += (i64::from(value) - i64::from(self.last)).abs() as u64;
        }
        self.count += 1;
        self.last = value;
        if self.debug && self.count < 10000 {
            println!("{}. {} {} {}", self.count, value, self.diffsum, self.diffavg());
        }
    }
    /// Mean sample value over all counted samples.
    fn _avg(&self) -> f64 {
        self.sum as f64 / self.count as f64
    }
    /// Mean absolute delta between consecutive samples.
    fn diffavg(&self) -> f64 {
        self.diffsum as f64 / self.count as f64
    }
}
/// A left/right pair of per-channel values.
#[derive(Clone,Copy,Debug)]
struct Stereo<T: Copy + std::fmt::Debug> {
    l: T,
    r: T
}
/// A pair of [`Avg`] accumulators, one per stereo channel.
struct Avg2 {
    l: Avg,
    r: Avg
}
impl Avg2 {
    fn new() -> Avg2 {
        Avg2 { l: Avg::new(), r: Avg::new() }
    }
    /// Debug variant of [`Avg2::new`].
    fn _newd() -> Avg2 {
        Avg2 { l: Avg::_newd(), r: Avg::_newd() }
    }
    /// Feeds one sample per channel.
    fn add(&mut self, l: i8, r: i8) {
        self.l.add(l);
        self.r.add(r);
    }
    fn _avg(&self) -> Stereo<f64> {
        Stereo { l: self.l._avg(), r: self.r._avg() }
    }
    fn diffavg(&self) -> Stereo<f64> {
        Stereo { l: self.l.diffavg(), r: self.r.diffavg() }
    }
}
// Human-readable form: "mean / mean-absolute-delta".
impl fmt::Display for Avg {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{} / {}", self._avg(), self.diffavg())
    }
}
// Debug deliberately shows only the delta average (the detection signal).
impl fmt::Debug for Avg {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        //write!(f, "{} / {} sum {} diffsum {} count {}", self.avg(), self.diffavg(), self.sum, self.diffsum, self.count)
        write!(f, "{}", self.diffavg())
    }
}
/// Reads `filename` as raw PCM and accumulates per-byte "smoothness"
/// statistics for every candidate interpretation (16/24-bit, original vs
/// bit-7-toggled, little/big endian), then combines them into scores.
fn investigate(filename: &str) -> std::io::Result<PcmResults> {
    let file = File::open(filename)?;
    let meta = file.metadata()?;
    if !meta.is_file() {
        return Err(std::io::Error::from(ErrorKind::InvalidInput));
    }
    /*
    match meta.len() / 2 % 6 {
        2 | 4 => return Ok(Signed16),
        3 => return Ok(Signed24),
        1 | 5 => panic!("Bad file {} length {}", filename, meta.len()),
        _ => (),
    }
    */
    let mut buf_reader = BufReader::new(file);
    // a16[bit7-toggled][byte-within-16-bit-sample]
    let mut a16: [[Avg; 2]; 2] = [[Avg::new(), Avg::new()], [Avg::new(), Avg::new()]];
    let mut a24: [[Avg2; 3]; 2] = [ // [bit7-inv][byte]
        [Avg2::new(), Avg2::new(), Avg2::new()], [Avg2::new(), Avg2::new(), Avg2::new()],
    ];
    // 12 bytes at a time — a size divisible by both candidate frame layouts.
    let mut buf = [0_u8; 12];
    let mut bufs = [[0_i8; 12]; 2];
    while let Ok(()) = buf_reader.read_exact(&mut buf) {
        for i in 0..buf.len() {
            let v = buf[i] as i8;
            bufs[0][i] = v;
            // Toggling bit 7 converts between signed and unsigned encodings.
            bufs[1][i] = v.wrapping_add(-128);
        }
        for toggled in 0..=1 { // 0 = original, 1 = bit 7 toggled
            for sample in 0..=2 {
                for byte in 0..=1 {
                    a16[toggled][byte].add(bufs[toggled][sample * 4 + byte]);
                }
            }
            for sample in 0..=1 {
                for byte in 0..=2 {
                    // The toggled middle byte is never consulted below.
                    if toggled == 0 || byte != 1 {
                        a24[toggled][byte].add(bufs[toggled][sample * 6 + byte], bufs[toggled][sample * 6 + 3 + byte]);
                    }
                }
            }
        }
    };
    let v16 = [[a16[0][0].diffavg(), a16[0][1].diffavg()], [a16[1][0].diffavg(), a16[1][1].diffavg()]];
    let v24i = [
        [
            a24[0][0].diffavg(),
            a24[0][1].diffavg(),
            a24[0][2].diffavg()
        ], [
            a24[1][0].diffavg(),
            Stereo { l: 0.0, r: 0.0 }, /* 7-bit toggled middle byte not needed */
            a24[1][2].diffavg()
        ]
    ];
    // the inner if expressions below are to handle 16 bit files that have been converted to 24 bit by filling lsbs with zeroes
    let v24 = [
        [
            Stereo { l: if v24i[0][0].l <= 0.0 { v24i[0][1].l } else { v24i[0][0].l }, r: if v24i[0][0].r <= 0.0 { v24i[0][1].r } else { v24i[0][0].r } },
            v24i[0][1],
            Stereo { l: if v24i[0][2].l <= 0.0 { v24i[0][1].l } else { v24i[0][2].l }, r: if v24i[0][2].r <= 0.0 { v24i[0][1].r } else { v24i[0][2].r } },
        ], [
            Stereo { l: if v24i[1][0].l <= 0.0 { v24i[0][1].l } else { v24i[1][0].l }, r: if v24i[1][0].r <= 0.0 { v24i[0][1].r } else { v24i[1][0].r } },
            v24i[1][1], // not needed
            Stereo { l: if v24i[1][2].l <= 0.0 { v24i[0][1].l } else { v24i[1][2].l }, r: if v24i[1][2].r <= 0.0 { v24i[0][1].r } else { v24i[1][2].r } },
        ]
    ];
    //println!("v16 signed {:?} unsigned {:?}", v16[0], v16[1]);
    //println!("v24 signed {:?} unsigned {:?}", v24[0], v24[1]);
    // Score = smoothness ratio for the hypothesized layout; guards avoid
    // division by zero (0 score means "no evidence"). For stereo 24-bit the
    // weaker of the two channels' scores is used.
    let results = PcmResults {
        s16le: if v16[0][1] > 0.0 { v16[0][0] / v16[0][1] * v16[1][1] } else { 0. },
        s16be: if v16[0][0] > 0.0 { v16[0][1] / v16[0][0] * v16[1][0] } else { 0. },
        u16le: if v16[1][1] > 0.0 { v16[0][0] / v16[1][1] * v16[0][1] } else { 0. },
        u16be: if v16[1][0] > 0.0 { v16[0][1] / v16[1][0] * v16[0][0] } else { 0. },
        s24le: (if v24[0][2].l > 0.0 { v24[0][1].l / v24[0][2].l * v24[1][2].l } else { 0. }).min(if v24[0][2].r > 0.0 { v24[0][1].r / v24[0][2].r * v24[1][2].r } else { 0. }),
        s24be: (if v24[0][0].l > 0.0 { v24[0][1].l / v24[0][0].l * v24[1][0].l } else { 0. }).min(if v24[0][0].r > 0.0 { v24[0][1].r / v24[0][0].r * v24[1][0].r } else { 0. }),
        u24le: (if v24[1][2].l > 0.0 { v24[0][1].l / v24[1][2].l * v24[0][2].l } else { 0. }).min(if v24[1][2].r > 0.0 { v24[0][1].r / v24[1][2].r * v24[0][2].r } else { 0. }),
        u24be: (if v24[1][0].l > 0.0 { v24[0][1].l / v24[1][0].l * v24[0][0].l } else { 0. }).min(if v24[1][0].r > 0.0 { v24[0][1].r / v24[1][0].r * v24[0][0].r } else { 0. }),
    };
    //println!("Res {:?}", results);
    Ok(results)
}
|
use std::mem;
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::Weak;
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
use backend::*;
use output_log::*;
/// Shared state behind the output system: the rendering backend plus the
/// set of in-flight log lines and the bookkeeping for the optional
/// background refresh thread.
pub struct OutputState {
// Rendering target; `None` means updates are dropped silently.
backend: Option <Box <Backend>>,
// True when updates are pushed immediately (no background thread).
synchronous: bool,
// Log lines not yet retired; non-`Running` entries are pruned on update.
logs: Vec <OutputLogInternal>,
// Monotonically increasing id handed out by `add_log`.
next_log_id: u64,
// Handle of the periodic updater thread (asynchronous mode only).
background_join_handle: Option <thread::JoinHandle <()>>,
// Dropping this sender tells the background thread to exit.
background_sender: Option <mpsc::Sender <()>>,
// While paused, `update_backend_real` is a no-op.
paused: bool,
// Set when `logs` changed since the backend last rendered them.
changed: bool,
}
impl OutputState {
/// Builds the shared output state. When the backend reports itself as
/// asynchronous, a background thread is spawned that flushes pending
/// changes every `update_duration`; a missing backend counts as
/// synchronous (no thread).
pub fn new (
backend: Option <Box <Backend>>,
update_duration: Duration,
) -> Arc <Mutex <OutputState>> {
// No backend => treat as synchronous, so no background thread is started.
let synchronous =
backend.as_ref ().map (
|backend| backend.synchronous (),
).unwrap_or (true);
let real_self = OutputState {
backend: backend,
synchronous: synchronous,
logs: Vec::new (),
next_log_id: 0,
background_join_handle: None,
background_sender: None,
paused: false,
changed: false,
};
let shared_self =
Arc::new (Mutex::new (
real_self,
));
if ! synchronous {
let mut real_self =
shared_self.lock ().expect (
"OutputState::new");
// The sender is held only so that dropping it later signals the
// background thread to shut down (see `background_thread`).
let (background_sender, background_receiver) =
mpsc::channel ();
real_self.background_sender =
Some (background_sender);
{
// Hand the thread a weak reference so it does not keep the state
// alive once every strong reference has been dropped.
let shared_self =
Arc::downgrade (
& shared_self);
real_self.background_join_handle = Some (
thread::spawn (move ||
Self::background_thread (
shared_self,
background_receiver,
update_duration,
)
)
);
}
}
shared_self
}
/// Registers a new log line and returns its id for later lookup.
#[ inline ]
pub fn add_log (
& mut self,
message: String,
state: OutputLogState,
) -> u64 {
let log_id = self.next_log_id;
self.next_log_id += 1;
let log_internal =
OutputLogInternal::new (
log_id,
message,
state);
self.logs.push (
log_internal);
// New entries may need an immediate flush depending on their state.
self.update_backend_auto (
state);
log_id
}
/// Finds the still-tracked log entry with the given id, if any.
pub fn get_log_internal (
& mut self,
log_id: u64,
) -> Option <& mut OutputLogInternal> {
self.logs.iter_mut ().filter (
|log_internal|
log_internal.log_id () == log_id
).next ()
}
/// Flushes immediately for terminal/one-shot states, otherwise defers to
/// the asynchronous path (which may batch the update).
pub fn update_backend_auto (
& mut self,
state: OutputLogState,
) {
if state == OutputLogState::Message
|| state == OutputLogState::Complete
|| state == OutputLogState::Incomplete {
self.update_backend_synchronous ();
} else {
self.update_backend_asynchronous ();
};
}
/// Marks the state dirty; the background thread (or the next synchronous
/// call) will push it to the backend.
pub fn update_backend_asynchronous (
& mut self,
) {
if self.synchronous {
self.update_backend_synchronous ();
} else {
self.changed = true;
}
}
/// Suspends backend updates until `unpause` (or `flush`) is called.
pub fn pause (
& mut self,
) {
self.paused = true;
}
/// Re-enables updates and pushes anything that accumulated while paused.
pub fn unpause (
& mut self,
) {
self.paused = false;
self.update_backend_real ();
}
/// Pushes pending changes even while paused, then restores the pause flag.
pub fn flush (
& mut self,
) {
let old_paused = self.paused;
self.paused = false;
self.update_backend_real ();
self.paused = old_paused;
}
// Forces the dirty flag and performs an immediate backend update.
fn update_backend_synchronous (
& mut self,
) {
self.changed = true;
self.update_backend_real ();
}
// Pushes the current log set to the backend (when dirty and not paused),
// then retires every entry that is no longer `Running`.
fn update_backend_real (
& mut self,
) {
if ! self.changed || self.paused {
return;
}
if let Some (ref mut backend) =
self.backend {
backend.update (
& self.logs);
}
// Keep only still-running logs; finished ones were just rendered.
let logs_temp =
mem::replace (
& mut self.logs,
vec! []);
self.logs =
logs_temp.into_iter ().filter (
|log_internal|
log_internal.state () == OutputLogState::Running
).collect ();
self.changed = false;
}
// Periodic updater: wakes every `update_time` (or early if the channel is
// dropped) and flushes pending changes while the state is still alive.
fn background_thread (
shared_state: Weak <Mutex <OutputState>>,
background_receiver: mpsc::Receiver <()>,
update_time: Duration,
) {
loop {
// wait a bit
match background_receiver.recv_timeout (
update_time) {
Ok (()) => (),
Err (mpsc::RecvTimeoutError::Timeout) => (),
// Sender dropped (see `Drop`): shut this thread down.
Err (mpsc::RecvTimeoutError::Disconnected) => break,
}
// perform updates
if let Some (ref mut shared_state) =
shared_state.upgrade () {
let mut state =
shared_state.lock ().expect (
"OutputState::background_thread");
state.update_backend_real ();
}
}
}
}
impl Drop for OutputState {
/// Shuts down the background updater (if one exists) and performs a final
/// unpaused flush so no pending output is lost.
fn drop (
& mut self,
) {
// Only asynchronous instances ever spawn the background thread (see
// `new`). The previous code `take().expect(...)`-ed unconditionally,
// which panicked whenever a synchronous OutputState was dropped.
if let Some (background_sender) = self.background_sender.take () {
// Dropping the sender disconnects the channel; the background
// thread observes RecvTimeoutError::Disconnected and exits.
drop (background_sender);
}
// wait for background thread to stop
if let Some (background_join_handle) = self.background_join_handle.take () {
background_join_handle.join ().expect (
"OutputState::drop");
}
// perform final update
self.paused = false;
self.update_backend_real ();
}
}
// ex: noet ts=4 filetype=rust
|
use crate::authorize::AuthToken;
use crate::authorize::Login;
use crate::authorize::NewUser;
use crate::authorize::User;
use crate::helpers::*;
use crate::schema::users;
use diesel;
use diesel::prelude::*;
/// Loads every row of the `users` table; panics if the query fails.
pub fn allusers( connection: &PgConnection)->Vec<User> {
    let everyone: Vec<User> = users::table.load(connection).unwrap();
    everyone
}
/// Inserts a new user (with its password encrypted) and returns the stored
/// row. Panics if the insert fails.
pub fn insert(user: NewUser, connection: &PgConnection) -> User {
    // Build the row to persist; the password is stored encrypted.
    let new_user = NewUser {
        username: user.username.to_string(),
        email: user.email.to_string(),
        password: encrypt_password(&user.password),
        enable: 1,
        first_name: user.first_name,
        last_name: user.last_name
    };
    // Use PostgreSQL's RETURNING clause (diesel's `get_result`) so we get
    // back exactly the row we inserted. The old code re-read the newest row
    // via `ORDER BY id DESC LIMIT 1`, which is racy: a concurrent insert
    // between the two statements would return the wrong user.
    diesel::insert_into(users::table)
        .values(&new_user)
        .get_result(connection)
        .expect("Error creating new user")
}
/// Validates the given credentials and, on success, mints and stores a new
/// auth token for the user. On failure a sentinel token (`user_id` 0,
/// token "Not found") is returned instead of an error.
pub fn login(login: Login, connection: &PgConnection) -> AuthToken {
    use crate::schema::auth_tokens;
    match User::find_by_login(&connection, &login.username, &login.password) {
        Ok(user) => {
            let rand_hash = gen_random_hash();
            let expired_at = (epoch_now() as i64) + AUTH_TOKEN_TTL;
            let new_auth_token = AuthToken {
                token: rand_hash,
                expired_at: expired_at,
                user_id: user.id,
            };
            // Insert and return the created row via RETURNING. The old code
            // re-selected `ORDER BY token DESC LIMIT 1`, which orders by the
            // *random hash*, so under concurrency it could hand back a
            // different user's token — a correctness and security bug.
            diesel::insert_into(auth_tokens::table)
                .values(&new_auth_token)
                .get_result(connection)
                .expect("Error creating auth token")
        }
        Err(_err) => {
            // Deliberate best-effort fallback: callers detect failure via
            // the sentinel value rather than a Result.
            AuthToken {
                user_id: 0,
                token: "Not found".to_string(),
                expired_at: 0,
            }
        }
    }
}
|
use std::cmp::Ordering;
// Per-column tally of '0' and '1' occurrences in the diagnostic report.
#[derive(Default, Clone, Copy, Debug)]
struct BitCount {
    // Number of lines with a '0' in this column.
    zero: u32,
    // Number of lines with a '1' in this column.
    one: u32,
}
/// Tallies, for each bit column, how many input lines carry '0' vs '1'.
/// The column count is taken from the first line; shorter lines panic on
/// out-of-bounds indexing, any byte other than '0'/'1' panics explicitly.
fn bit_counts(input: &str) -> Vec<BitCount> {
    let width = input.lines().next().unwrap().len();
    let mut tallies = vec![BitCount::default(); width];
    for line in input.lines() {
        let bytes = line.as_bytes();
        for (column, tally) in tallies.iter_mut().enumerate() {
            match bytes[column] {
                b'0' => tally.zero += 1,
                b'1' => tally.one += 1,
                c => panic!("What? {} byte encountered ({})", c, c as char),
            }
        }
    }
    tallies
}
/// Same tally as `bit_counts`, but over pre-parsed 0/1 digit vectors.
/// Width comes from the first entry; digits other than 0/1 panic.
fn bit_counts_2(bin_strings: &[Vec<u8>]) -> Vec<BitCount> {
    let width = bin_strings[0].len();
    let mut tallies = vec![BitCount::default(); width];
    for bits in bin_strings {
        for (column, tally) in tallies.iter_mut().enumerate() {
            match bits[column] {
                0 => tally.zero += 1,
                1 => tally.one += 1,
                c => panic!("What? {} byte encountered ({})", c, c as char),
            }
        }
    }
    tallies
}
/// Picks the more frequent digit per column; ties resolve to 1 (matching
/// the original `zero > one { 0 } else { 1 }` logic).
fn most_common_bits(bc: &[BitCount]) -> Vec<u8> {
    let mut bits = Vec::with_capacity(bc.len());
    for count in bc {
        bits.push(if count.one >= count.zero { 1 } else { 0 });
    }
    bits
}
/// Picks the less frequent digit per column; ties resolve to 1 (because
/// `zero < one` is false when the counts are equal).
fn least_common_bits(bc: &[BitCount]) -> Vec<u8> {
    let mut bits = Vec::with_capacity(bc.len());
    for count in bc {
        bits.push(if count.zero < count.one { 0 } else { 1 });
    }
    bits
}
/// Interprets a most-significant-first slice of binary digits as a u32,
/// using Horner's rule (shift-accumulate) instead of per-digit powers.
fn bin_to_u32(digits: &[u8]) -> u32 {
    digits
        .iter()
        .fold(0u32, |accum, &digit| accum * 2 + u32::from(digit))
}
/// Power consumption: gamma (most common bit per column) times epsilon
/// (least common bit per column).
fn part1(input: &str) -> u32 {
    let column_tallies = bit_counts(input);
    bin_to_u32(&most_common_bits(&column_tallies))
        * bin_to_u32(&least_common_bits(&column_tallies))
}
fn part2(input: &str) -> u32 {
let mut binary_numbers: Vec<Vec<u8>> = Vec::new();
for line in input.lines() {
binary_numbers.push(
line.bytes()
.map(|b| if b == b'0' { 0 } else { 1 })
.collect(),
);
}
let oxygen_generator_rating = find_matching(binary_numbers.clone(), |count, bin_digit| {
match count.zero.cmp(&count.one) {
Ordering::Less | Ordering::Equal => bin_digit == 1,
Ordering::Greater => bin_digit == 0,
}
});
let co2_scrubber_rating = find_matching(binary_numbers.clone(), |count, bin_digit| match count
.zero
.cmp(&count.one)
{
Ordering::Less | Ordering::Equal => bin_digit == 0,
Ordering::Greater => bin_digit == 1,
});
oxygen_generator_rating * co2_scrubber_rating
}
fn find_matching(
mut binary_numbers: Vec<Vec<u8>>,
mut retain_cond: impl FnMut(&BitCount, u8) -> bool,
) -> u32 {
let mut i = 0;
while binary_numbers.len() > 1 {
let counts = bit_counts_2(&binary_numbers);
binary_numbers.retain(|bin_string| {
let count = &counts[i];
retain_cond(count, bin_string[i])
});
i += 1;
}
bin_to_u32(&binary_numbers[0])
}
// Sample diagnostic report from the AoC 2021 day 3 puzzle statement
// (expected: part1 = 198, part2 = 230).
#[cfg(test)]
const TEST_INPUT: &str = "00100
11110
10110
10111
10101
01111
00111
11100
10000
11001
00010
01010";
// Project macros — presumably expand into #[test] functions checking each
// `input => expected` pair (with `in` referring to the real puzzle input)
// and into the binary entry point; confirm against the `aoc` crate.
aoc::tests! {
    fn bin_to_u32:
    &[1,0,1,1,0] => 22;
    &[0,1,0,0,1] => 9;
    fn part1:
    TEST_INPUT => 198;
    in => 2743844;
    fn part2:
    TEST_INPUT => 230;
    in => 6677951;
}
aoc::main!(part1, part2);
|
// svd2rust-generated accessor for register HWCFGR (hardware configuration).
// `R`/`W` tie the generic reader/writer to this register's 32-bit word.
#[doc = "Reader of register HWCFGR"]
pub type R = crate::R<u32, super::HWCFGR>;
#[doc = "Writer for register HWCFGR"]
pub type W = crate::W<u32, super::HWCFGR>;
#[doc = "Register HWCFGR `reset()`'s with value 0"]
impl crate::ResetValue for super::HWCFGR {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
// Hardware reset leaves the register all-zeroes.
0
}
}
// Field reader aliases and write proxies (svd2rust-generated). Each
// `bits()` implementation masks the value into its field's bit range
// within the register word; callers must keep values inside the mask,
// hence the `unsafe`.
#[doc = "Reader of field `ALARMB`"]
pub type ALARMB_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `ALARMB`"]
pub struct ALARMB_W<'a> {
w: &'a mut W,
}
impl<'a> ALARMB_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
// 4-bit field at bits 3:0.
self.w.bits = (self.w.bits & !0x0f) | ((value as u32) & 0x0f);
self.w
}
}
#[doc = "Reader of field `WAKEUP`"]
pub type WAKEUP_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `WAKEUP`"]
pub struct WAKEUP_W<'a> {
w: &'a mut W,
}
impl<'a> WAKEUP_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
// 4-bit field at bits 7:4.
self.w.bits = (self.w.bits & !(0x0f << 4)) | (((value as u32) & 0x0f) << 4);
self.w
}
}
#[doc = "Reader of field `SMOOTH_CALIB`"]
pub type SMOOTH_CALIB_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `SMOOTH_CALIB`"]
pub struct SMOOTH_CALIB_W<'a> {
w: &'a mut W,
}
impl<'a> SMOOTH_CALIB_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
// 4-bit field at bits 11:8.
self.w.bits = (self.w.bits & !(0x0f << 8)) | (((value as u32) & 0x0f) << 8);
self.w
}
}
#[doc = "Reader of field `TIMESTAMP`"]
pub type TIMESTAMP_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `TIMESTAMP`"]
pub struct TIMESTAMP_W<'a> {
w: &'a mut W,
}
impl<'a> TIMESTAMP_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
// 4-bit field at bits 15:12.
self.w.bits = (self.w.bits & !(0x0f << 12)) | (((value as u32) & 0x0f) << 12);
self.w
}
}
#[doc = "Reader of field `OPTIONREG_OUT`"]
pub type OPTIONREG_OUT_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `OPTIONREG_OUT`"]
pub struct OPTIONREG_OUT_W<'a> {
w: &'a mut W,
}
impl<'a> OPTIONREG_OUT_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
// 8-bit field at bits 23:16.
self.w.bits = (self.w.bits & !(0xff << 16)) | (((value as u32) & 0xff) << 16);
self.w
}
}
#[doc = "Reader of field `TRUST_ZONE`"]
pub type TRUST_ZONE_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `TRUST_ZONE`"]
pub struct TRUST_ZONE_W<'a> {
w: &'a mut W,
}
impl<'a> TRUST_ZONE_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
// 4-bit field at bits 27:24.
self.w.bits = (self.w.bits & !(0x0f << 24)) | (((value as u32) & 0x0f) << 24);
self.w
}
}
// Field readers: each extracts its bit range from the cached register value.
impl R {
#[doc = "Bits 0:3 - ALARMB"]
#[inline(always)]
pub fn alarmb(&self) -> ALARMB_R {
ALARMB_R::new((self.bits & 0x0f) as u8)
}
#[doc = "Bits 4:7 - WAKEUP"]
#[inline(always)]
pub fn wakeup(&self) -> WAKEUP_R {
WAKEUP_R::new(((self.bits >> 4) & 0x0f) as u8)
}
#[doc = "Bits 8:11 - SMOOTH_CALIB"]
#[inline(always)]
pub fn smooth_calib(&self) -> SMOOTH_CALIB_R {
SMOOTH_CALIB_R::new(((self.bits >> 8) & 0x0f) as u8)
}
#[doc = "Bits 12:15 - TIMESTAMP"]
#[inline(always)]
pub fn timestamp(&self) -> TIMESTAMP_R {
TIMESTAMP_R::new(((self.bits >> 12) & 0x0f) as u8)
}
#[doc = "Bits 16:23 - OPTIONREG_OUT"]
#[inline(always)]
pub fn optionreg_out(&self) -> OPTIONREG_OUT_R {
OPTIONREG_OUT_R::new(((self.bits >> 16) & 0xff) as u8)
}
#[doc = "Bits 24:27 - TRUST_ZONE"]
#[inline(always)]
pub fn trust_zone(&self) -> TRUST_ZONE_R {
TRUST_ZONE_R::new(((self.bits >> 24) & 0x0f) as u8)
}
}
// Field writers: each returns the matching write proxy over this writer.
impl W {
#[doc = "Bits 0:3 - ALARMB"]
#[inline(always)]
pub fn alarmb(&mut self) -> ALARMB_W {
ALARMB_W { w: self }
}
#[doc = "Bits 4:7 - WAKEUP"]
#[inline(always)]
pub fn wakeup(&mut self) -> WAKEUP_W {
WAKEUP_W { w: self }
}
#[doc = "Bits 8:11 - SMOOTH_CALIB"]
#[inline(always)]
pub fn smooth_calib(&mut self) -> SMOOTH_CALIB_W {
SMOOTH_CALIB_W { w: self }
}
#[doc = "Bits 12:15 - TIMESTAMP"]
#[inline(always)]
pub fn timestamp(&mut self) -> TIMESTAMP_W {
TIMESTAMP_W { w: self }
}
#[doc = "Bits 16:23 - OPTIONREG_OUT"]
#[inline(always)]
pub fn optionreg_out(&mut self) -> OPTIONREG_OUT_W {
OPTIONREG_OUT_W { w: self }
}
#[doc = "Bits 24:27 - TRUST_ZONE"]
#[inline(always)]
pub fn trust_zone(&mut self) -> TRUST_ZONE_W {
TRUST_ZONE_W { w: self }
}
}
|
// svd2rust-generated accessor for DMA channel 3 interrupt status (C3ISR).
// Only a reader is generated (no `W` alias), so the register is read-only
// through this API; the doc strings say flags are cleared by writing 1 to
// the corresponding DMA_IFCRy bits.
#[doc = "Reader of register C3ISR"]
pub type R = crate::R<u32, super::C3ISR>;
#[doc = "Reader of field `TEIF3`"]
pub type TEIF3_R = crate::R<bool, bool>;
#[doc = "Reader of field `CTCIF3`"]
pub type CTCIF3_R = crate::R<bool, bool>;
#[doc = "Reader of field `BRTIF3`"]
pub type BRTIF3_R = crate::R<bool, bool>;
#[doc = "Reader of field `BTIF3`"]
pub type BTIF3_R = crate::R<bool, bool>;
#[doc = "Reader of field `TCIF3`"]
pub type TCIF3_R = crate::R<bool, bool>;
#[doc = "Reader of field `CRQA3`"]
pub type CRQA3_R = crate::R<bool, bool>;
// Single-bit flag readers; each extracts one bit of the status word.
impl R {
#[doc = "Bit 0 - Channel x transfer error interrupt flag This bit is set by hardware. It is cleared by software writing 1 to the corresponding bit in the DMA_IFCRy register."]
#[inline(always)]
pub fn teif3(&self) -> TEIF3_R {
TEIF3_R::new((self.bits & 0x01) != 0)
}
#[doc = "Bit 1 - Channel x Channel Transfer Complete interrupt flag This bit is set by hardware. It is cleared by software writing 1 to the corresponding bit in the DMA_IFCRy register. CTC is set when the last block was transferred and the channel has been automatically disabled. CTC is also set when the channel is suspended, as a result of writing EN bit to 0."]
#[inline(always)]
pub fn ctcif3(&self) -> CTCIF3_R {
CTCIF3_R::new(((self.bits >> 1) & 0x01) != 0)
}
#[doc = "Bit 2 - Channel x block repeat transfer complete interrupt flag This bit is set by hardware. It is cleared by software writing 1 to the corresponding bit in the DMA_IFCRy register."]
#[inline(always)]
pub fn brtif3(&self) -> BRTIF3_R {
BRTIF3_R::new(((self.bits >> 2) & 0x01) != 0)
}
#[doc = "Bit 3 - Channel x block transfer complete interrupt flag This bit is set by hardware. It is cleared by software writing 1 to the corresponding bit in the DMA_IFCRy register."]
#[inline(always)]
pub fn btif3(&self) -> BTIF3_R {
BTIF3_R::new(((self.bits >> 3) & 0x01) != 0)
}
#[doc = "Bit 4 - channel x buffer transfer complete"]
#[inline(always)]
pub fn tcif3(&self) -> TCIF3_R {
TCIF3_R::new(((self.bits >> 4) & 0x01) != 0)
}
#[doc = "Bit 16 - channel x request active flag"]
#[inline(always)]
pub fn crqa3(&self) -> CRQA3_R {
CRQA3_R::new(((self.bits >> 16) & 0x01) != 0)
}
}
|
// svd2rust-generated accessor for RCC AHB2ENR (AHB2 peripheral clock
// enable register).
#[doc = "Reader of register AHB2ENR"]
pub type R = crate::R<u32, super::AHB2ENR>;
#[doc = "Writer for register AHB2ENR"]
pub type W = crate::W<u32, super::AHB2ENR>;
#[doc = "Register AHB2ENR `reset()`'s with value 0"]
impl crate::ResetValue for super::AHB2ENR {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
// All AHB2 peripheral clocks are disabled after reset.
0
}
}
#[doc = "Reader of field `GPIOAEN`"]
pub type GPIOAEN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `GPIOAEN`"]
pub struct GPIOAEN_W<'a> {
w: &'a mut W,
}
impl<'a> GPIOAEN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
self.w
}
}
#[doc = "Reader of field `GPIOBEN`"]
pub type GPIOBEN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `GPIOBEN`"]
pub struct GPIOBEN_W<'a> {
w: &'a mut W,
}
impl<'a> GPIOBEN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1);
self.w
}
}
#[doc = "Reader of field `GPIOCEN`"]
pub type GPIOCEN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `GPIOCEN`"]
pub struct GPIOCEN_W<'a> {
w: &'a mut W,
}
impl<'a> GPIOCEN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2);
self.w
}
}
#[doc = "Reader of field `GPIODEN`"]
pub type GPIODEN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `GPIODEN`"]
pub struct GPIODEN_W<'a> {
w: &'a mut W,
}
impl<'a> GPIODEN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3);
self.w
}
}
#[doc = "Reader of field `GPIOEEN`"]
pub type GPIOEEN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `GPIOEEN`"]
pub struct GPIOEEN_W<'a> {
w: &'a mut W,
}
impl<'a> GPIOEEN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4);
self.w
}
}
#[doc = "Reader of field `GPIOFEN`"]
pub type GPIOFEN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `GPIOFEN`"]
pub struct GPIOFEN_W<'a> {
w: &'a mut W,
}
impl<'a> GPIOFEN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u32) & 0x01) << 5);
self.w
}
}
#[doc = "Reader of field `GPIOGEN`"]
pub type GPIOGEN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `GPIOGEN`"]
pub struct GPIOGEN_W<'a> {
w: &'a mut W,
}
impl<'a> GPIOGEN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 6)) | (((value as u32) & 0x01) << 6);
self.w
}
}
#[doc = "Reader of field `ADC12EN`"]
pub type ADC12EN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `ADC12EN`"]
pub struct ADC12EN_W<'a> {
w: &'a mut W,
}
impl<'a> ADC12EN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 13)) | (((value as u32) & 0x01) << 13);
self.w
}
}
#[doc = "Reader of field `ADC345EN`"]
pub type ADC345EN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `ADC345EN`"]
pub struct ADC345EN_W<'a> {
w: &'a mut W,
}
impl<'a> ADC345EN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 14)) | (((value as u32) & 0x01) << 14);
self.w
}
}
#[doc = "Reader of field `DAC1EN`"]
pub type DAC1EN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `DAC1EN`"]
pub struct DAC1EN_W<'a> {
w: &'a mut W,
}
impl<'a> DAC1EN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 16)) | (((value as u32) & 0x01) << 16);
self.w
}
}
#[doc = "Reader of field `DAC2EN`"]
pub type DAC2EN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `DAC2EN`"]
pub struct DAC2EN_W<'a> {
w: &'a mut W,
}
impl<'a> DAC2EN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 17)) | (((value as u32) & 0x01) << 17);
self.w
}
}
#[doc = "Reader of field `DAC3EN`"]
pub type DAC3EN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `DAC3EN`"]
pub struct DAC3EN_W<'a> {
w: &'a mut W,
}
impl<'a> DAC3EN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 18)) | (((value as u32) & 0x01) << 18);
self.w
}
}
#[doc = "Reader of field `DAC4EN`"]
pub type DAC4EN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `DAC4EN`"]
pub struct DAC4EN_W<'a> {
w: &'a mut W,
}
impl<'a> DAC4EN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 19)) | (((value as u32) & 0x01) << 19);
self.w
}
}
#[doc = "Reader of field `AESEN`"]
pub type AESEN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `AESEN`"]
pub struct AESEN_W<'a> {
w: &'a mut W,
}
impl<'a> AESEN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 24)) | (((value as u32) & 0x01) << 24);
self.w
}
}
#[doc = "Reader of field `RNGEN`"]
pub type RNGEN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `RNGEN`"]
pub struct RNGEN_W<'a> {
w: &'a mut W,
}
impl<'a> RNGEN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 26)) | (((value as u32) & 0x01) << 26);
self.w
}
}
// Field readers. The doc strings for bits 14 and 16..18 were copy-paste
// leftovers (DCMI/AES/HASH/RNG) that contradicted the field names
// (ADC345EN, DAC1EN, DAC2EN, DAC3EN); corrected below to match the fields
// and the STM32G4 RCC_AHB2ENR bit layout.
impl R {
#[doc = "Bit 0 - IO port A clock enable"]
#[inline(always)]
pub fn gpioaen(&self) -> GPIOAEN_R {
GPIOAEN_R::new((self.bits & 0x01) != 0)
}
#[doc = "Bit 1 - IO port B clock enable"]
#[inline(always)]
pub fn gpioben(&self) -> GPIOBEN_R {
GPIOBEN_R::new(((self.bits >> 1) & 0x01) != 0)
}
#[doc = "Bit 2 - IO port C clock enable"]
#[inline(always)]
pub fn gpiocen(&self) -> GPIOCEN_R {
GPIOCEN_R::new(((self.bits >> 2) & 0x01) != 0)
}
#[doc = "Bit 3 - IO port D clock enable"]
#[inline(always)]
pub fn gpioden(&self) -> GPIODEN_R {
GPIODEN_R::new(((self.bits >> 3) & 0x01) != 0)
}
#[doc = "Bit 4 - IO port E clock enable"]
#[inline(always)]
pub fn gpioeen(&self) -> GPIOEEN_R {
GPIOEEN_R::new(((self.bits >> 4) & 0x01) != 0)
}
#[doc = "Bit 5 - IO port F clock enable"]
#[inline(always)]
pub fn gpiofen(&self) -> GPIOFEN_R {
GPIOFEN_R::new(((self.bits >> 5) & 0x01) != 0)
}
#[doc = "Bit 6 - IO port G clock enable"]
#[inline(always)]
pub fn gpiogen(&self) -> GPIOGEN_R {
GPIOGEN_R::new(((self.bits >> 6) & 0x01) != 0)
}
#[doc = "Bit 13 - ADC12 clock enable"]
#[inline(always)]
pub fn adc12en(&self) -> ADC12EN_R {
ADC12EN_R::new(((self.bits >> 13) & 0x01) != 0)
}
#[doc = "Bit 14 - ADC345 clock enable"]
#[inline(always)]
pub fn adc345en(&self) -> ADC345EN_R {
ADC345EN_R::new(((self.bits >> 14) & 0x01) != 0)
}
#[doc = "Bit 16 - DAC1 clock enable"]
#[inline(always)]
pub fn dac1en(&self) -> DAC1EN_R {
DAC1EN_R::new(((self.bits >> 16) & 0x01) != 0)
}
#[doc = "Bit 17 - DAC2 clock enable"]
#[inline(always)]
pub fn dac2en(&self) -> DAC2EN_R {
DAC2EN_R::new(((self.bits >> 17) & 0x01) != 0)
}
#[doc = "Bit 18 - DAC3 clock enable"]
#[inline(always)]
pub fn dac3en(&self) -> DAC3EN_R {
DAC3EN_R::new(((self.bits >> 18) & 0x01) != 0)
}
#[doc = "Bit 19 - DAC4 clock enable"]
#[inline(always)]
pub fn dac4en(&self) -> DAC4EN_R {
DAC4EN_R::new(((self.bits >> 19) & 0x01) != 0)
}
#[doc = "Bit 24 - AES clock enable"]
#[inline(always)]
pub fn aesen(&self) -> AESEN_R {
AESEN_R::new(((self.bits >> 24) & 0x01) != 0)
}
#[doc = "Bit 26 - Random Number Generator clock enable"]
#[inline(always)]
pub fn rngen(&self) -> RNGEN_R {
RNGEN_R::new(((self.bits >> 26) & 0x01) != 0)
}
}
// Field writers. Doc strings for bits 14 and 16..18 carried the same
// copy-paste errors as the reader impl (DCMI/AES/HASH/RNG instead of
// ADC345/DAC1/DAC2/DAC3); corrected to match the field names.
impl W {
#[doc = "Bit 0 - IO port A clock enable"]
#[inline(always)]
pub fn gpioaen(&mut self) -> GPIOAEN_W {
GPIOAEN_W { w: self }
}
#[doc = "Bit 1 - IO port B clock enable"]
#[inline(always)]
pub fn gpioben(&mut self) -> GPIOBEN_W {
GPIOBEN_W { w: self }
}
#[doc = "Bit 2 - IO port C clock enable"]
#[inline(always)]
pub fn gpiocen(&mut self) -> GPIOCEN_W {
GPIOCEN_W { w: self }
}
#[doc = "Bit 3 - IO port D clock enable"]
#[inline(always)]
pub fn gpioden(&mut self) -> GPIODEN_W {
GPIODEN_W { w: self }
}
#[doc = "Bit 4 - IO port E clock enable"]
#[inline(always)]
pub fn gpioeen(&mut self) -> GPIOEEN_W {
GPIOEEN_W { w: self }
}
#[doc = "Bit 5 - IO port F clock enable"]
#[inline(always)]
pub fn gpiofen(&mut self) -> GPIOFEN_W {
GPIOFEN_W { w: self }
}
#[doc = "Bit 6 - IO port G clock enable"]
#[inline(always)]
pub fn gpiogen(&mut self) -> GPIOGEN_W {
GPIOGEN_W { w: self }
}
#[doc = "Bit 13 - ADC12 clock enable"]
#[inline(always)]
pub fn adc12en(&mut self) -> ADC12EN_W {
ADC12EN_W { w: self }
}
#[doc = "Bit 14 - ADC345 clock enable"]
#[inline(always)]
pub fn adc345en(&mut self) -> ADC345EN_W {
ADC345EN_W { w: self }
}
#[doc = "Bit 16 - DAC1 clock enable"]
#[inline(always)]
pub fn dac1en(&mut self) -> DAC1EN_W {
DAC1EN_W { w: self }
}
#[doc = "Bit 17 - DAC2 clock enable"]
#[inline(always)]
pub fn dac2en(&mut self) -> DAC2EN_W {
DAC2EN_W { w: self }
}
#[doc = "Bit 18 - DAC3 clock enable"]
#[inline(always)]
pub fn dac3en(&mut self) -> DAC3EN_W {
DAC3EN_W { w: self }
}
#[doc = "Bit 19 - DAC4 clock enable"]
#[inline(always)]
pub fn dac4en(&mut self) -> DAC4EN_W {
DAC4EN_W { w: self }
}
#[doc = "Bit 24 - AES clock enable"]
#[inline(always)]
pub fn aesen(&mut self) -> AESEN_W {
AESEN_W { w: self }
}
#[doc = "Bit 26 - Random Number Generator clock enable"]
#[inline(always)]
pub fn rngen(&mut self) -> RNGEN_W {
RNGEN_W { w: self }
}
}
|
use winnow::prelude::*;
mod parser;
use parser::hex_color;
/// Parses the first CLI argument (default "#AAAAAA") as a hex colour and
/// prints either the parsed `Color` or the parse error.
fn main() -> Result<(), lexopt::Error> {
let args = Args::parse()?;
// Fall back to a neutral grey when no positional argument was given.
let input = args.input.as_deref().unwrap_or("#AAAAAA");
println!("{} =", input);
match hex_color.parse(input) {
Ok(result) => {
println!("  {:?}", result);
}
Err(err) => {
// Parse failures are reported on stdout, not as a process error.
println!("  {}", err);
}
}
Ok(())
}
/// Parsed command-line options.
#[derive(Default)]
struct Args {
// Optional positional argument: the colour string to parse.
input: Option<String>,
}
impl Args {
/// Reads the process arguments: positional values become the colour
/// string (the last one wins); any flag/option is rejected.
fn parse() -> Result<Self, lexopt::Error> {
use lexopt::prelude::*;
let mut res = Args::default();
let mut args = lexopt::Parser::from_env();
while let Some(arg) = args.next()? {
match arg {
Value(input) => {
res.input = Some(input.string()?);
}
// Anything that is not a bare value is an error.
_ => return Err(arg.unexpected()),
}
}
Ok(res)
}
}
// Sanity check: "#2F14DF" decodes to RGB (47, 20, 223) with no trailing
// input left over.
#[test]
fn parse_color() {
assert_eq!(
hex_color.parse_peek("#2F14DF"),
Ok((
"",
parser::Color {
red: 47,
green: 20,
blue: 223,
}
))
);
}
|
use super::error::{PineError, PineErrorKind};
use super::input::{Input, StrRange};
use std::cell::{Cell, RefCell};
/// A parse error annotated with the source range it covers.
#[derive(Debug, PartialEq, Clone)]
pub struct PineInputError {
// What went wrong (see `PineErrorKind`).
pub code: PineErrorKind,
// Where in the input it happened.
pub range: StrRange,
}
impl PineInputError {
pub fn new(code: PineErrorKind, range: StrRange) -> PineInputError {
PineInputError { code, range }
}
}
/// Mutable parse-session state: collected errors plus the current scope
/// nesting depth, behind interior mutability so parser code can record
/// into a shared `&AstState`.
pub struct AstState {
// Errors accumulated while parsing; drained by `into_inner`.
errors: RefCell<Vec<PineInputError>>,
// Current scope nesting level (see `enter_scope`/`exit_scope`).
indent: Cell<usize>,
}
impl AstState {
/// Creates an empty state: no errors, zero indentation.
pub fn new() -> AstState {
AstState {
errors: RefCell::new(vec![]),
indent: Cell::new(0),
}
}
/// Increments the nesting depth on entering a scope.
pub fn enter_scope(&self) {
self.indent.replace(self.indent.get() + 1);
}
/// Decrements the nesting depth; must pair with `enter_scope`
/// (debug-asserts against underflow).
pub fn exit_scope(&self) {
debug_assert!(self.indent.get() > 0);
self.indent.replace(self.indent.get() - 1);
}
/// Returns the current scope nesting level.
pub fn get_indent(&self) -> usize {
self.indent.get()
}
/// Folds a low-level parse error into the collected list. Generic
/// nom/char/context failures are normalized to `NonRecongnizeStmt`;
/// any richer kind is recorded as-is. Only the most recent error frame
/// (the last element of `err.errors`) is kept.
pub fn merge_pine_error(&self, mut err: PineError<Input>) {
match err.errors.pop() {
None => (),
Some((input, kind)) => match kind {
PineErrorKind::Nom(_) | PineErrorKind::Char(_) | PineErrorKind::Context(_) => self
.catch(PineInputError::new(
PineErrorKind::NonRecongnizeStmt,
StrRange::new(input.start, input.end),
)),
_ => self.catch(PineInputError::new(
kind,
StrRange::new(input.start, input.end),
)),
},
}
}
/// Records one error.
pub fn catch(&self, err: PineInputError) {
self.errors.borrow_mut().push(err);
}
/// True when no errors have been recorded so far.
pub fn is_ok(&self) -> bool {
self.errors.borrow().is_empty()
}
/// Drains and returns all recorded errors, leaving the list empty.
/// NOTE(review): takes `&self` despite the `into_` name — the state
/// remains usable afterwards; a name like `take_errors` would fit the
/// Rust naming convention better (renaming would break callers).
pub fn into_inner(&self) -> Vec<PineInputError> {
self.errors.replace(vec![])
}
}
|
#[link(name = "foo", kind = "static-nobundle")]
//~^ WARNING: link kind `static-nobundle` has been superseded by specifying modifier `-bundle` with link kind `static`
//~^^ ERROR: link kind `static-nobundle` is unstable
extern "C" {}
fn main() {}
|
use rand::prelude::*;
// Unit tests live next to the code and are compiled only under `cargo test`.
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_works() {
assert_eq!(add_one(1), 2);
}
}
/// Increments the given integer by one.
pub fn add_one(a: i32) -> i32 {
    // Same arithmetic as before, constant written first.
    1 + a
}
/// Adds a uniformly random `i32` to `a`.
///
/// Uses `wrapping_add` because `rng.gen::<i32>()` spans the full i32 range,
/// so plain `a + y` overflows — and panics in debug builds — roughly half
/// the time for any nonzero `a`. Wrapping also matches default release-mode
/// behavior, making debug and release consistent.
pub fn add_rand(a: i32) -> i32 {
    let mut rng = rand::thread_rng();
    let y: i32 = rng.gen();
    a.wrapping_add(y)
}
|
/// Converts a packed BCD byte (e.g. 0x59) into its binary value (59).
pub fn bcd_to_binary(bcd: u8) -> u8 {
    let tens = bcd >> 4;
    let ones = bcd & 0x0f;
    tens * 10 + ones
}
/// Reads one CMOS/RTC register via the legacy index/data port pair.
///
/// NOTE(review): assumes ring-0 (kernel) context for raw port I/O, and that
/// the caller handles the NMI-disable bit on port 0x70 and any BCD decoding
/// (see `bcd_to_binary`) — confirm at call sites.
pub fn read(register: u8) -> u8 {
use x86_64::instructions::port::Port;
// Port 0x70 selects the CMOS register; port 0x71 returns its value.
let mut port_70 = Port::new(0x70);
let mut port_71 = Port::new(0x71);
unsafe {
port_70.write(register);
port_71.read()
}
}
|
use std::sync::Arc;
use thiserror::Error;
use crate::internals::{
entity::EntityLocation,
insert::ArchetypeSource,
query::filter::{FilterResult, LayoutFilter},
storage::{
archetype::{Archetype, EntityLayout},
component::{Component, ComponentTypeId},
ComponentStorage, Components, UnknownComponentStorage,
},
subworld::ComponentAccess,
world::World,
};
/// An error type which describes why an attempt to retrieve a component failed.
///
/// Each variant carries both the component's type ID (for programmatic
/// handling) and its type name (interpolated into the `thiserror` message).
#[derive(Error, Copy, Clone, Debug, PartialEq, Hash)]
pub enum ComponentError {
    /// The component was not found on the entity.
    #[error("the component {component_name} was not found on the entity")]
    NotFound {
        /// The type ID of the component.
        component_type: ComponentTypeId,
        /// The type name of the component.
        component_name: &'static str,
    },
    /// The world does not allow access to the component.
    #[error("the world does not declare appropriate access to {component_name}, \
consider adding a query which contains `{component_name}` and this entity in its result set to the system, \
or use `SystemBuilder::read_component` to declare global access over all entities")]
    Denied {
        /// The type ID of the component.
        component_type: ComponentTypeId,
        /// The type name of the component.
        component_name: &'static str,
    },
}
/// Provides safe read-only access to an entity's components.
pub struct EntryRef<'a> {
    // Where the entity lives: archetype index plus component slot.
    pub(crate) location: EntityLocation,
    // All component storages in the world.
    pub(crate) components: &'a Components,
    // All archetypes in the world, indexed by `location.archetype()`.
    pub(crate) archetypes: &'a [Archetype],
    // Which component types this entry is permitted to read/write.
    pub(crate) allowed_components: ComponentAccess<'a>,
}
impl<'a> EntryRef<'a> {
    // Internal constructor; the world hands out entries with the appropriate
    // `ComponentAccess` already computed.
    pub(crate) fn new(
        location: EntityLocation,
        components: &'a Components,
        archetypes: &'a [Archetype],
        allowed_components: ComponentAccess<'a>,
    ) -> Self {
        Self {
            location,
            components,
            archetypes,
            allowed_components,
        }
    }
    /// Returns the entity's archetype.
    pub fn archetype(&self) -> &Archetype {
        &self.archetypes[self.location.archetype()]
    }
    /// Returns the entity's location.
    pub fn location(&self) -> EntityLocation {
        self.location
    }
    /// Returns a reference to one of the entity's components.
    ///
    /// Fails with `Denied` when this entry was not granted read access to
    /// `T`, or `NotFound` when the entity's archetype has no `T`.
    pub fn into_component<T: Component>(self) -> Result<&'a T, ComponentError> {
        let component_type = ComponentTypeId::of::<T>();
        // Access control first: only components declared readable by the
        // entry's `ComponentAccess` may be seen.
        if !self.allowed_components.allows_read(component_type) {
            return Err(ComponentError::Denied {
                component_type,
                component_name: std::any::type_name::<T>(),
            });
        }
        let component = self.location.component();
        let archetype = self.location.archetype();
        // Lookup chain: typed storage -> this archetype's slice -> slot.
        // A miss at any step means the entity has no `T`.
        self.components
            .get_downcast::<T>()
            .and_then(move |storage| storage.get(archetype))
            .and_then(move |slice| slice.into_slice().get(component.0))
            .ok_or_else(|| {
                ComponentError::NotFound {
                    component_type,
                    component_name: std::any::type_name::<T>(),
                }
            })
    }
    /// Returns a mutable reference to one of the entity's components.
    ///
    /// # Safety
    /// This function bypasses static borrow checking. The caller must ensure that the component reference
    /// will not be mutably aliased.
    pub unsafe fn into_component_unchecked<T: Component>(
        self,
    ) -> Result<&'a mut T, ComponentError> {
        let component_type = ComponentTypeId::of::<T>();
        // Mutable access requires write permission, not just read.
        if !self.allowed_components.allows_write(component_type) {
            return Err(ComponentError::Denied {
                component_type,
                component_name: std::any::type_name::<T>(),
            });
        }
        let component = self.location.component();
        let archetype = self.location.archetype();
        self.components
            .get_downcast::<T>()
            .and_then(move |storage| storage.get_mut(archetype))
            .and_then(move |slice| slice.into_slice().get_mut(component.0))
            .ok_or_else(|| {
                ComponentError::NotFound {
                    component_type,
                    component_name: std::any::type_name::<T>(),
                }
            })
    }
    /// Returns a reference to one of the entity's components.
    ///
    /// Borrowing variant of [`EntryRef::into_component`]; same access rules.
    pub fn get_component<T: Component>(&self) -> Result<&T, ComponentError> {
        let component_type = ComponentTypeId::of::<T>();
        if !self.allowed_components.allows_read(component_type) {
            return Err(ComponentError::Denied {
                component_type,
                component_name: std::any::type_name::<T>(),
            });
        }
        let component = self.location.component();
        let archetype = self.location.archetype();
        self.components
            .get_downcast::<T>()
            .and_then(move |storage| storage.get(archetype))
            .and_then(move |slice| slice.into_slice().get(component.0))
            .ok_or_else(|| {
                ComponentError::NotFound {
                    component_type,
                    component_name: std::any::type_name::<T>(),
                }
            })
    }
    /// Returns a mutable reference to one of the entity's components.
    ///
    /// # Safety
    /// This function bypasses static borrow checking. The caller must ensure that the component reference
    /// will not be mutably aliased.
    pub unsafe fn get_component_unchecked<T: Component>(&self) -> Result<&mut T, ComponentError> {
        let component_type = ComponentTypeId::of::<T>();
        if !self.allowed_components.allows_write(component_type) {
            return Err(ComponentError::Denied {
                component_type,
                component_name: std::any::type_name::<T>(),
            });
        }
        let component = self.location.component();
        let archetype = self.location.archetype();
        self.components
            .get_downcast::<T>()
            .and_then(move |storage| storage.get_mut(archetype))
            .and_then(move |slice| slice.into_slice().get_mut(component.0))
            .ok_or_else(|| {
                ComponentError::NotFound {
                    component_type,
                    component_name: std::any::type_name::<T>(),
                }
            })
    }
}
/// Provides safe read and write access to an entity's components.
pub struct EntryMut<'a> {
    // Where the entity lives: archetype index plus component slot.
    pub(crate) location: EntityLocation,
    // All component storages in the world.
    pub(crate) components: &'a Components,
    // All archetypes in the world, indexed by `location.archetype()`.
    pub(crate) archetypes: &'a [Archetype],
    // Which component types this entry is permitted to read/write.
    pub(crate) allowed_components: ComponentAccess<'a>,
}
impl<'a> EntryMut<'a> {
    // Internal constructor.
    //
    // NOTE(review): marked `unsafe` presumably because the caller must
    // guarantee this is the only live entry for the entity (see the safety
    // comments in `get_component_mut`/`into_component_mut`) — confirm.
    pub(crate) unsafe fn new(
        location: EntityLocation,
        components: &'a Components,
        archetypes: &'a [Archetype],
        allowed_components: ComponentAccess<'a>,
    ) -> Self {
        Self {
            location,
            components,
            archetypes,
            allowed_components,
        }
    }
    /// Returns the entity's archetype.
    pub fn archetype(&self) -> &Archetype {
        &self.archetypes[self.location.archetype()]
    }
    /// Returns the entity's location.
    pub fn location(&self) -> EntityLocation {
        self.location
    }
    /// Returns a reference to one of the entity's components.
    ///
    /// Fails with `Denied` when this entry was not granted read access to
    /// `T`, or `NotFound` when the entity's archetype has no `T`.
    pub fn into_component<T: Component>(self) -> Result<&'a T, ComponentError> {
        let component_type = ComponentTypeId::of::<T>();
        if !self.allowed_components.allows_read(component_type) {
            return Err(ComponentError::Denied {
                component_type,
                component_name: std::any::type_name::<T>(),
            });
        }
        let component = self.location.component();
        let archetype = self.location.archetype();
        // Lookup chain: typed storage -> this archetype's slice -> slot.
        self.components
            .get_downcast::<T>()
            .and_then(move |storage| storage.get(archetype))
            .and_then(move |slice| slice.into_slice().get(component.0))
            .ok_or_else(|| {
                ComponentError::NotFound {
                    component_type,
                    component_name: std::any::type_name::<T>(),
                }
            })
    }
    /// Returns a mutable reference to one of the entity's components.
    ///
    /// # Safety
    /// This function bypasses static borrow checking. The caller must ensure that the component reference
    /// will not be mutably aliased.
    pub unsafe fn into_component_unchecked<T: Component>(
        self,
    ) -> Result<&'a mut T, ComponentError> {
        let component_type = ComponentTypeId::of::<T>();
        // Mutable access requires write permission, not just read.
        if !self.allowed_components.allows_write(component_type) {
            return Err(ComponentError::Denied {
                component_type,
                component_name: std::any::type_name::<T>(),
            });
        }
        let component = self.location.component();
        let archetype = self.location.archetype();
        self.components
            .get_downcast::<T>()
            .and_then(move |storage| storage.get_mut(archetype))
            .and_then(move |slice| slice.into_slice().get_mut(component.0))
            .ok_or_else(|| {
                ComponentError::NotFound {
                    component_type,
                    component_name: std::any::type_name::<T>(),
                }
            })
    }
    /// Returns a mutable reference to one of the entity's components.
    pub fn into_component_mut<T: Component>(self) -> Result<&'a mut T, ComponentError> {
        // safety: we have exclusive access to the entry.
        // the world must ensure that mut entries handed out are unique
        unsafe { self.into_component_unchecked() }
    }
    /// Returns a reference to one of the entity's components.
    pub fn get_component<T: Component>(&self) -> Result<&T, ComponentError> {
        let component_type = ComponentTypeId::of::<T>();
        // Reuse the id computed above instead of recomputing
        // `ComponentTypeId::of::<T>()`, matching the sibling accessors.
        if !self.allowed_components.allows_read(component_type) {
            return Err(ComponentError::Denied {
                component_type,
                component_name: std::any::type_name::<T>(),
            });
        }
        let component = self.location.component();
        let archetype = self.location.archetype();
        self.components
            .get_downcast::<T>()
            .and_then(move |storage| storage.get(archetype))
            .and_then(move |slice| slice.into_slice().get(component.0))
            .ok_or_else(|| {
                ComponentError::NotFound {
                    component_type,
                    component_name: std::any::type_name::<T>(),
                }
            })
    }
    /// Returns a mutable reference to one of the entity's components.
    ///
    /// # Safety
    /// The caller must ensure that the component reference will not be mutably aliased.
    pub unsafe fn get_component_unchecked<T: Component>(&self) -> Result<&mut T, ComponentError> {
        let component_type = ComponentTypeId::of::<T>();
        // Reuse the id computed above instead of recomputing it.
        if !self.allowed_components.allows_write(component_type) {
            return Err(ComponentError::Denied {
                component_type,
                component_name: std::any::type_name::<T>(),
            });
        }
        let component = self.location.component();
        let archetype = self.location.archetype();
        self.components
            .get_downcast::<T>()
            .and_then(move |storage| storage.get_mut(archetype))
            .and_then(move |slice| slice.into_slice().get_mut(component.0))
            .ok_or_else(|| {
                ComponentError::NotFound {
                    component_type,
                    component_name: std::any::type_name::<T>(),
                }
            })
    }
    /// Returns a mutable reference to one of the entity's components.
    pub fn get_component_mut<T: Component>(&mut self) -> Result<&mut T, ComponentError> {
        // safety: we have exclusive access to the entry.
        // the world must ensure that mut entries handed out are unique
        unsafe { self.get_component_unchecked() }
    }
}
/// Provides safe read and write access to an entity's components, and the ability to modify the entity.
pub struct Entry<'a> {
    // Where the entity lives; updated in place when the entity is moved
    // between archetypes by `add_component`/`remove_component`.
    location: EntityLocation,
    // Exclusive world access, which is what permits structural changes.
    world: &'a mut World,
}
impl<'a> Entry<'a> {
    // Internal constructor; safe because `&'a mut World` already guarantees
    // exclusive access.
    pub(crate) fn new(location: EntityLocation, world: &'a mut World) -> Self {
        Self { location, world }
    }
    /// Returns the entity's archetype.
    pub fn archetype(&self) -> &Archetype {
        &self.world.archetypes()[self.location.archetype()]
    }
    /// Returns the entity's location.
    pub fn location(&self) -> EntityLocation {
        self.location
    }
    /// Returns a reference to one of the entity's components.
    ///
    /// Unlike `EntryRef`/`EntryMut`, no access check is needed: this entry
    /// holds `&mut World`, so it may touch any component.
    pub fn into_component<T: Component>(self) -> Result<&'a T, ComponentError> {
        let component = self.location.component();
        let archetype = self.location.archetype();
        // Lookup chain: typed storage -> this archetype's slice -> slot.
        self.world
            .components()
            .get_downcast::<T>()
            .and_then(move |storage| storage.get(archetype))
            .and_then(move |slice| slice.into_slice().get(component.0))
            .ok_or_else(|| {
                ComponentError::NotFound {
                    component_type: ComponentTypeId::of::<T>(),
                    component_name: std::any::type_name::<T>(),
                }
            })
    }
    /// Returns a mutable reference to one of the entity's components.
    pub fn into_component_mut<T: Component>(self) -> Result<&'a mut T, ComponentError> {
        // safety: we have exclusive access to both the entry and the world
        unsafe { self.into_component_unchecked() }
    }
    /// Returns a mutable reference to one of the entity's components.
    ///
    /// # Safety
    /// This function bypasses static borrow checking. The caller must ensure that the component reference
    /// will not be mutably aliased.
    pub unsafe fn into_component_unchecked<T: Component>(
        self,
    ) -> Result<&'a mut T, ComponentError> {
        let component = self.location.component();
        let archetype = self.location.archetype();
        self.world
            .components()
            .get_downcast::<T>()
            .and_then(move |storage| storage.get_mut(archetype))
            .and_then(move |slice| slice.into_slice().get_mut(component.0))
            .ok_or_else(|| {
                ComponentError::NotFound {
                    component_type: ComponentTypeId::of::<T>(),
                    component_name: std::any::type_name::<T>(),
                }
            })
    }
    /// Returns a reference to one of the entity's components.
    pub fn get_component<T: Component>(&self) -> Result<&T, ComponentError> {
        let component = self.location.component();
        let archetype = self.location.archetype();
        self.world
            .components()
            .get_downcast::<T>()
            .and_then(move |storage| storage.get(archetype))
            .and_then(move |slice| slice.into_slice().get(component.0))
            .ok_or_else(|| {
                ComponentError::NotFound {
                    component_type: ComponentTypeId::of::<T>(),
                    component_name: std::any::type_name::<T>(),
                }
            })
    }
    /// Returns a mutable reference to one of the entity's components.
    pub fn get_component_mut<T: Component>(&mut self) -> Result<&mut T, ComponentError> {
        // safety: we have exclusive access to both the entry and the world
        unsafe { self.get_component_unchecked() }
    }
    /// Returns a mutable reference to one of the entity's components.
    ///
    /// # Safety
    /// This function bypasses static borrow checking. The caller must ensure that the component reference
    /// will not be mutably aliased.
    pub unsafe fn get_component_unchecked<T: Component>(&self) -> Result<&mut T, ComponentError> {
        let component = self.location.component();
        let archetype = self.location.archetype();
        self.world
            .components()
            .get_downcast::<T>()
            .and_then(move |storage| storage.get_mut(archetype))
            .and_then(move |slice| slice.into_slice().get_mut(component.0))
            .ok_or_else(|| {
                ComponentError::NotFound {
                    component_type: ComponentTypeId::of::<T>(),
                    component_name: std::any::type_name::<T>(),
                }
            })
    }
    /// Adds a new component to the entity.
    /// If the component already exists, its value will be replaced.
    pub fn add_component<T: Component>(&mut self, component: T) {
        // Fast path: the entity already has a `T`; overwrite it in place.
        if let Ok(comp) = self.get_component_mut::<T>() {
            *comp = component;
            return;
        }
        // Slow path: find (or create) the archetype whose layout equals the
        // current layout plus `T`, then move the entity into it.
        let target_arch = {
            let mut source = DynamicArchetype {
                base: self.world.archetypes()[self.location.archetype()]
                    .layout()
                    .clone(),
                add: &[ComponentTypeId::of::<T>()],
                add_constructors: &[|| Box::new(T::Storage::default())],
                remove: &[],
            };
            self.world.get_archetype_for_components(&mut source)
        };
        // SAFETY (review note): transfer_archetype moves the entity's existing
        // components into `target_arch`; the new `T` value is byte-copied into
        // the typed storage and then `mem::forget` prevents its destructor
        // from running on the moved-from local (avoiding a double drop).
        // Presumes extend_memcopy takes logical ownership of the copied
        // bytes — confirm against its contract.
        unsafe {
            let idx = self.world.transfer_archetype(
                self.location.archetype(),
                target_arch,
                self.location.component(),
            );
            self.world
                .components_mut()
                .get_downcast_mut::<T>()
                .unwrap()
                .extend_memcopy(target_arch, &component as *const T, 1);
            std::mem::forget(component);
            // The entity now lives at a new slot in the target archetype.
            self.location = EntityLocation::new(target_arch, idx);
        };
    }
    /// Removes a component from the entity.
    /// Does nothing if the entity does not have the component.
    pub fn remove_component<T: Component>(&mut self) {
        if !self.archetype().layout().has_component::<T>() {
            return;
        }
        // Find (or create) the archetype whose layout equals the current
        // layout minus `T`.
        let target_arch = {
            let mut source = DynamicArchetype {
                base: self.world.archetypes()[self.location.archetype()]
                    .layout()
                    .clone(),
                add: &[],
                add_constructors: &[],
                remove: &[ComponentTypeId::of::<T>()],
            };
            self.world.get_archetype_for_components(&mut source)
        };
        // SAFETY (review note): relies on transfer_archetype to move the
        // remaining components and dispose of the dropped `T` — confirm
        // against its contract.
        unsafe {
            let idx = self.world.transfer_archetype(
                self.location.archetype(),
                target_arch,
                self.location.component(),
            );
            self.location = EntityLocation::new(target_arch, idx);
        };
    }
}
// Describes a target archetype as a delta against an existing layout:
// the `base` layout with `add` components added and `remove` removed.
// Used by `Entry::add_component`/`remove_component` to locate or create
// the archetype an entity should move to.
#[derive(Clone)]
struct DynamicArchetype<'a> {
    // The starting layout (the entity's current archetype layout).
    base: Arc<EntityLayout>,
    // Component types to add, with matching storage constructors below.
    add: &'a [ComponentTypeId],
    add_constructors: &'a [fn() -> Box<dyn UnknownComponentStorage>],
    // Component types to drop from the base layout.
    remove: &'a [ComponentTypeId],
}
impl<'a> LayoutFilter for DynamicArchetype<'a> {
    /// Matches component sets equal to the base layout with `add` added and
    /// `remove` removed: same cardinality, no removed types present, and
    /// every type drawn from either the base layout or the additions.
    fn matches_layout(&self, components: &[ComponentTypeId]) -> FilterResult {
        let base = self.base.component_types();
        let expected_len = base.len() + self.add.len() - self.remove.len();
        let contents_ok = components.iter().all(|t| {
            !self.remove.contains(t)
                && (base.iter().any(|x| x == t) || self.add.iter().any(|x| x == t))
        });
        FilterResult::Match(components.len() == expected_len && contents_ok)
    }
}
impl<'a> ArchetypeSource for DynamicArchetype<'a> {
    type Filter = Self;
    fn filter(&self) -> Self::Filter {
        self.clone()
    }
    /// Builds the concrete target layout: base layout minus `remove`,
    /// plus `add`.
    fn layout(&mut self) -> EntityLayout {
        let mut layout = EntityLayout::new();
        // Carry over every base component that is not being removed.
        for (type_id, constructor) in self
            .base
            .component_types()
            .iter()
            .zip(self.base.component_constructors())
        {
            if !self.remove.contains(type_id) {
                // SAFETY (review note): the constructor is the one the source
                // layout registered for this type id — assumed to satisfy
                // register_component_raw's matching-constructor contract.
                unsafe { layout.register_component_raw(*type_id, *constructor) };
            }
        }
        // Append the newly added components with their storage constructors
        // (the two slices are built in lockstep by the callers above).
        for (type_id, constructor) in self.add.iter().zip(self.add_constructors.iter()) {
            unsafe { layout.register_component_raw(*type_id, *constructor) };
        }
        layout
    }
}
#[cfg(test)]
mod test {
    use crate::internals::{
        query::{view::read::Read, IntoQuery},
        world::World,
    };
    // Adding a component moves the entity into a new archetype and makes it
    // visible to queries over that component.
    #[test]
    #[allow(clippy::float_cmp)]
    fn add_component() {
        let mut world = World::default();
        let entities = world.extend(vec![(1usize, true), (2usize, false)]).to_vec();
        let mut query = Read::<f32>::query();
        assert_eq!(query.iter(&world).count(), 0);
        {
            let mut entry = world.entry(entities[0]).unwrap();
            assert!(entry.get_component::<f32>().is_err());
            entry.add_component(5f32);
            assert_eq!(entry.get_component::<f32>(), Ok(&5f32));
        }
        let after = query.iter(&world).collect::<Vec<_>>();
        assert_eq!(after.len(), 1);
        assert_eq!(after[0], &5f32);
    }
    // Removing a component hides the entity from queries over that component
    // while leaving the other entity untouched.
    #[test]
    fn remove_component() {
        let mut world = World::default();
        let entities = world.extend(vec![(1usize, true), (2usize, false)]).to_vec();
        let mut query = Read::<usize>::query();
        let before = query.iter(&world).collect::<Vec<_>>();
        assert_eq!(before.len(), 2);
        assert_eq!(before[0], &1usize);
        assert_eq!(before[1], &2usize);
        {
            let mut entry = world.entry(entities[0]).unwrap();
            assert_eq!(entry.get_component::<usize>(), Ok(&1usize));
            entry.remove_component::<usize>();
            assert!(entry.get_component::<usize>().is_err());
        }
        let after = query.iter(&world).collect::<Vec<_>>();
        assert_eq!(after.len(), 1);
        assert_eq!(after[0], &2usize);
    }
}
|
use itertools::Itertools;
use std::cmp::{max, min};
use whiteread::parse_line;
use std::collections::HashMap;
use std::collections::HashSet;
/// Reads `n` pillar coordinates and prints the largest squared side length of
/// any axis-unaligned square whose four corners are all pillars.
///
/// For each ordered-by-x pair of pillars, the other two corners of a square
/// built on that edge are found by rotating the edge vector (disx, disy)
/// 90 degrees in either direction; both rotations must be checked.
fn main() {
    let n: usize = parse_line().unwrap();
    let mut hashiras_vec: Vec<(i64, i64)> = vec![];
    let mut hashiras_set: HashSet<(i64, i64)> = HashSet::new();
    for _ in 0..n {
        let hashira: Vec<i64> = parse_line().unwrap();
        hashiras_vec.push((hashira[0], hashira[1]));
        // The set gives O(1) corner-membership checks below.
        hashiras_set.insert((hashira[0], hashira[1]));
    }
    let mut ans = 0;
    for h in hashiras_vec.into_iter().combinations(2) {
        // Order the pair by x so the rotation arithmetic is deterministic.
        let (left, right) = if h[0].0 > h[1].0 {
            (h[1], h[0])
        } else {
            (h[0], h[1])
        };
        // Coordinates are already i64; no casts needed.
        let disx = right.0 - left.0;
        let disy = right.1 - left.1;
        // The two candidate "third corners", one per rotation direction;
        // the fourth corner is always the third shifted by (disx, disy).
        let candidates = [
            (left.0 - disy, left.1 + disx),
            (left.0 + disy, left.1 - disx),
        ];
        for (onex, oney) in candidates.iter().copied() {
            if hashiras_set.contains(&(onex, oney))
                && hashiras_set.contains(&(onex + disx, oney + disy))
            {
                ans = max(ans, disx.pow(2) + disy.pow(2));
                break;
            }
        }
    }
    println!("{}", ans);
}
|
use arrow_util::assert_batches_sorted_eq;
use compactor_test_utils::{format_files, list_object_store, TestSetup};
use data_types::{CompactionLevel, ParquetFile, PartitionId};
mod layouts;
// Compacting a partition that has no files is a no-op: the catalog stays empty.
#[tokio::test]
async fn test_compact_no_file() {
    test_helpers::maybe_start_logging();
    // no files
    let setup = TestSetup::builder().await.build().await;
    let files = setup.list_by_table_not_to_delete().await;
    assert!(files.is_empty());
    // compact
    setup.run_compact().await;
    // verify catalog is still empty
    let files = setup.list_by_table_not_to_delete().await;
    assert!(files.is_empty());
}
// When there are more L0 files than max_num_files_per_plan, compaction splits
// the work: only up to the limit of L0s are compacted per plan, and L1->L2
// compaction is skipped because the L1 count is under its threshold.
#[tokio::test]
async fn test_num_files_over_limit() {
    test_helpers::maybe_start_logging();
    // Create a test setup with 6 files
    let setup = TestSetup::builder()
        .await
        .with_files()
        .await
        // Set max num file to 4 (< num files) --> many L0s files, compact 4 L0s into 2 L0s
        .with_max_num_files_per_plan(4)
        // Not compact L1s into L2s because the number of L1s < 5
        .with_min_num_l1_files_to_compact(5)
        .build()
        .await;
    // verify 6 files
    let files = setup.list_by_table_not_to_delete().await;
    assert_eq!(files.len(), 6);
    // verify ID and compaction level of the files
    assert_levels(
        &files,
        vec![
            (1, CompactionLevel::FileNonOverlapped),
            (2, CompactionLevel::Initial),
            (3, CompactionLevel::Initial),
            (4, CompactionLevel::FileNonOverlapped),
            (5, CompactionLevel::Initial),
            (6, CompactionLevel::Initial),
        ],
    );
    setup.run_compact().await;
    //
    // read files and verify 2 files
    let files = setup.list_by_table_not_to_delete().await;
    assert_eq!(files.len(), 2);
    //
    // verify ID and compaction level of the files
    // Original IDs of files: 1, 2, 3, 4, 5, 6
    // 4 L0s files are compacted into 2 new L0s files with IDs 7, 8
    // Then these 2 new L0s files are compacted with overlapped L1 files into 2 new L1s files with IDs 9, 10
    assert_levels(
        &files,
        vec![
            (9, CompactionLevel::FileNonOverlapped),
            (10, CompactionLevel::FileNonOverlapped),
        ],
    );
}
// Full two-round compaction: all 6 files fit in one plan, so L0s are compacted
// into L1s and then L1s into final L2s; verifies levels, max_l0_created_at
// propagation, and the compacted file contents.
#[tokio::test]
async fn test_compact_target_level() {
    test_helpers::maybe_start_logging();
    // Create a test setup with 6 files
    let setup = TestSetup::builder()
        .await
        .with_files()
        .await
        // Ensure we have enough resource to compact the files
        .with_max_num_files_per_plan(10)
        .with_min_num_l1_files_to_compact(2)
        .build()
        .await;
    // verify 6 files
    let files = setup.list_by_table_not_to_delete().await;
    assert_levels(
        &files,
        vec![
            (1, CompactionLevel::FileNonOverlapped),
            (2, CompactionLevel::Initial),
            (3, CompactionLevel::Initial),
            (4, CompactionLevel::FileNonOverlapped),
            (5, CompactionLevel::Initial),
            (6, CompactionLevel::Initial),
        ],
    );
    // verify ID and max_l0_created_at
    let times = setup.test_times();
    assert_max_l0_created_at(
        &files,
        vec![
            (1, times.time_1_minute_future),
            (2, times.time_2_minutes_future),
            (3, times.time_5_minutes_future),
            (4, times.time_1_minute_future),
            (5, times.time_5_minutes_future),
            (6, times.time_2_minutes_future),
        ],
    );
    // compact
    setup.run_compact().await;
    // verify number of files: 6 files are compacted into 2 files
    let files = setup.list_by_table_not_to_delete().await;
    assert_eq!(files.len(), 2);
    assert_levels(
        &files,
        // This is the result of 2-round compaction from L0s -> L1s and then L1s -> L2s
        // The first round will create two L1 files IDs 7 and 8
        // The second round will create two L2 file IDs 9 and 10
        vec![(9, CompactionLevel::Final), (10, CompactionLevel::Final)],
    );
    assert_max_l0_created_at(
        &files,
        // both files have max_l0_created time_5_minutes_future
        // which is the max of all L0 input's max_l0_created_at
        vec![
            (9, times.time_5_minutes_future),
            (10, times.time_5_minutes_future),
        ],
    );
    // verify the content of files
    // Compacted smaller file with the later data
    let mut files = setup.list_by_table_not_to_delete().await;
    let file1 = files.pop().unwrap();
    let batches = setup.read_parquet_file(file1).await;
    assert_batches_sorted_eq!(
        &[
            "+-----------+------+------+------+-----------------------------+",
            "| field_int | tag1 | tag2 | tag3 | time |",
            "+-----------+------+------+------+-----------------------------+",
            "| 210 | | OH | 21 | 1970-01-01T00:00:00.000136Z |",
            "+-----------+------+------+------+-----------------------------+",
        ],
        &batches
    );
    // Compacted larger file with the earlier data
    let file0 = files.pop().unwrap();
    let batches = setup.read_parquet_file(file0).await;
    assert_batches_sorted_eq!(
        [
            "+-----------+------+------+------+-----------------------------+",
            "| field_int | tag1 | tag2 | tag3 | time |",
            "+-----------+------+------+------+-----------------------------+",
            "| 10 | VT | | | 1970-01-01T00:00:00.000006Z |",
            "| 10 | VT | | | 1970-01-01T00:00:00.000010Z |",
            "| 10 | VT | | | 1970-01-01T00:00:00.000068Z |",
            "| 1500 | WA | | | 1970-01-01T00:00:00.000008Z |",
            "| 1601 | | PA | 15 | 1970-01-01T00:00:00.000030Z |",
            "| 22 | | OH | 21 | 1970-01-01T00:00:00.000036Z |",
            "| 270 | UT | | | 1970-01-01T00:00:00.000025Z |",
            "| 70 | UT | | | 1970-01-01T00:00:00.000020Z |",
            "| 99 | OR | | | 1970-01-01T00:00:00.000012Z |",
            "+-----------+------+------+------+-----------------------------+",
        ],
        &batches
    );
}
// One wide L1 overlapping three L2s, with total size over the max compact
// size: verifies the resulting L2 split points and per-file contents.
// (Fn name keeps its historical "overlapes" spelling.)
#[tokio::test]
async fn test_compact_large_overlapes() {
    test_helpers::maybe_start_logging();
    // Simulate a production scenario in which there are two L1 files but one overlaps with three L2 files
    // and their total size > limit 256MB
    // |----------L2.1----------||----------L2.2----------||-----L2.3----|
    // |----------------------------------------L1.1---------------------------||--L1.2--|
    let setup = TestSetup::builder()
        .await
        .with_3_l2_2_l1_scenario_1()
        .await
        // the test setup does not exceed number of files limit
        .with_max_num_files_per_plan(10)
        .with_min_num_l1_files_to_compact(2)
        // the test setup to have total file size exceed max compact size limit
        .with_max_desired_file_size_bytes((4 * 1024) as u64)
        .build()
        .await;
    let files = setup.list_by_table_not_to_delete().await;
    // verify 5 files
    insta::assert_yaml_snapshot!(
        format_files("initial", &files),
        @r###"
---
- initial
- "L1 "
- "L1.4[6000,68000] 240s 3kb|------------------L1.4------------------| "
- "L1.5[136000,136000] 300s 2kb |L1.5|"
- "L2 "
- "L2.1[8000,12000] 60s 2kb |L2.1| "
- "L2.2[20000,30000] 120s 3kb |L2.2| "
- "L2.3[36000,36000] 180s 2kb |L2.3| "
"###
    );
    // compact
    setup.run_compact().await;
    let mut files = setup.list_by_table_not_to_delete().await;
    insta::assert_yaml_snapshot!(
        format_files("initial", &files),
        @r###"
---
- initial
- "L2 "
- "L2.6[6000,36000] 300s 3kb|-------L2.6-------| "
- "L2.7[68000,68000] 300s 2kb |L2.7| "
- "L2.8[136000,136000] 300s 3kb |L2.8|"
"###
    );
    assert_eq!(files.len(), 3);
    // order files on their min_time
    files.sort_by_key(|f| f.min_time);
    // time range: [6000,36000]
    let file = files[0].clone();
    let batches = setup.read_parquet_file(file).await;
    assert_batches_sorted_eq!(
        &[
            "+-----------+------+------+------+-----------------------------+",
            "| field_int | tag1 | tag2 | tag3 | time |",
            "+-----------+------+------+------+-----------------------------+",
            "| 10 | VT | | | 1970-01-01T00:00:00.000006Z |",
            "| 10 | VT | | | 1970-01-01T00:00:00.000010Z |",
            "| 1500 | WA | | | 1970-01-01T00:00:00.000008Z |",
            "| 1601 | | PA | 15 | 1970-01-01T00:00:00.000028Z |",
            "| 1601 | | PA | 15 | 1970-01-01T00:00:00.000030Z |",
            "| 21 | | OH | 21 | 1970-01-01T00:00:00.000036Z |",
            "| 270 | UT | | | 1970-01-01T00:00:00.000025Z |",
            "| 70 | UT | | | 1970-01-01T00:00:00.000020Z |",
            "| 99 | OR | | | 1970-01-01T00:00:00.000012Z |",
            "+-----------+------+------+------+-----------------------------+",
        ],
        &batches
    );
    // time range: [68000,68000]
    let file = files[1].clone();
    let batches = setup.read_parquet_file(file).await;
    assert_batches_sorted_eq!(
        &[
            "+-----------+------+------+------+-----------------------------+",
            "| field_int | tag1 | tag2 | tag3 | time |",
            "+-----------+------+------+------+-----------------------------+",
            "| 10 | VT | | | 1970-01-01T00:00:00.000068Z |",
            "+-----------+------+------+------+-----------------------------+",
        ],
        &batches
    );
    // time range: [136000,136000]
    let file = files[2].clone();
    let batches = setup.read_parquet_file(file).await;
    assert_batches_sorted_eq!(
        &[
            "+-----------+------+------+------+-----------------------------+",
            "| field_int | tag1 | tag2 | tag3 | time |",
            "+-----------+------+------+------+-----------------------------+",
            "| 210 | | OH | 21 | 1970-01-01T00:00:00.000136Z |",
            "+-----------+------+------+------+-----------------------------+",
        ],
        &batches
    );
}
// Variant of the scenario above: each of the two L1 files overlaps at least
// two L2 files; the compacted output should be identical to scenario 1.
#[tokio::test]
async fn test_compact_large_overlape_2() {
    test_helpers::maybe_start_logging();
    // Simulate a production scenario in which there are two L1 files that overlap with more than 1 L2 file
    // Scenario 2: two L1 files each overlaps with at least 2 L2 files
    // |----------L2.1----------||----------L2.2----------||-----L2.3----|
    // |----------------------------------------L1.1----||------L1.2--------|
    let setup = TestSetup::builder()
        .await
        .with_3_l2_2_l1_scenario_2()
        .await
        // the test setup does not exceed number of files limit
        .with_max_num_files_per_plan(10)
        .with_min_num_l1_files_to_compact(2)
        // the test setup to have total file size exceed max compact size limit
        .with_max_desired_file_size_bytes((4 * 1024) as u64)
        .build()
        .await;
    // verify 5 files
    let files = setup.list_by_table_not_to_delete().await;
    insta::assert_yaml_snapshot!(
        format_files("initial", &files),
        @r###"
---
- initial
- "L1 "
- "L1.4[6000,25000] 240s 2kb|---L1.4----| "
- "L1.5[28000,136000] 300s 3kb |----------------------------------L1.5----------------------------------| "
- "L2 "
- "L2.1[8000,12000] 60s 2kb |L2.1| "
- "L2.2[20000,30000] 120s 3kb |L2.2| "
- "L2.3[36000,36000] 180s 2kb |L2.3| "
"###
    );
    // compact
    setup.run_compact().await;
    let mut files = setup.list_by_table_not_to_delete().await;
    insta::assert_yaml_snapshot!(
        format_files("initial", &files),
        @r###"
---
- initial
- "L2 "
- "L2.6[6000,36000] 300s 3kb|-------L2.6-------| "
- "L2.7[68000,68000] 300s 2kb |L2.7| "
- "L2.8[136000,136000] 300s 3kb |L2.8|"
"###
    );
    assert_eq!(files.len(), 3);
    // order files on their min_time
    files.sort_by_key(|f| f.min_time);
    // time range: [6000,36000]
    let file = files[0].clone();
    let batches = setup.read_parquet_file(file).await;
    assert_batches_sorted_eq!(
        &[
            "+-----------+------+------+------+-----------------------------+",
            "| field_int | tag1 | tag2 | tag3 | time |",
            "+-----------+------+------+------+-----------------------------+",
            "| 10 | VT | | | 1970-01-01T00:00:00.000006Z |",
            "| 10 | VT | | | 1970-01-01T00:00:00.000010Z |",
            "| 1500 | WA | | | 1970-01-01T00:00:00.000008Z |",
            "| 1601 | | PA | 15 | 1970-01-01T00:00:00.000028Z |",
            "| 1601 | | PA | 15 | 1970-01-01T00:00:00.000030Z |",
            "| 21 | | OH | 21 | 1970-01-01T00:00:00.000036Z |",
            "| 270 | UT | | | 1970-01-01T00:00:00.000025Z |",
            "| 70 | UT | | | 1970-01-01T00:00:00.000020Z |",
            "| 99 | OR | | | 1970-01-01T00:00:00.000012Z |",
            "+-----------+------+------+------+-----------------------------+",
        ],
        &batches
    );
    // time range: [68000,68000]
    let file = files[1].clone();
    let batches = setup.read_parquet_file(file).await;
    assert_batches_sorted_eq!(
        &[
            "+-----------+------+------+------+-----------------------------+",
            "| field_int | tag1 | tag2 | tag3 | time |",
            "+-----------+------+------+------+-----------------------------+",
            "| 10 | VT | | | 1970-01-01T00:00:00.000068Z |",
            "+-----------+------+------+------+-----------------------------+",
        ],
        &batches
    );
    // time range: [136000,136000]
    let file = files[2].clone();
    let batches = setup.read_parquet_file(file).await;
    assert_batches_sorted_eq!(
        &[
            "+-----------+------+------+------+-----------------------------+",
            "| field_int | tag1 | tag2 | tag3 | time |",
            "+-----------+------+------+------+-----------------------------+",
            "| 210 | | OH | 21 | 1970-01-01T00:00:00.000136Z |",
            "+-----------+------+------+------+-----------------------------+",
        ],
        &batches
    );
}
// A partition registered in the skipped-compactions table is left untouched
// by the compactor.
#[tokio::test]
async fn test_skip_compact() {
    test_helpers::maybe_start_logging();
    // Create a test setup with 6 files
    let setup = TestSetup::builder().await.with_files().await.build().await;
    let expected_files_and_levels = vec![
        (1, CompactionLevel::FileNonOverlapped),
        (2, CompactionLevel::Initial),
        (3, CompactionLevel::Initial),
        (4, CompactionLevel::FileNonOverlapped),
        (5, CompactionLevel::Initial),
        (6, CompactionLevel::Initial),
    ];
    // verify 6 files
    let files = setup.list_by_table_not_to_delete().await;
    assert_levels(&files, expected_files_and_levels.clone());
    // add the partition into skipped compaction
    setup
        .catalog
        .add_to_skipped_compaction(setup.partition_info.partition_id, "test reason")
        .await;
    // compact but nothing will be compacted because the partition is skipped
    setup.run_compact().await;
    // verify still 6 files
    let files = setup.list_by_table_not_to_delete().await;
    assert_levels(&files, expected_files_and_levels.clone());
}
// A failing compaction leaves both catalog and object store unchanged and
// records the partition in the skipped-compactions table with the error chain.
#[tokio::test]
async fn test_partition_fail() {
    test_helpers::maybe_start_logging();
    // Create a test setup with 6 files
    let setup = TestSetup::builder().await.with_files().await.build().await;
    let catalog_files_pre = setup.list_by_table_not_to_delete().await;
    assert!(!catalog_files_pre.is_empty());
    let object_store_files_pre = list_object_store(&setup.catalog.object_store).await;
    assert!(!object_store_files_pre.is_empty());
    setup.run_compact_failing().await;
    let catalog_files_post = setup.list_by_table_not_to_delete().await;
    assert_eq!(catalog_files_pre, catalog_files_post);
    let object_store_files_post = list_object_store(&setup.catalog.object_store).await;
    assert_eq!(object_store_files_pre, object_store_files_post);
    assert_skipped_compactions(
        &setup,
        [(
            setup.partition_info.partition_id,
            "serialize\ncaused by\nJoin Error (panic)\ncaused by\nExternal error: Panic: foo",
        )],
    )
    .await;
}
// In shadow mode the compactor runs but must not commit anything: catalog
// and object-store contents are identical before and after the run.
#[tokio::test]
async fn test_shadow_mode() {
    test_helpers::maybe_start_logging();
    // Create a test setup with 6 files
    let setup = TestSetup::builder()
        .await
        .with_files()
        .await
        .with_shadow_mode()
        .build()
        .await;
    let catalog_files_pre = setup.list_by_table_not_to_delete().await;
    assert!(!catalog_files_pre.is_empty());
    let object_store_files_pre = list_object_store(&setup.catalog.object_store).await;
    assert!(!object_store_files_pre.is_empty());
    setup.run_compact().await;
    // A successful shadow-mode run leaves all persistent state untouched.
    let catalog_files_post = setup.list_by_table_not_to_delete().await;
    assert_eq!(catalog_files_pre, catalog_files_post);
    let object_store_files_post = list_object_store(&setup.catalog.object_store).await;
    assert_eq!(object_store_files_pre, object_store_files_post);
}
// A failure in shadow mode must neither change catalog/object-store state
// nor record the partition as skipped.
#[tokio::test]
async fn test_shadow_mode_partition_fail() {
    test_helpers::maybe_start_logging();
    // Create a test setup with 6 files
    let setup = TestSetup::builder()
        .await
        .with_files()
        .await
        .with_shadow_mode()
        .build()
        .await;
    let catalog_files_pre = setup.list_by_table_not_to_delete().await;
    assert!(!catalog_files_pre.is_empty());
    let object_store_files_pre = list_object_store(&setup.catalog.object_store).await;
    assert!(!object_store_files_pre.is_empty());
    setup.run_compact_failing().await;
    // Same state as before the failing run...
    let catalog_files_post = setup.list_by_table_not_to_delete().await;
    assert_eq!(catalog_files_pre, catalog_files_post);
    let object_store_files_post = list_object_store(&setup.catalog.object_store).await;
    assert_eq!(object_store_files_pre, object_store_files_post);
    // ...and, unlike non-shadow mode, no skipped-compaction record either.
    assert_skipped_compactions(&setup, []).await;
}
#[track_caller]
/// Asserts that `files` consists of exactly the expected
/// `(file id, compaction level)` pairs, in order.
fn assert_levels<'a>(
    files: impl IntoIterator<Item = &'a ParquetFile>,
    expected_files_and_levels: impl IntoIterator<Item = (i64, CompactionLevel)>,
) {
    let mut actual = Vec::new();
    for file in files {
        actual.push((file.id.get(), file.compaction_level));
    }
    let expected: Vec<_> = expected_files_and_levels.into_iter().collect();
    assert_eq!(actual, expected);
}
#[track_caller]
/// Asserts each parquet file has (id, max_l0_created_at)
fn assert_max_l0_created_at<'a>(
    files: impl IntoIterator<Item = &'a ParquetFile>,
    expected_files_and_max_l0_created_ats: impl IntoIterator<Item = (i64, i64)>,
) {
    let mut actual = Vec::new();
    for file in files {
        actual.push((file.id.get(), file.max_l0_created_at.get()));
    }
    let expected: Vec<_> = expected_files_and_max_l0_created_ats.into_iter().collect();
    assert_eq!(actual, expected);
}
/// Asserts that the catalog's skipped-compactions table contains exactly
/// the expected `(partition id, reason)` pairs.
async fn assert_skipped_compactions<const N: usize>(
    setup: &TestSetup,
    expected: [(PartitionId, &'static str); N],
) {
    // The outer `catalog` is the test wrapper; the inner `catalog` is the
    // actual catalog implementation it wraps.
    let skipped = setup
        .catalog
        .catalog
        .repositories()
        .await
        .partitions()
        .list_skipped_compactions()
        .await
        .unwrap();
    let actual = skipped
        .iter()
        .map(|skipped| (skipped.partition_id, skipped.reason.as_str()))
        .collect::<Vec<_>>();
    assert_eq!(actual, expected);
}
|
#![deny(clippy::all)]
use num::BigUint;
pub mod mod_p;
/// Diffie-Hellman trait contains common API for any choice of underlying cyclic group
pub trait DH {
    /// An element of the underlying cyclic group.
    type GroupElement;
    /// check an element is a valid member in the group
    fn check_elm(&self, elm: &Self::GroupElement) -> bool;
    /// returns the default group generator
    fn get_generator(&self) -> Self::GroupElement;
    /// returns g^e as a group element (g being the generator)
    fn exp(&self, e: &BigUint) -> Self::GroupElement;
    /// returns (priKey, pubKey) keypair
    fn key_gen(&self) -> (BigUint, Self::GroupElement);
    /// return shared session key derived from own secret `sk` and the
    /// other party's public element `pk`
    fn kex(&self, sk: &BigUint, pk: &Self::GroupElement) -> Self::GroupElement;
}
|
/// Receives a word and an array of candidates and returns a
/// vector with the words that are an anagram to that word.
///
/// A word is never considered an anagram of itself, even when the
/// candidate differs only in letter case.
/// # Examples
/// ```
/// let inputs = ["tan", "stand", "at"];
/// let outputs: Vec<&str> = vec!["tan"];
/// assert_eq!(anagram::anagrams_for("ant", &inputs), outputs);
/// ```
pub fn anagrams_for<'a>(word: &str, candidates: &'a [&str]) -> Vec<&'a str> {
    let normalized_word = normalize(word);
    // Compare case-insensitively when excluding the word itself; the old
    // exact comparison wrongly accepted e.g. "banana" as an anagram of
    // "BANANA".
    let word_upper = word.to_uppercase();
    candidates
        .iter()
        .filter(|&&cand| {
            cand.to_uppercase() != word_upper && normalized_word == normalize(cand)
        })
        .cloned()
        .collect()
}

/// Canonical form of a word: its uppercased characters in sorted order.
/// Two words are anagrams iff their canonical forms are equal.
fn normalize(word: &str) -> Vec<char> {
    let mut upper_word: Vec<_> = word.to_uppercase().chars().collect();
    // Unstable sort is fine: equal chars are indistinguishable.
    upper_word.sort_unstable();
    upper_word
}
|
use std::io::Read;
fn main() {
    // Read the input: cup labels given as hex digits, so the whole circle
    // of `cup_count` cups packs into one u64, 4 bits (one nibble) per cup.
    let mut input = String::new();
    std::io::stdin().read_to_string(&mut input).unwrap();
    let mut cups = u64::from_str_radix(input.trim(), 16).unwrap();
    let cup_count = input.trim().len();
    // Mask covering every nibble except the current cup and the three cups
    // that get picked up each round.
    let pick_up_mask = (0..cup_count - 4).fold(0, |acc, n| acc | (0xF << n * 4));
    // keep_masks[i] selects the i lowest nibbles.
    let keep_masks: Vec<_> = (0..cup_count)
        .map(|i| (0..i).fold(0, |acc, j| acc | 0xF << j * 4))
        .collect();
    for _ in 0..100 {
        // Highest nibble holds the current cup; the next three nibbles are
        // the cups picked up this round.
        let current_cup = cups >> (cup_count - 1) * 4;
        let removed_cups = cups >> (cup_count - 4) * 4 & 0xFFF;
        // Close the gap: current cup moves down next to the remaining cups.
        cups = current_cup << (cup_count - 4) * 4 | cups & pick_up_mask;
        // Destination: count down from current label (wrapping), taking the
        // first label still present in `cups` (the removed cups are not).
        // `find` already yields an Option, so the former
        // `if let Some(pos) … { Some(pos) } else { None }` wrapper is gone.
        let destination = (current_cup as usize - 1..current_cup as usize + cup_count - 1)
            .rev()
            .find_map(|cup| {
                let cup = cup % cup_count + 1;
                (0..cup_count).find(|pos| cups >> pos * 4 & 0xF == cup as u64)
            })
            .unwrap();
        // Splice the three removed cups back in just above the destination:
        // keep the nibbles below it, shift the rest up three nibbles, and
        // insert the removed cups in the gap.
        cups = cups & keep_masks[destination] |
            cups >> destination * 4 << (destination + 3) * 4 |
            removed_cups << destination * 4;
        // Rotate one nibble so the next current cup is in the highest nibble.
        cups = (cups & keep_masks[cup_count - 1]) << 4 | cups >> (cup_count - 1) * 4;
    }
    // Rotate until cup 1 sits in the lowest nibble, then print all labels
    // after cup 1 (dropping cup 1 itself) as hex digits.
    while cups & 0xF != 1 {
        cups = (cups & 0xF) << (cup_count - 1) * 4 | cups >> 4;
    }
    println!("{:X}", cups >> 4);
}
|
// Machine-generated MTS/COM+ administration ("mtxadmin") bindings for the
// windows-sys crate: coclass GUIDs, raw interface pointer aliases and
// `mtsErr*` HRESULT constants. Values come from Windows metadata — do not
// edit by hand.
#![allow(non_snake_case, non_camel_case_types, non_upper_case_globals, clashing_extern_declarations, clippy::all)]
#[link(name = "windows")]
extern "system" {}
// Coclass GUIDs (all share the same data2..data4; only data1 differs).
pub const Catalog: ::windows_sys::core::GUID = ::windows_sys::core::GUID { data1: 1857169537, data2: 35353, data3: 4560, data4: [129, 182, 0, 160, 201, 35, 28, 41] };
pub const CatalogCollection: ::windows_sys::core::GUID = ::windows_sys::core::GUID { data1: 1857169539, data2: 35353, data3: 4560, data4: [129, 182, 0, 160, 201, 35, 28, 41] };
pub const CatalogObject: ::windows_sys::core::GUID = ::windows_sys::core::GUID { data1: 1857169538, data2: 35353, data3: 4560, data4: [129, 182, 0, 160, 201, 35, 28, 41] };
pub const ComponentUtil: ::windows_sys::core::GUID = ::windows_sys::core::GUID { data1: 1857169540, data2: 35353, data3: 4560, data4: [129, 182, 0, 160, 201, 35, 28, 41] };
// COM interfaces are exposed as raw pointers in windows-sys.
pub type ICatalog = *mut ::core::ffi::c_void;
pub type IComponentUtil = *mut ::core::ffi::c_void;
pub type IPackageUtil = *mut ::core::ffi::c_void;
pub type IRemoteComponentUtil = *mut ::core::ffi::c_void;
pub type IRoleAssociationUtil = *mut ::core::ffi::c_void;
pub const PackageUtil: ::windows_sys::core::GUID = ::windows_sys::core::GUID { data1: 1857169541, data2: 35353, data3: 4560, data4: [129, 182, 0, 160, 201, 35, 28, 41] };
pub const RemoteComponentUtil: ::windows_sys::core::GUID = ::windows_sys::core::GUID { data1: 1857169542, data2: 35353, data3: 4560, data4: [129, 182, 0, 160, 201, 35, 28, 41] };
pub const RoleAssociationUtil: ::windows_sys::core::GUID = ::windows_sys::core::GUID { data1: 1857169543, data2: 35353, data3: 4560, data4: [129, 182, 0, 160, 201, 35, 28, 41] };
// Anonymous MIDL enums, projected as plain i32 aliases.
pub type __MIDL___MIDL_itf_mtxadmin_0107_0001 = i32;
pub const mtsInstallUsers: __MIDL___MIDL_itf_mtxadmin_0107_0001 = 1i32;
pub type __MIDL___MIDL_itf_mtxadmin_0107_0002 = i32;
pub const mtsExportUsers: __MIDL___MIDL_itf_mtxadmin_0107_0002 = 1i32;
// mtsErr* HRESULT error codes.
pub type __MIDL___MIDL_itf_mtxadmin_0107_0003 = i32;
pub const mtsErrObjectErrors: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368511i32;
pub const mtsErrObjectInvalid: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368510i32;
pub const mtsErrKeyMissing: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368509i32;
pub const mtsErrAlreadyInstalled: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368508i32;
pub const mtsErrDownloadFailed: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368507i32;
pub const mtsErrPDFWriteFail: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368505i32;
pub const mtsErrPDFReadFail: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368504i32;
pub const mtsErrPDFVersion: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368503i32;
// NOTE(review): same value as mtsErrInvalidUserids below (-2146368496) —
// presumably mirrors the SDK headers; confirm against mtxadmin.h.
pub const mtsErrCoReqCompInstalled: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368496i32;
pub const mtsErrBadPath: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368502i32;
pub const mtsErrPackageExists: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368501i32;
pub const mtsErrRoleExists: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368500i32;
pub const mtsErrCantCopyFile: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368499i32;
pub const mtsErrNoTypeLib: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368498i32;
pub const mtsErrNoUser: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368497i32;
pub const mtsErrInvalidUserids: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368496i32;
pub const mtsErrNoRegistryCLSID: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368495i32;
pub const mtsErrBadRegistryProgID: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368494i32;
pub const mtsErrAuthenticationLevel: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368493i32;
pub const mtsErrUserPasswdNotValid: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368492i32;
pub const mtsErrNoRegistryRead: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368491i32;
pub const mtsErrNoRegistryWrite: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368490i32;
pub const mtsErrNoRegistryRepair: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368489i32;
pub const mtsErrCLSIDOrIIDMismatch: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368488i32;
pub const mtsErrRemoteInterface: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368487i32;
pub const mtsErrDllRegisterServer: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368486i32;
pub const mtsErrNoServerShare: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368485i32;
pub const mtsErrNoAccessToUNC: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368484i32;
pub const mtsErrDllLoadFailed: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368483i32;
pub const mtsErrBadRegistryLibID: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368482i32;
pub const mtsErrPackDirNotFound: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368481i32;
pub const mtsErrTreatAs: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368480i32;
pub const mtsErrBadForward: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368479i32;
pub const mtsErrBadIID: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368478i32;
pub const mtsErrRegistrarFailed: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368477i32;
pub const mtsErrCompFileDoesNotExist: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368476i32;
pub const mtsErrCompFileLoadDLLFail: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368475i32;
pub const mtsErrCompFileGetClassObj: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368474i32;
pub const mtsErrCompFileClassNotAvail: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368473i32;
pub const mtsErrCompFileBadTLB: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368472i32;
pub const mtsErrCompFileNotInstallable: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368471i32;
pub const mtsErrNotChangeable: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368470i32;
pub const mtsErrNotDeletable: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368469i32;
pub const mtsErrSession: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368468i32;
pub const mtsErrCompFileNoRegistrar: __MIDL___MIDL_itf_mtxadmin_0107_0003 = -2146368460i32;
|
pub mod formalargs;
pub mod selectors;
mod strings;
mod unit;
mod util;
pub mod value;
use self::formalargs::{call_args, formal_args};
use self::selectors::selectors;
use self::strings::{sass_string, sass_string_dq, sass_string_sq};
use self::util::{comment, ignore_space, name, opt_spacelike, spacelike};
use self::value::{
dictionary, function_call, single_value, value_expression,
};
use error::Error;
use functions::SassFunction;
use nom::types::CompleteByteSlice as Input;
use nom::Err;
#[cfg(test)]
use sass::{CallArgs, FormalArgs};
use sass::{Item, Value};
use selectors::Selectors;
use std::fs::File;
use std::io::Read;
use std::path::Path;
use std::str::{from_utf8, Utf8Error};
use value::ListSeparator;
#[cfg(test)]
use value::{Number, Rgba, Unit};
/// Parse a scss value.
///
/// Returns a single value (or an error).
pub fn parse_value_data(data: &[u8]) -> Result<Value, Error> {
    let (remaining, value) = value_expression(Input(data))?;
    // A value expression must consume the whole buffer.
    assert!(remaining.is_empty());
    Ok(value)
}
/// Parse a scss file.
///
/// Returns a vec of the top level items of the file (or an error message).
pub fn parse_scss_file(file: &Path) -> Result<Vec<Item>, Error> {
    let mut bytes = Vec::new();
    // Open and slurp the file; any I/O failure is reported together with
    // the path it happened on.
    File::open(file)
        .and_then(|mut f| f.read_to_end(&mut bytes))
        .map_err(|e| Error::Input(file.into(), e))?;
    parse_scss_data(&bytes)
}
/// Parse scss data from a buffer.
///
/// Returns a vec of the top level items of the file (or an error message).
pub fn parse_scss_data(data: &[u8]) -> Result<Vec<Item>, Error> {
    match sassfile(Input(data)) {
        // Entire input consumed: success.
        Ok((Input(b""), items)) => Ok(items),
        // Parser stopped early: report the unparsed tail (debug-formatted
        // if it is not valid UTF-8).
        Ok((rest, _styles)) => {
            let t = from_utf8(&rest)
                .map(|s| s.to_string())
                .unwrap_or_else(|_| format!("{:?}", rest));
            Err(Error::S(format!(
                "Failed to parse entire input: `{}` remains.",
                t
            )))
        }
        // nom distinguishes "need more input" from hard parse errors.
        Err(Err::Incomplete(x)) => {
            Err(Error::S(format!("Incomplete: {:?}", x)))
        }
        Err(x) => Err(Error::S(format!("Error: {:?}", x))),
    }
}
// Top-level grammar: an optional UTF-8 BOM, then any number of top-level
// items (declarations, loops, at-rules, rules, comments, ...).
named!(
    sassfile<Input, Vec<Item>>,
    preceded!(
        opt!(tag!("\u{feff}".as_bytes())),
        many0!(alt!(value!(Item::None, spacelike) |
                    import |
                    variable_declaration |
                    mixin_declaration |
                    each_loop |
                    for_loop |
                    while_loop |
                    function_declaration |
                    mixin_call |
                    if_statement |
                    at_rule |
                    rule |
                    map!(map_res!(comment, input_to_string), Item::Comment)
                    ))
    )
);
// A style rule: selectors followed by a `{ ... }` body.
named!(
    rule<Input, Item>,
    do_parse!(
        opt_spacelike
            >> selectors: selectors
            >> opt!(is_a!(", \t\n"))
            >> body: body_block
            >> (Item::Rule(selectors, body))
    )
);
// Anything that may appear inside a rule body. Order matters: earlier
// alternatives win, e.g. `rule` is tried before `property`.
named!(
    body_item<Input, Item>,
    alt_complete!(
        value!(Item::None, spacelike)
            | mixin_declaration
            | variable_declaration
            | rule
            | namespace_rule
            | property
            | each_loop
            | for_loop
            | while_loop
            | function_declaration
            | mixin_call
            | import
            | at_root
            | if_statement
            | return_stmt
            | content_stmt
            | at_rule
            | value!(
                Item::None,
                delimited!(opt_spacelike, tag!(";"), opt_spacelike)
            )
            | map!(map_res!(comment, input_to_string), Item::Comment)
    )
);
// `@import <value>;`
named!(
    import<Input, Item>,
    map!(
        delimited!(tag!("@import "), value_expression, tag!(";")),
        Item::Import
    )
);
// `@at-root [selectors] { ... }`; missing selectors default to the root.
named!(
    at_root<Input, Item>,
    preceded!(
        terminated!(tag!("@at-root"), opt_spacelike),
        map!(
            pair!(
                map!(opt!(selectors), |s| s.unwrap_or_else(Selectors::root)),
                body_block
            ),
            |(selectors, body)| Item::AtRoot { selectors, body }
        )
    )
);
// `@include name[(args)] [ { body } ] [;]` — both the argument list and
// the body are optional.
named!(
    mixin_call<Input, Item>,
    do_parse!(
        tag!("@include")
            >> spacelike
            >> name: name
            >> opt_spacelike
            >> args: opt!(call_args)
            >> opt_spacelike
            >> body: opt!(body_block)
            >> opt_spacelike
            >> opt!(complete!(tag!(";")))
            >> (Item::MixinCall {
                name,
                args: args.unwrap_or_default(),
                body: body.unwrap_or_default(),
            })
    )
);
// Generic `@<name> <args...>` rule (e.g. `@media`, `@font-face`), with an
// optional body. A single argument is kept as-is; several become a
// space-separated list.
named!(
    at_rule<Input, Item>,
    do_parse!(
        tag!("@")
            >> name: name
            >> args: many0!(preceded!(
                opt!(ignore_space),
                alt!(
                    terminated!(
                        alt!(
                            function_call
                                | dictionary
                                | map!(sass_string, Value::Literal)
                                | map!(sass_string_dq, Value::Literal)
                                | map!(sass_string_sq, Value::Literal)
                        ),
                        peek!(one_of!(" \n\t{;"))
                    ) | map!(
                        map_res!(is_not!("\"'{};"), input_to_str),
                        // Note: the trailing comma sits inside the
                        // `Value::Literal(..)` call.
                        // NOTE(review): `trim_right` is deprecated in favour
                        // of `trim_end` on newer toolchains.
                        |s| Value::Literal(s.trim_right().into(),)
                    )
                )
            ))
            >> opt!(ignore_space)
            >> body: alt!(
                map!(body_block, Some) |
                value!(None, eof!()) |
                value!(None, tag!(";"))
            )
            >> (Item::AtRule {
                name,
                args: if args.len() == 1 {
                    args.into_iter().next().unwrap()
                } else {
                    Value::List(args, ListSeparator::Space, false, false)
                },
                body,
            })
    )
);
// `@if`: the leading `@` is consumed here so `if_statement_inner` can be
// reused for `@else if` chains (where no second `@` appears after `@else`).
named!(if_statement<Input, Item>, preceded!(tag!("@"), if_statement_inner));
named!(
    if_statement_inner<Input, Item>,
    do_parse!(
        tag!("if")
            >> spacelike
            >> cond: value_expression
            >> opt_spacelike
            >> body: body_block
            >> else_body:
                opt!(complete!(preceded!(
                    delimited!(opt_spacelike, tag!("@else"), opt_spacelike),
                    alt_complete!(
                        // `@else { ... }` or a chained `@else if ...`.
                        body_block | map!(if_statement_inner, |s| vec![s])
                    )
                )))
            >> (Item::IfStatement(cond, body, else_body.unwrap_or_default()))
    )
);
// `@each $a, $b, ... in <expr> { ... }`
named!(
    each_loop<Input, Item>,
    map!(
        tuple!(
            preceded!(
                terminated!(tag!("@each"), spacelike),
                // One or more comma-separated `$name` bindings.
                separated_nonempty_list!(
                    complete!(delimited!(
                        opt_spacelike,
                        tag!(","),
                        opt_spacelike
                    )),
                    preceded!(tag!("$"), name)
                )
            ),
            delimited!(
                delimited!(spacelike, tag!("in"), spacelike),
                value_expression,
                spacelike
            ),
            body_block
        ),
        |(names, values, body)| Item::Each(names, values, body)
    )
);
// `@for $name from <a> (to|through) <b> { ... }`; `through` includes the
// end value, `to` excludes it.
named!(
    for_loop<Input, Item>,
    do_parse!(
        tag!("@for")
            >> spacelike
            >> tag!("$")
            >> name: name
            >> spacelike
            >> tag!("from")
            >> spacelike
            >> from: single_value
            >> spacelike
            >> inclusive:
                alt!(
                    value!(true, tag!("through")) | value!(false, tag!("to"))
                )
            >> spacelike
            >> to: single_value
            >> opt_spacelike
            >> body: body_block
            >> (Item::For {
                name,
                from: Box::new(from),
                to: Box::new(to),
                inclusive,
                body,
            })
    )
);
// `@while <expr> { ... }`
named!(
    while_loop<Input, Item>,
    do_parse!(
        tag!("@while")
            >> spacelike
            >> cond: value_expression
            >> spacelike
            >> body: body_block
            >> (Item::While(cond, body))
    )
);
// `@mixin name[(formal args)] { ... }` — the argument list is optional.
named!(mixin_declaration<Input, Item>,
       do_parse!(tag!("@mixin") >> spacelike >>
                 name: name >> opt_spacelike >>
                 args: opt!(formal_args) >> opt_spacelike >>
                 body: body_block >>
                 (Item::MixinDeclaration{
                     name,
                     args: args.unwrap_or_default(),
                     body,
                 })));
// `@function name(formal args) { ... }` — here the argument list is
// mandatory, unlike for mixins.
named!(
    function_declaration<Input, Item>,
    do_parse!(
        tag!("@function")
            >> spacelike
            >> name: name
            >> opt_spacelike
            >> args: formal_args
            >> opt_spacelike
            >> body: body_block
            >> (Item::FunctionDeclaration {
                name,
                func: SassFunction::new(args, body),
            })
    )
);
// `@return <expr>[;]` inside a function body.
named!(
    return_stmt<Input, Item>,
    do_parse!(
        tag!("@return")
            >> spacelike
            >> v: value_expression
            >> opt_spacelike
            >> opt!(tag!(";"))
            >> (Item::Return(v))
    )
);
// `@content[;]` inside a mixin body.
named!(
    content_stmt<Input, Item>,
    do_parse!(
        tag!("@content")
            >> opt_spacelike
            >> opt!(tag!(";"))
            >> (Item::Content)
    )
);
// `name: value[;]` — an ordinary css property.
named!(property<Input, Item>,
       do_parse!(opt_spacelike >>
                 name: sass_string >> opt_spacelike >>
                 tag!(":") >> opt_spacelike >>
                 val: value_expression >> opt_spacelike >>
                 opt!(tag!(";")) >> opt_spacelike >>
                 (Item::Property(name, val))));
// `name: [value] { ... }` — a namespaced property group, e.g.
// `font: { family: serif; }`.
named!(
    namespace_rule<Input, Item>,
    do_parse!(
        opt_spacelike
            >> n1: name
            >> opt_spacelike
            >> tag!(":")
            >> opt_spacelike
            >> value: opt!(value_expression)
            >> opt_spacelike
            >> body: body_block
            >> (Item::NamespaceRule(n1, value.unwrap_or(Value::Null), body))
    )
);
// A brace-delimited list of body items.
named!(
    body_block<Input, Vec<Item>>,
    delimited!(
        preceded!(tag!("{"), opt_spacelike),
        many0!(body_item),
        tag!("}")
    )
);
// `$name: value [!default] [!global];` — the flag order is fixed:
// `!default` must precede `!global` if both are present.
named!(
    variable_declaration<Input, Item>,
    do_parse!(
        tag!("$")
            >> name: name
            >> opt_spacelike
            >> tag!(":")
            >> opt_spacelike
            >> val: value_expression
            >> opt_spacelike
            >> default: map!(opt!(tag!("!default")), |d| d.is_some())
            >> opt_spacelike
            >> global: map!(opt!(tag!("!global")), |g| g.is_some())
            >> opt_spacelike
            >> tag!(";")
            >> opt_spacelike
            >> (Item::VariableDeclaration {
                name,
                val,
                default,
                global,
            })
    )
);
/// Interpret raw parser input as UTF-8 text (borrowed).
fn input_to_str<'a>(s: Input<'a>) -> Result<&str, Utf8Error> {
    from_utf8(&s)
}
/// Interpret raw parser input as UTF-8 text (owned).
fn input_to_string(s: Input) -> Result<String, Utf8Error> {
    from_utf8(&s).map(String::from)
}
/// Test helper: a numeric percentage value.
#[cfg(test)]
fn percentage(v: isize) -> Value {
    Value::Numeric(Number::from(v), Unit::Percent)
}
/// Test helper: a literal string value.
#[cfg(test)]
fn string(v: &str) -> Value {
    Value::Literal(v.into())
}
// An `@if` without an `@else` parses with an empty else-body; the trailing
// newline is left unconsumed.
#[test]
fn if_with_no_else() {
    assert_eq!(
        if_statement(Input(b"@if true { p { color: black; } }\n")),
        Ok((
            Input(b"\n"),
            Item::IfStatement(
                Value::True,
                vec![
                    Item::Rule(
                        selectors(Input(b"p")).unwrap().1,
                        vec![Item::Property("color".into(), Value::black())],
                    ),
                    Item::None,
                ],
                vec![]
            )
        ))
    )
}
// `@include` without an argument list yields empty default args.
#[test]
fn test_mixin_call_noargs() {
    assert_eq!(
        mixin_call(Input(b"@include foo;\n")),
        Ok((
            Input(b"\n"),
            Item::MixinCall {
                name: "foo".to_string(),
                args: CallArgs::new(vec![]),
                body: vec![],
            }
        ))
    )
}
// Positional call arguments are stored with `None` as the name.
#[test]
fn test_mixin_call_pos_args() {
    assert_eq!(
        mixin_call(Input(b"@include foo(bar, baz);\n")),
        Ok((
            Input(b"\n"),
            Item::MixinCall {
                name: "foo".to_string(),
                args: CallArgs::new(vec![
                    (None, string("bar")),
                    (None, string("baz")),
                ]),
                body: vec![],
            }
        ))
    )
}
// Named call arguments keep the `$`-less parameter name.
#[test]
fn test_mixin_call_named_args() {
    assert_eq!(
        mixin_call(Input(b"@include foo($x: bar, $y: baz);\n")),
        Ok((
            Input(b"\n"),
            Item::MixinCall {
                name: "foo".to_string(),
                args: CallArgs::new(vec![
                    (Some("x".into()), string("bar")),
                    (Some("y".into()), string("baz")),
                ]),
                body: vec![],
            }
        ))
    )
}
// An empty `()` argument list parses as the default formal args.
#[test]
fn test_mixin_declaration_empty() {
    assert_eq!(
        mixin_declaration(Input(b"@mixin foo() {}\n")),
        Ok((
            Input(b"\n"),
            Item::MixinDeclaration {
                name: "foo".into(),
                args: FormalArgs::default(),
                body: vec![],
            }
        ))
    )
}
// A formal argument without a default gets `Value::Null` as its default.
#[test]
fn test_mixin_declaration() {
    assert_eq!(
        mixin_declaration(Input(
            b"@mixin foo($x) {\n  foo-bar: baz $x;\n}\n"
        )),
        Ok((
            Input(b"\n"),
            Item::MixinDeclaration {
                name: "foo".into(),
                args: FormalArgs::new(vec![("x".into(), Value::Null)], false),
                body: vec![Item::Property(
                    "foo-bar".into(),
                    Value::List(
                        vec![string("baz"), Value::Variable("x".into())],
                        ListSeparator::Space,
                        false,
                        false,
                    ),
                )],
            }
        ))
    )
}
// Defaulted formal args plus a nested rule inside the mixin body; the
// trailing whitespace after the inner rule becomes `Item::None`.
#[test]
fn test_mixin_declaration_default_and_subrules() {
    assert_eq!(
        mixin_declaration(Input(
            b"@mixin bar($a, $b: flug) {\n  \
              foo-bar: baz;\n  \
              foo, bar {\n  \
              property: $b;\n  \
              }\n\
              }\n"
        )),
        Ok((
            Input(b"\n"),
            Item::MixinDeclaration {
                name: "bar".into(),
                args: FormalArgs::new(
                    vec![
                        ("a".into(), Value::Null),
                        ("b".into(), string("flug")),
                    ],
                    false
                ),
                body: vec![
                    Item::Property("foo-bar".into(), string("baz")),
                    Item::Rule(
                        selectors(Input(b"foo, bar")).unwrap().1,
                        vec![Item::Property(
                            "property".into(),
                            Value::Variable("b".into()),
                        )],
                    ),
                    Item::None,
                ],
            }
        ))
    )
}
// A named color parses to its rgb value while remembering the source name.
#[test]
fn test_simple_property() {
    assert_eq!(
        property(Input(b"color: red;\n")),
        Ok((
            Input(b""),
            Item::Property(
                "color".into(),
                Value::Color(Rgba::from_rgb(255, 0, 0), Some("red".into())),
            )
        ))
    )
}
// Two space-separated values become a space-separated list.
#[test]
fn test_property_2() {
    assert_eq!(
        property(Input(b"background-position: 90% 50%;\n")),
        Ok((
            Input(b""),
            Item::Property(
                "background-position".into(),
                Value::List(
                    vec![percentage(90), percentage(50)],
                    ListSeparator::Space,
                    false,
                    false
                ),
            )
        ))
    )
}
// Plain declaration: neither `!default` nor `!global` set.
#[test]
fn test_variable_declaration_simple() {
    assert_eq!(
        variable_declaration(Input(b"$foo: bar;\n")),
        Ok((
            Input(b""),
            Item::VariableDeclaration {
                name: "foo".into(),
                val: string("bar"),
                default: false,
                global: false,
            }
        ))
    )
}
// `!global` sets only the `global` flag.
#[test]
fn test_variable_declaration_global() {
    assert_eq!(
        variable_declaration(Input(b"$y: some value !global;\n")),
        Ok((
            Input(b""),
            Item::VariableDeclaration {
                name: "y".into(),
                val: Value::List(
                    vec![string("some"), string("value")],
                    ListSeparator::Space,
                    false,
                    false
                ),
                default: false,
                global: true,
            }
        ))
    )
}
// `!default` sets only the `default` flag.
#[test]
fn test_variable_declaration_default() {
    assert_eq!(
        variable_declaration(Input(b"$y: some value !default;\n")),
        Ok((
            Input(b""),
            Item::VariableDeclaration {
                name: "y".into(),
                val: Value::List(
                    vec![string("some"), string("value")],
                    ListSeparator::Space,
                    false,
                    false
                ),
                default: true,
                global: false,
            }
        ))
    )
}
|
use chrono::prelude::*;
use crate::hash::sha256;
fn get_hash_for_block(prev_block_hash: &str, data: &str, timestamp: i64) -> String {
let timestamp_bytes = timestamp.to_le_bytes();
let headers = [
prev_block_hash.as_bytes(),
data.as_bytes(),
×tamp_bytes[..],
]
.concat();
sha256(&headers)
}
/// A single block in the chain.
pub struct Block {
    // Creation time in nanoseconds (see `Block::new`).
    pub timestamp: i64,
    // Arbitrary payload carried by the block.
    pub data: String,
    // Hash of the preceding block; empty for the genesis block.
    pub prev_block_hash: String,
    // This block's own hash over (prev_block_hash, data, timestamp).
    pub hash: String,
}
impl Block {
    /// Creates a block holding `data`, chained to `prev_block_hash` and
    /// stamped with the current UTC time in nanoseconds.
    pub fn new(data: &str, prev_block_hash: &str) -> Self {
        let timestamp = Utc::now().timestamp_nanos();
        let hash = get_hash_for_block(prev_block_hash, data, timestamp);
        Block {
            timestamp,
            data: data.to_owned(),
            prev_block_hash: prev_block_hash.to_owned(),
            hash,
        }
    }
    /// The first block of a chain: fixed payload, empty predecessor hash.
    pub fn new_genesis_block() -> Self {
        Block::new("Genesis Block", "")
    }
}
|
/// An uninhabited type: it has no variants, so no value of it can ever be
/// constructed and a `Result<_, Never>` can never be `Err`.
#[derive(Debug)]
enum Never { }
// Always succeeds; the error type documents that failure is impossible.
fn f() -> Result<usize, Never> {
    Ok(0)
}
fn main() {
    // `unwrap` here cannot panic, since `Err(Never)` is unrepresentable.
    println!("{}", f().unwrap());
}
|
mod error;
mod loading;
mod standard;
mod unsupported;
use std::cmp::{Ord, Ordering, PartialOrd};
use chrono::{DateTime, TimeZone};
use failure::Error;
use slack::api;
use models::{AppState, Canvas, ChannelID};
pub use self::error::ErrorMessage;
pub use self::loading::LoadingMessage;
pub use self::standard::StandardMessage;
pub use self::unsupported::UnsupportedMessage;
/// Convenience re-exports shared by the message submodules.
mod prelude {
    pub use super::{HistoryEntry, Message, MessageID, MessageSideChannel};
    pub use models::{AppState, Canvas, ChannelID};
}
/// Identifier of a message: the raw slack timestamp string
/// (`"<secs>.<micros>"`), whose lexical order matches chronological order
/// (see the `From<DateTime<_>>` impl and the tests below).
#[derive(Debug, Clone, PartialEq, Eq, Ord, PartialOrd, Hash)]
pub struct MessageID(String);
/// A history message, by level of rendering support.
#[derive(Debug, PartialEq, Eq, Hash)]
pub enum Message {
    Standard(StandardMessage),
    Unsupported(UnsupportedMessage),
    Error(ErrorMessage),
}
/// Common behaviour of anything that can appear in a channel's history.
pub trait HistoryEntry {
    fn id(&self) -> &MessageID;
    fn channel_id(&self) -> &ChannelID;
    /// Renders the entry into a drawable canvas of the given width.
    fn render_as_canvas(&self, state: &AppState, width: u16) -> Canvas;
    /// Consumes the entry, wrapping it in the `Message` enum.
    fn into_message(self) -> Message;
}
/// Out-of-band data accompanying a slack message, e.g. the channel ID when
/// the payload itself lacks it.
#[derive(Debug, Default, Clone)]
pub struct MessageSideChannel {
    pub channel_id: Option<ChannelID>,
}
/// Builds an `UnsupportedMessage` placeholder from the raw slack fields and
/// wraps it as `Ok(Some(Message::Unsupported(..)))`, forwarding any
/// conversion error unchanged.
fn unsupported(
    id: &Option<String>,
    channel: &Option<String>,
    from: &Option<String>,
    text: &Option<String>,
    subtype: &Option<String>,
    side_channel: &MessageSideChannel,
) -> Result<Option<Message>, Error> {
    // `map` replaces a `match` that re-wrapped both arms by hand
    // (`Ok(m) => Ok(Some(..))`, `Err(e) => Err(e)`).
    UnsupportedMessage::from_slack_message(id, channel, from, text, subtype, side_channel)
        .map(|message| Some(message.into_message()))
}
impl Message {
    /// Converts a raw `slack_api` message into our internal representation.
    ///
    /// Returns `Ok(None)` for subtypes that are recognised but not shown,
    /// `Ok(Some(_))` for standard messages and for subtypes rendered via an
    /// "unsupported" placeholder.
    pub fn from_slack_message<'a, S>(
        msg: &api::Message,
        side_channel: S,
    ) -> Result<Option<Self>, Error>
    where
        S: Into<Option<&'a MessageSideChannel>>,
    {
        // Note: this alias shadows the generic parameter `S` for the rest
        // of the body.
        use self::api::Message as S;
        let side_channel = side_channel.into().cloned().unwrap_or_default();
        match *msg {
            S::Standard(ref msg) => {
                StandardMessage::from_slack(msg, &side_channel).map(|m| Some(m.into_message()))
            }
            // TODO: slack_api does not have the "channel" key for a lot of messages.
            // Underlying cause: The https://github.com/slack-rs/slack-api-schemas repo does not
            // know what do wo with `"channel": { ... }` in the samples for these messages.
            S::BotMessage(_) => Ok(None),
            S::ChannelArchive(_) => Ok(None),
            S::ChannelJoin(_) => Ok(None),
            S::ChannelLeave(_) => Ok(None),
            S::ChannelName(_) => Ok(None),
            S::ChannelPurpose(_) => Ok(None),
            S::ChannelTopic(_) => Ok(None),
            S::ChannelUnarchive(_) => Ok(None),
            S::FileComment(_) => Ok(None),
            S::FileMention(_) => Ok(None),
            S::FileShare(_) => Ok(None),
            S::GroupArchive(_) => Ok(None),
            S::GroupJoin(_) => Ok(None),
            S::GroupLeave(_) => Ok(None),
            S::GroupName(_) => Ok(None),
            S::GroupPurpose(_) => Ok(None),
            S::GroupTopic(_) => Ok(None),
            S::GroupUnarchive(_) => Ok(None),
            S::MeMessage(_) => Ok(None),
            // The remaining subtypes are rendered as placeholders, pulling
            // whatever fields the payload provides.
            S::MessageChanged(ref msg) => unsupported(
                &msg.ts,
                &msg.channel,
                &msg.message.as_ref().and_then(|m| m.user.clone()),
                &msg.message.as_ref().and_then(|c| c.text.clone()),
                &msg.subtype,
                &side_channel,
            ),
            S::MessageDeleted(ref msg) => unsupported(
                &msg.ts,
                &msg.channel,
                &Some(String::from("Message deleted")),
                &Some(String::from("Message was deleted")),
                &msg.subtype,
                &side_channel,
            ),
            S::MessageReplied(ref msg) => unsupported(
                &msg.ts,
                &msg.channel,
                &msg.message.as_ref().and_then(|m| m.user.clone()),
                &msg.message.as_ref().and_then(|c| c.text.clone()),
                &msg.subtype,
                &side_channel,
            ),
            S::PinnedItem(ref msg) => unsupported(
                &msg.ts,
                &msg.channel,
                &msg.user,
                &msg.text,
                &msg.subtype,
                &side_channel,
            ),
            S::ReplyBroadcast(ref msg) => unsupported(
                &msg.ts,
                &msg.channel,
                &msg.user,
                &Some(String::from("Message got a broadcasted reply")),
                &msg.subtype,
                &side_channel,
            ),
            S::UnpinnedItem(ref msg) => unsupported(
                &msg.ts,
                &msg.channel,
                &msg.user,
                &msg.text,
                &msg.subtype,
                &side_channel,
            ),
        }
    }
}
impl HistoryEntry for Message {
fn id(&self) -> &MessageID {
use self::Message::*;
match *self {
Standard(ref msg) => msg.id(),
Unsupported(ref msg) => msg.id(),
Error(ref msg) => msg.id(),
}
}
fn channel_id(&self) -> &ChannelID {
use self::Message::*;
match *self {
Standard(ref msg) => msg.channel_id(),
Unsupported(ref msg) => msg.channel_id(),
Error(ref msg) => msg.channel_id(),
}
}
fn render_as_canvas(&self, state: &AppState, width: u16) -> Canvas {
use self::Message::*;
match *self {
Standard(ref msg) => msg.render_as_canvas(state, width),
Unsupported(ref msg) => msg.render_as_canvas(state, width),
Error(ref msg) => msg.render_as_canvas(state, width),
}
}
fn into_message(self) -> Message {
self
}
}
impl PartialOrd for Message {
    /// Consistent with `Ord` by construction: delegates to `cmp`.
    fn partial_cmp(&self, rhs: &Message) -> Option<Ordering> {
        Some(self.cmp(rhs))
    }
}
impl Ord for Message {
    /// Messages are totally ordered by their IDs (slack timestamps).
    /// Implemented directly so `partial_cmp` can delegate here instead of
    /// `cmp` calling `partial_cmp().unwrap()` as before.
    fn cmp(&self, rhs: &Message) -> Ordering {
        self.id().cmp(rhs.id())
    }
}
impl MessageID {
    /// The raw timestamp string, borrowed.
    pub fn as_str(&self) -> &str {
        &self.0
    }
    /// The raw timestamp string, as a fresh owned copy.
    pub fn as_string(&self) -> String {
        self.0.clone()
    }
}
impl From<String> for MessageID {
    fn from(s: String) -> Self {
        MessageID(s)
    }
}
impl<'a> From<&'a str> for MessageID {
    fn from(s: &'a str) -> Self {
        MessageID(s.to_owned())
    }
}
impl<Z: TimeZone> From<DateTime<Z>> for MessageID {
    /// Formats the time as `"<unix secs>.<zero-padded microseconds>"`,
    /// matching slack's timestamp-ID format.
    fn from(time: DateTime<Z>) -> Self {
        MessageID(format!(
            "{}.{:06}",
            time.timestamp(),
            time.timestamp_subsec_micros()
        ))
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    mod message_id {
        use super::*;
        // Lexical ordering of the ID strings is chronological ordering.
        #[test]
        fn it_sorts_by_oldest_first() {
            let older = "1403051575.000407".into();
            let newer = "1403051575.000408".into();
            let newest = "1403051575.000409".into();
            let mut ids: Vec<&MessageID> = vec![&newest, &older, &newer];
            ids.sort();
            assert_eq!(&ids, &[&older, &newer, &newest]);
        }
        // 407_000 ns = 407 µs, zero-padded to six digits in the ID.
        #[test]
        fn it_is_constructed_from_microsecond_timestamps() {
            use chrono::prelude::*;
            let expected_id = "1403051575.000407";
            let time = Utc.timestamp(1403051575, 407_000);
            let id: MessageID = time.clone().into();
            assert_eq!(&id.0, expected_id);
        }
    }
}
|
#![ doc = include_str!("../README.md")]
#[macro_use]
extern crate clap;
mod app;
mod error;
mod project_config;
mod tmux;
use app::{actions, cli};
use error::AppErrorForDisplay;
/// Entry point: parses the command line and dispatches to the matching
/// action, converting any action error into its display wrapper.
fn main() -> Result<(), AppErrorForDisplay> {
    let matches = cli::get_cli_command_parser().get_matches();
    // A bare `<project>` argument is a shortcut for the `run` subcommand.
    if let Some(project_name) = matches.get_one::<String>("project") {
        return actions::run_project(project_name).map_err(|e| e.into());
    }
    match matches.subcommand() {
        Some(("list", _)) => actions::list_projects(),
        Some(("doctor", _)) => actions::check_config(),
        Some(("debug", debug_matches)) => {
            actions::debug_project(debug_matches.get_one::<String>("project").unwrap())
        }
        Some(("run", run_matches)) => {
            actions::run_project(run_matches.get_one::<String>("project").unwrap())
        }
        Some(("edit", edit_matches)) => {
            actions::edit_project(edit_matches.get_one::<String>("project").unwrap())
        }
        Some(("delete", delete_matches)) => {
            actions::delete_project(delete_matches.get_one::<String>("project").unwrap())
        }
        Some(("new", new_matches)) => actions::new_project(
            new_matches.get_one::<String>("project").unwrap(),
            *new_matches.get_one::<bool>("blank").unwrap(),
        ),
        Some(("copy", copy_matches)) => actions::copy_project(
            copy_matches.get_one::<String>("existing").unwrap(),
            copy_matches.get_one::<String>("new").unwrap(),
        ),
        Some(("stop", stop_matches)) => {
            actions::stop(stop_matches.get_one::<String>("project").unwrap())
        }
        // NOTE(review): assumes the clap parser requires one of the above
        // subcommands when no bare project is given — confirm in `cli`.
        _ => unreachable!(),
    }
    .map_err(|e| e.into())
}
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This test checks for namespace pollution by private tests.
// Tests used to marked as public causing name conflicts with normal
// functions only in test builds.
// compile-flags: --test
mod a {
    // The ordinary public function that glob imports should resolve to.
    pub fn foo() -> bool {
        true
    }
}
mod b {
    // Private test functions must NOT leak out of the module and shadow
    // `a::foo` under the glob imports below.
    #[test]
    fn foo() {
        local_name(); // ensure the local name still works
    }
    #[test]
    fn local_name() {}
}
use a::*;
use b::*;
// With both globs in scope, `foo` must still resolve to `a::foo` even in a
// `--test` build where `b::foo` exists.
pub fn conflict() {
    let _: bool = foo();
}
|
// Copyright 2022 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::BTreeMap;
use std::hash::Hash;
use common_ast::ast::Query;
use common_ast::ast::TableAlias;
use common_catalog::plan::InternalColumn;
use common_exception::ErrorCode;
use common_exception::Result;
use common_exception::Span;
use common_expression::types::DataType;
use common_expression::ColumnId;
use common_expression::DataField;
use common_expression::DataSchemaRef;
use common_expression::DataSchemaRefExt;
use dashmap::DashMap;
use super::AggregateInfo;
use super::INTERNAL_COLUMN_FACTORY;
use crate::binder::window::WindowInfo;
use crate::normalize_identifier;
use crate::optimizer::SExpr;
use crate::plans::ScalarExpr;
use crate::ColumnSet;
use crate::IndexType;
use crate::MetadataRef;
use crate::NameResolutionContext;
/// Context of current expression, this is used to check if
/// the expression is valid in current context.
#[derive(Debug, Clone, Default)]
pub enum ExprContext {
    /// Binding the `SELECT` projection list.
    SelectClause,
    /// Binding the `WHERE` predicate.
    WhereClause,
    /// Binding the `HAVING` predicate.
    HavingClause,
    /// Binding `ORDER BY` expressions.
    OrderByClause,
    /// Binding `LIMIT` expressions.
    LimitClause,
    /// Binding inside a set-returning function.
    InSetReturningFunction,
    /// Binding inside an aggregate function.
    InAggregateFunction,
    /// No specific clause context established yet.
    #[default]
    Unknown,
}
#[derive(Clone, Debug, Eq, PartialEq, serde::Deserialize, serde::Serialize)]
pub enum Visibility {
    // Default for a column: participates in name resolution and wildcard expansion.
    Visible,
    // Inner column of struct; not shown by default.
    InVisible,
    // Consider the sql: `select * from t join t1 using(a)`.
    // The result should only contain one `a` column.
    // So we need make `t.a` or `t1.a` invisible in unqualified
    // wildcard expansion (see `match_column_binding`).
    UnqualifiedWildcardInVisible,
}
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct ColumnBinding {
    /// Database name of this `ColumnBinding` in current context
    pub database_name: Option<String>,
    /// Table name of this `ColumnBinding` in current context
    pub table_name: Option<String>,
    /// Column name of this `ColumnBinding` in current context
    pub column_name: String,
    /// Column index of ColumnBinding
    pub index: IndexType,
    /// Data type of the bound column.
    pub data_type: Box<DataType>,
    /// Visibility of the column during name resolution; see [`Visibility`].
    pub visibility: Visibility,
}
impl PartialEq for ColumnBinding {
    // Two bindings denote the same column iff they share the same column
    // index; names and types are metadata and are deliberately ignored.
    fn eq(&self, other: &Self) -> bool {
        self.index == other.index
    }
}
// Index comparison is a total equivalence relation, so `Eq` is sound.
impl Eq for ColumnBinding {}
impl Hash for ColumnBinding {
    // Hash must agree with `PartialEq`, which compares only `index`.
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.index.hash(state);
    }
}
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct InternalColumnBinding {
    /// Database name of this `InternalColumnBinding` in current context
    pub database_name: Option<String>,
    /// Table name of this `InternalColumnBinding` in current context
    pub table_name: Option<String>,
    /// Column index of InternalColumnBinding
    pub index: IndexType,
    /// The engine-provided internal column definition.
    pub internal_column: InternalColumn,
}
impl PartialEq for InternalColumnBinding {
    // Identity is determined solely by the column index, mirroring
    // `ColumnBinding`'s equality.
    fn eq(&self, other: &Self) -> bool {
        self.index == other.index
    }
}
// Index comparison is a total equivalence relation, so `Eq` is sound.
impl Eq for InternalColumnBinding {}
impl Hash for InternalColumnBinding {
    // Hash must agree with `PartialEq`, which compares only `index`.
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.index.hash(state);
    }
}
/// Outcome of a name lookup performed by `BindContext::resolve_name`.
#[derive(Debug, Clone)]
pub enum NameResolutionResult {
    /// The name resolved to an ordinary column binding.
    Column(ColumnBinding),
    /// The name resolved to an engine internal column.
    InternalColumn(InternalColumnBinding),
    /// The name matched one of the aliases supplied to `resolve_name`;
    /// `scalar` is the aliased expression.
    Alias { alias: String, scalar: ScalarExpr },
}
/// `BindContext` stores all the free variables in a query and tracks the context of binding procedure.
#[derive(Clone, Debug)]
pub struct BindContext {
    /// Enclosing scope, if any; used to resolve outer references.
    pub parent: Option<Box<BindContext>>,
    /// Column bindings visible in the current scope.
    pub columns: Vec<ColumnBinding>,
    // map internal column id to (table_index, column_index)
    pub bound_internal_columns: BTreeMap<ColumnId, (IndexType, IndexType)>,
    /// Bookkeeping for aggregate functions bound in this scope.
    pub aggregate_info: AggregateInfo,
    /// Window functions bound in this scope.
    pub windows: Vec<WindowInfo>,
    /// True if there is aggregation in current context, which means
    /// non-grouping columns cannot be referenced outside aggregation
    /// functions, otherwise a grouping error will be raised.
    pub in_grouping: bool,
    /// CTEs visible in this scope, keyed by name.
    pub ctes_map: Box<DashMap<String, CteInfo>>,
    /// If current binding table is a view, record its database and name.
    ///
    /// It's used to check if the view has a loop dependency.
    pub view_info: Option<(String, String)>,
    /// Set-returning functions in current context.
    /// The key is the `Expr::to_string` of the function.
    pub srfs: DashMap<String, ScalarExpr>,
    /// The clause currently being bound; see [`ExprContext`].
    pub expr_context: ExprContext,
}
/// Information recorded for a common table expression (CTE).
#[derive(Clone, Debug)]
pub struct CteInfo {
    /// Column aliases declared on the CTE, if any.
    pub columns_alias: Vec<String>,
    /// The CTE's defining query.
    pub query: Query,
}
impl BindContext {
    /// Create an empty root context with no parent scope.
    pub fn new() -> Self {
        Self {
            parent: None,
            columns: Vec::new(),
            bound_internal_columns: BTreeMap::new(),
            aggregate_info: AggregateInfo::default(),
            windows: Vec::new(),
            in_grouping: false,
            ctes_map: Box::new(DashMap::new()),
            view_info: None,
            srfs: DashMap::new(),
            expr_context: ExprContext::default(),
        }
    }
    /// Create an empty context whose parent scope is `parent`.
    ///
    /// The parent's CTE map is cloned into the child so CTEs remain visible
    /// in nested scopes.
    pub fn with_parent(parent: Box<BindContext>) -> Self {
        // Clone only the CTE map, then move `parent` into the new context.
        // Previously the entire `BindContext` was deep-cloned just to read
        // `ctes_map` from the original afterwards (a redundant clone).
        let ctes_map = parent.ctes_map.clone();
        BindContext {
            parent: Some(parent),
            columns: vec![],
            bound_internal_columns: BTreeMap::new(),
            aggregate_info: Default::default(),
            windows: vec![],
            in_grouping: false,
            ctes_map,
            view_info: None,
            srfs: DashMap::new(),
            expr_context: ExprContext::default(),
        }
    }
    /// Create a new BindContext with self's parent as its parent
    pub fn replace(&self) -> Self {
        let mut bind_context = BindContext::new();
        bind_context.parent = self.parent.clone();
        bind_context.ctes_map = self.ctes_map.clone();
        bind_context
    }
    /// Generate a new BindContext and take current BindContext as its parent.
    pub fn push(self) -> Self {
        Self::with_parent(Box::new(self))
    }
    /// Returns all column bindings in current scope.
    pub fn all_column_bindings(&self) -> &[ColumnBinding] {
        &self.columns
    }
    /// Append a column binding to the current scope.
    pub fn add_column_binding(&mut self, column_binding: ColumnBinding) {
        self.columns.push(column_binding);
    }
    /// Apply table alias like `SELECT * FROM t AS t1(a, b, c)`.
    /// This method will rename column bindings according to table alias.
    ///
    /// # Errors
    ///
    /// Returns a semantic error if the alias lists more columns than the
    /// table provides. NOTE(review): the table-name rename has already been
    /// applied when this error is returned — confirm callers discard the
    /// context on error.
    pub fn apply_table_alias(
        &mut self,
        alias: &TableAlias,
        name_resolution_ctx: &NameResolutionContext,
    ) -> Result<()> {
        // Rebind every column to the alias name; the original database
        // qualifier no longer applies after aliasing.
        for column in self.columns.iter_mut() {
            column.database_name = None;
            column.table_name = Some(normalize_identifier(&alias.name, name_resolution_ctx).name);
        }
        if alias.columns.len() > self.columns.len() {
            return Err(ErrorCode::SemanticError(format!(
                "table has {} columns available but {} columns specified",
                self.columns.len(),
                alias.columns.len()
            )));
        }
        // Alias column names rename the leading columns; any remaining
        // columns keep their original names.
        for (index, column_name) in alias
            .columns
            .iter()
            .map(|ident| normalize_identifier(ident, name_resolution_ctx).name)
            .enumerate()
        {
            self.columns[index].column_name = column_name;
        }
        Ok(())
    }
    /// Try to find a column binding with given table name and column name.
    /// This method will return error if the given names are ambiguous or invalid.
    ///
    /// Resolution walks from the current scope outwards through parents and,
    /// at each level, prefers (1) the supplied aliases, then (2) ordinary
    /// columns, then (3) internal columns.
    pub fn resolve_name(
        &self,
        database: Option<&str>,
        table: Option<&str>,
        column: &str,
        span: Span,
        available_aliases: &[(String, ScalarExpr)],
    ) -> Result<NameResolutionResult> {
        let mut result = vec![];
        let mut bind_context: &BindContext = self;
        // Lookup parent context to resolve outer reference.
        loop {
            // TODO(leiysky): use `Identifier` for alias instead of raw string
            for (alias, scalar) in available_aliases {
                // Aliases are only candidates for unqualified references.
                if database.is_none() && table.is_none() && column == alias {
                    result.push(NameResolutionResult::Alias {
                        alias: alias.clone(),
                        scalar: scalar.clone(),
                    });
                }
            }
            // We will lookup alias first. If there are matched aliases, we will skip
            // looking up `BindContext` to avoid ambiguity.
            if !result.is_empty() {
                break;
            }
            for column_binding in bind_context.columns.iter() {
                if Self::match_column_binding(database, table, column, column_binding) {
                    result.push(NameResolutionResult::Column(column_binding.clone()));
                }
            }
            if !result.is_empty() {
                break;
            }
            // look up internal column
            if let Some(internal_column) = INTERNAL_COLUMN_FACTORY.get_internal_column(column) {
                // A fresh binding appended after the existing columns of the
                // scope that produced the match.
                let column_binding = InternalColumnBinding {
                    database_name: database.map(|n| n.to_owned()),
                    table_name: table.map(|n| n.to_owned()),
                    index: bind_context.columns.len(),
                    internal_column,
                };
                result.push(NameResolutionResult::InternalColumn(column_binding));
                break;
            }
            // Nothing matched at this level: climb to the parent scope.
            if let Some(ref parent) = bind_context.parent {
                bind_context = parent;
            } else {
                break;
            }
        }
        if result.is_empty() {
            Err(ErrorCode::SemanticError(format!("column {column} doesn't exist")).set_span(span))
        } else if result.len() > 1 {
            Err(ErrorCode::SemanticError(format!(
                "column {column} reference is ambiguous, got {result:?}"
            ))
            .set_span(span))
        } else {
            Ok(result.remove(0))
        }
    }
    /// Check whether `column_binding` matches the (possibly qualified)
    /// reference `database`.`table`.`column`.
    pub fn match_column_binding(
        database: Option<&str>,
        table: Option<&str>,
        column: &str,
        column_binding: &ColumnBinding,
    ) -> bool {
        match (
            (database, column_binding.database_name.as_ref()),
            (table, column_binding.table_name.as_ref()),
        ) {
            // No qualified table name specified
            ((None, _), (None, None)) | ((None, _), (None, Some(_)))
                if column == column_binding.column_name =>
            {
                column_binding.visibility != Visibility::UnqualifiedWildcardInVisible
            }
            // Qualified column reference without database name
            ((None, _), (Some(table), Some(table_name)))
                if table == table_name && column == column_binding.column_name =>
            {
                true
            }
            // Qualified column reference with database name
            ((Some(db), Some(db_name)), (Some(table), Some(table_name)))
                if db == db_name && table == table_name && column == column_binding.column_name =>
            {
                true
            }
            _ => false,
        }
    }
    /// Get result columns of current context in order.
    /// For example, a query `SELECT b, a AS b FROM t` has
    /// `[(index_of(b), "b"), (index_of(a), "b")]` as its result columns.
    ///
    /// This method is used to retrieve the physical representation of result set of
    /// a query.
    pub fn result_columns(&self) -> Vec<(IndexType, String)> {
        self.columns
            .iter()
            .map(|col| (col.index, col.column_name.clone()))
            .collect()
    }
    /// Return the output schema built from the columns of the current context.
    pub fn output_schema(&self) -> DataSchemaRef {
        let fields = self
            .columns
            .iter()
            .map(|column_binding| {
                DataField::new(
                    &column_binding.column_name,
                    *column_binding.data_type.clone(),
                )
            })
            .collect();
        DataSchemaRefExt::create(fields)
    }
    /// Resolve the metadata table index (plus the database/table names, when
    /// known) for an internal column binding.
    fn get_internal_column_table_index(
        column_binding: &InternalColumnBinding,
        metadata: MetadataRef,
    ) -> (IndexType, Option<String>, Option<String>) {
        let metadata = metadata.read();
        let (database_name, table_name) =
            match (&column_binding.database_name, &column_binding.table_name) {
                (Some(database_name), Some(table_name)) => {
                    (Some(database_name.clone()), Some(table_name.clone()))
                }
                (None, Some(table_name)) => (None, Some(table_name.clone())),
                (database_name, None) => {
                    // If table_name is None, assert that metadata.tables has only one table
                    debug_assert!(metadata.tables().len() == 1);
                    return (metadata.table(0).index(), database_name.clone(), None);
                }
            };
        (
            metadata
                .get_table_index(
                    database_name.as_deref(),
                    table_name.as_ref().unwrap().as_str(),
                )
                .unwrap(),
            database_name,
            table_name,
        )
    }
    // Add internal column binding into `BindContext`
    // Convert `InternalColumnBinding` to `ColumnBinding`
    pub fn add_internal_column_binding(
        &mut self,
        column_binding: &InternalColumnBinding,
        metadata: MetadataRef,
    ) {
        let column_id = column_binding.internal_column.column_id();
        // Only bind each internal column id once; repeated requests are no-ops.
        if let std::collections::btree_map::Entry::Vacant(e) =
            self.bound_internal_columns.entry(column_id)
        {
            // New added internal column MUST at the end of `columns` array.
            debug_assert_eq!(column_binding.index, self.columns.len());
            let (table_index, database_name, table_name) =
                BindContext::get_internal_column_table_index(column_binding, metadata.clone());
            let mut metadata = metadata.write();
            metadata.add_internal_column(table_index, column_binding.internal_column.clone());
            self.columns.push(ColumnBinding {
                database_name,
                table_name,
                column_name: column_binding.internal_column.column_name().clone(),
                index: column_binding.index,
                data_type: Box::new(column_binding.internal_column.data_type()),
                visibility: Visibility::Visible,
            });
            e.insert((table_index, column_binding.index));
        }
    }
    /// Rewrite `s_expr` to carry every internal column bound in this context.
    pub fn add_internal_column_into_expr(&self, s_expr: SExpr) -> SExpr {
        let bound_internal_columns = &self.bound_internal_columns;
        let mut s_expr = s_expr;
        for (table_index, column_index) in bound_internal_columns.values() {
            s_expr = SExpr::add_internal_column_index(&s_expr, *table_index, *column_index);
        }
        s_expr
    }
    /// The set of column indexes bound in the current scope.
    pub fn column_set(&self) -> ColumnSet {
        self.columns.iter().map(|c| c.index).collect()
    }
    /// Record which clause is currently being bound.
    pub fn set_expr_context(&mut self, expr_context: ExprContext) {
        self.expr_context = expr_context;
    }
}
impl Default for BindContext {
fn default() -> Self {
BindContext::new()
}
}
|
use std::fs::File;
use std::io::Write;
use std::vec::Vec;
use getopts::Options;
mod sdf;
use sdf::SDF;
/// CLI entry point: reads an input image path and an output image path from
/// the command line and generates a signed distance field.
fn main() {
    let args: Vec<String> = std::env::args().collect();
    if args.len() == 1 {
        println!("Error: \nUsage: cargo run input.png output.png");
        return;
    }
    let mut opts = Options::new();
    let parsed_opts = match opts.parse(&args[1..]) {
        Ok(v) => v,
        // `panic!` with a non-literal first argument is a hard error since
        // Rust 2021; format the error through a literal format string.
        Err(e) => panic!("{}", e),
    };
    // Guard against missing positional arguments instead of panicking with
    // an out-of-bounds index below.
    if parsed_opts.free.len() < 2 {
        println!("Error: \nUsage: cargo run input.png output.png");
        return;
    }
    let input_image_path = &parsed_opts.free[0];
    let output_image_path = &parsed_opts.free[1];
    let mut sdf = SDF::new(input_image_path, output_image_path);
    sdf.generate();
}
|
use liblumen_alloc::erts::exception::InternalResult;
use liblumen_alloc::erts::term::prelude::*;
use liblumen_alloc::erts::Process;
use super::u32;
use crate::distribution::external_term_format::try_split_at;
/// Decode a length-prefixed binary from the external term format: read a
/// 32-bit length, then take that many payload bytes and build a binary term
/// on `process`. Returns the term together with the bytes that follow it.
pub fn decode<'a>(process: &Process, bytes: &'a [u8]) -> InternalResult<(Term, &'a [u8])> {
    let (length, rest) = u32::decode(bytes)?;
    try_split_at(rest, length as usize)
        .map(|(payload, remainder)| (process.binary_from_bytes(payload), remainder))
}
|
use std::borrow::Cow;
use actix_web::{App, Error, HttpServer, Result, dev::{self, ServiceFactory}, web};
use reqwest_middleware::{ClientBuilder, ClientWithMiddleware};
use reqwest_tracing::TracingMiddleware;
mod api;
mod config;
mod pokemon;
mod translations;
#[actix_web::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Parse the app config
    let config = config::parse()?;
    // Initialise stdout logging
    tracing_subscriber::fmt()
        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
        .init();
    // Create a new reqwest client with request tracing enabled
    let http_client = ClientBuilder::new(reqwest::Client::builder().build()?)
        .with(TracingMiddleware)
        .build();
    // Bind the http server on all interfaces at the configured port, then
    // drive it to completion.
    let server = HttpServer::new(move || new_service(http_client.clone(), &APP_CONFIG))
        .bind(("0.0.0.0", config.port))?;
    server.run().await?;
    Ok(())
}
/// Default [`AppConfig`] with the production api endpoints configured
pub static APP_CONFIG: AppConfig = AppConfig {
    // Base URL for the pokemon species API.
    pokemon_url: Cow::Borrowed("https://pokeapi.co"),
    // Base URL for the translation API.
    translations_url: Cow::Borrowed("https://api.funtranslations.com"),
};
/// Base URLs of the upstream services the app proxies; `Cow` allows both
/// the static production defaults and owned values (e.g. in tests).
#[derive(Clone)]
pub struct AppConfig {
    // Base URL of the pokemon data service.
    pokemon_url: Cow<'static, str>,
    // Base URL of the translation service.
    translations_url: Cow<'static, str>,
}
/// Create a new actix_web App Service.
/// Configuring it in a function allows for easy access to the app service for testing
pub fn new_service(
    client: ClientWithMiddleware,
    api_config: &AppConfig,
) -> App<
    impl ServiceFactory<
        dev::ServiceRequest,
        Config = (),
        Response = dev::ServiceResponse<dev::AnyBody>,
        Error = Error,
        InitError = (),
    >,
    dev::AnyBody,
> {
    // Pre-build the full URL templates for the two upstream services.
    let species_url =
        api_config.pokemon_url.to_string() + "/api/v2/pokemon-species/{pokemon_name}/";
    let translation_url = api_config.translations_url.to_string() + "/translate/{translation}";
    App::new()
        .app_data(web::Data::new(client))
        .external_resource("pokemon_species", species_url)
        .external_resource("translations", translation_url)
        .service(api::get_pokemon)
        .service(api::get_pokemon_translated)
}
#[cfg(test)]
mod tests;
|
use std::net::{TcpStream, Shutdown};
use std::io::{Write};
use std::io::{self};
/// Interactive TCP client: connects to localhost:3333, forwards each stdin
/// line to the server, and half-closes the write side when "exit" is typed.
fn main() {
    match TcpStream::connect("localhost:3333") {
        Ok(mut stream) => {
            println!("Successfully connected to server in port 3333");
            loop {
                let mut buffer = String::new();
                io::stdin()
                    .read_line(&mut buffer)
                    .expect("Failed to read from stdin");
                let message = buffer.trim().to_string();
                // `write` may perform a partial write and its result was
                // being discarded; `write_all` retries until the whole
                // buffer (including the trailing newline) is sent.
                stream
                    .write_all(buffer.as_bytes())
                    .expect("Failed to write to stream");
                // if input is exit from stdin, shutdown write channel and break
                if message == "exit" {
                    stream
                        .shutdown(Shutdown::Write)
                        .expect("Failed to shut down write half");
                    break;
                }
            }
        }
        Err(e) => {
            println!("Failed to connect: {}", e);
        }
    }
    println!("Terminated.");
}
//! Counter counts recurrent elements of iterables. It is based on [the Python
//! implementation](https://docs.python.org/3/library/collections.html#collections.Counter).
//!
//! The struct [`Counter`](struct.Counter.html) is the entry-point type for this module.
//!
//! # Math Underpinnings
//!
//! Mathematically, a `Counter` implements a hash-based version of a [multiset],
//! or bag. This is simply an extension of the notion of a set to the idea that
//! we care not only about whether an entity exists within the set, but the number
//! of occurrences within the set. Normal set operations such as intersection,
//! union, etc. are of course still supported.
//!
//! [multiset]: https://en.wikipedia.org/wiki/Set_(abstract_data_type)#Multiset
//!
//! # Examples
//!
//! ## Just count an iterable
//!
//! ```rust
//! use counter::Counter;
//! let char_counts = "barefoot".chars().collect::<Counter<_>>();
//! let counts_counts = char_counts.values().collect::<Counter<_>>();
//! ```
//!
//! ## Update a count
//!
//! ```rust
//! # use counter::Counter;
//! let mut counts = "aaa".chars().collect::<Counter<_>>();
//! counts[&'a'] += 1;
//! counts[&'b'] += 1;
//! ```
//!
//! ```rust
//! # use counter::Counter;
//! let mut counts = "able babble table babble rabble table able fable scrabble"
//! .split_whitespace().collect::<Counter<_>>();
//! // add or subtract an iterable of the same type
//! counts += "cain and abel fable table cable".split_whitespace();
//! // or add or subtract from another Counter of the same type
//! let other_counts = "scrabble cabbie fable babble"
//! .split_whitespace().collect::<Counter<_>>();
//! let difference = counts - other_counts;
//! ```
//!
//! ## Extend a `Counter` with another `Counter`:
//! ```rust
//! # use counter::Counter;
//! # use std::collections::HashMap;
//! let mut counter = "abbccc".chars().collect::<Counter<_>>();
//! let another = "bccddd".chars().collect::<Counter<_>>();
//! counter.extend(&another);
//! let expect = [('a', 1), ('b', 3), ('c', 5), ('d', 3)].iter()
//! .cloned().collect::<HashMap<_, _>>();
//! assert_eq!(counter.into_map(), expect);
//! ```
//! ## Get items with keys
//!
//! ```rust
//! # use counter::Counter;
//! let counts = "aaa".chars().collect::<Counter<_>>();
//! assert_eq!(counts[&'a'], 3);
//! assert_eq!(counts[&'b'], 0);
//! ```
//!
//! ## Get the most common items
//!
//! [`most_common_ordered()`] uses the natural ordering of keys which are [`Ord`].
//!
//! [`most_common_ordered()`]: Counter::most_common_ordered
//! [`Ord`]: https://doc.rust-lang.org/stable/std/cmp/trait.Ord.html
//!
//! ```rust
//! # use counter::Counter;
//! let by_common = "eaddbbccc".chars().collect::<Counter<_>>().most_common_ordered();
//! let expected = vec![('c', 3), ('b', 2), ('d', 2), ('a', 1), ('e', 1)];
//! assert!(by_common == expected);
//! ```
//!
//! [`k_most_common_ordered()`] takes an argument `k` of type `usize` and returns the top `k` most
//! common items. This is functionally equivalent to calling `most_common_ordered()` and then
//! truncating the result to length `k`. However, if `k` is smaller than the length of the counter
//! then `k_most_common_ordered()` can be more efficient, often much more so.
//!
//! ```rust
//! # use counter::Counter;
//! let by_common = "eaddbbccc".chars().collect::<Counter<_>>().k_most_common_ordered(2);
//! let expected = vec![('c', 3), ('b', 2)];
//! assert!(by_common == expected);
//! ```
//!
//! [`k_most_common_ordered()`]: Counter::k_most_common_ordered
//! [`most_common_ordered()`]: Counter::most_common_ordered
//!
//! ## Get the most common items using your own ordering
//!
//! For example, here we break ties reverse alphabetically.
//!
//! ```rust
//! # use counter::Counter;
//! let counter = "eaddbbccc".chars().collect::<Counter<_>>();
//! let by_common = counter.most_common_tiebreaker(|&a, &b| b.cmp(&a));
//! let expected = vec![('c', 3), ('d', 2), ('b', 2), ('e', 1), ('a', 1)];
//! assert!(by_common == expected);
//! ```
//!
//! ## Test counters against another
//!
//! Counters are multi-sets and so can be sub- or supersets of each other.
//!
//! A counter is a _subset_ of another if for all its elements, the other
//! counter has an equal or higher count. Test for this with [`is_subset()`]:
//!
//! ```rust
//! # use counter::Counter;
//! let counter = "aaabb".chars().collect::<Counter<_>>();
//! let superset = "aaabbbc".chars().collect::<Counter<_>>();
//! let not_a_superset = "aaae".chars().collect::<Counter<_>>();
//! assert!(counter.is_subset(&superset));
//! assert!(!counter.is_subset(&not_a_superset));
//! ```
//!
//! Testing for a _superset_ is the inverse, [`is_superset()`] is true if the counter can contain another counter in its entirety:
//!
//! ```rust
//! # use counter::Counter;
//! let counter = "aaabbbc".chars().collect::<Counter<_>>();
//! let subset = "aabbb".chars().collect::<Counter<_>>();
//! let not_a_subset = "aaae".chars().collect::<Counter<_>>();
//! assert!(counter.is_superset(&subset));
//! assert!(!counter.is_superset(&not_a_subset));
//! ```
//!
//! These relationships continue to work when [using a _signed_ integer type for the counter][signed]: all values in the subset must be equal or lower to the values in the superset. Negative
//! values are interpreted as 'missing' those values, and the subset would need to miss those
//! same elements, or be short more, to still be a subset:
//!
//! ```rust
//! # use counter::Counter;
//! let mut subset = "aaabb".chars().collect::<Counter<_, i8>>();
//! subset.insert('e', -2); // short 2 'e's
//! subset.insert('f', -1); // and 1 'f'
//! let mut superset = "aaaabbb".chars().collect::<Counter<_, i8>>();
//! superset.insert('e', -1); // short 1 'e'
//! assert!(subset.is_subset(&superset));
//! assert!(superset.is_superset(&subset));
//! ```
//!
//! [`is_subset()`]: Counter::is_subset
//! [`is_superset()`]: Counter::is_superset
//! [signed]: #use-your-own-type-for-the-count
//!
//! ## Counter intersection and union
//!
//! You can intersect two counters, giving you the minimal counts of their
//! combined elements using the [`&` bitwise and operator][BitAnd], and produce
//! their union with the maximum counts using [`|` bitwise or][BitOr]:
//!
//! ```rust
//! # use counter::Counter;
//! let a = "aaabb".chars().collect::<Counter<_>>();
//! let b = "aabbbbe".chars().collect::<Counter<_>>();
//!
//! let intersection = a & b;
//! let expected_intersection = "aabb".chars().collect::<Counter<_>>();
//! assert_eq!(intersection, expected_intersection);
//!
//! let c = "aaabb".chars().collect::<Counter<_>>();
//! let d = "aabbbbe".chars().collect::<Counter<_>>();
//!
//! let union = c | d;
//! let expected_union = "aaabbbbe".chars().collect::<Counter<_>>();
//! assert_eq!(union, expected_union)
//! ```
//!
//! The in-place [`&=`] and [`|=`] operations are also supported.
//!
//! [BitAnd]: https://doc.rust-lang.org/std/ops/trait.BitAnd.html
//! [BitOr]: https://doc.rust-lang.org/std/ops/trait.BitOr.html
//! [`&=`]: https://doc.rust-lang.org/std/ops/trait.BitAndAssign.html
//! [`|=`]: https://doc.rust-lang.org/std/ops/trait.BitOrAssign.html
//!
//! ## Treat it like a `HashMap`
//!
//! `Counter<T, N>` implements [`Deref`]`<Target=HashMap<T, N>>` and
//! [`DerefMut`]`<Target=HashMap<T, N>>`, which means that you can perform any operations
//! on it which are valid for a [`HashMap`].
//!
//! [`HashMap`]: https://doc.rust-lang.org/std/collections/struct.HashMap.html
//! [`Deref`]: https://doc.rust-lang.org/stable/std/ops/trait.Deref.html
//! [`DerefMut`]: https://doc.rust-lang.org/stable/std/ops/trait.DerefMut.html
//!
//! ```rust
//! # use counter::Counter;
//! let mut counter = "aa-bb-cc".chars().collect::<Counter<_>>();
//! counter.remove(&'-');
//! assert!(counter == "aabbcc".chars().collect::<Counter<_>>());
//! ```
//!
//! Note that `Counter<T, N>` itself implements [`Index`]. `Counter::index` returns a reference to
//! a [`Zero::zero`] value for missing keys.
//!
//! [`Index`]: https://doc.rust-lang.org/stable/std/ops/trait.Index.html
//! [`Zero::zero`]: https://docs.rs/num-traits/latest/num_traits/identities/trait.Zero.html#tymethod.zero
//!
//! ```rust
//! # use counter::Counter;
//! let counter = "aaa".chars().collect::<Counter<_>>();
//! assert_eq!(counter[&'b'], 0);
//! // panics
//! // assert_eq!((*counter)[&'b'], 0);
//! ```
//!
//! # Advanced Usage
//!
//! ## Count any iterable which is `Hash + Eq`
//!
//! You can't use the `most_common*` functions unless `T` is also [`Clone`], but simple counting
//! works fine on a minimal data type.
//!
//! [`Clone`]: https://doc.rust-lang.org/stable/std/clone/trait.Clone.html
//!
//! ```rust
//! # use counter::Counter;
//! #[derive(Debug, Hash, PartialEq, Eq)]
//! struct Inty {
//! i: usize,
//! }
//!
//! impl Inty {
//! pub fn new(i: usize) -> Inty {
//! Inty { i: i }
//! }
//! }
//!
//! // <https://en.wikipedia.org/wiki/867-5309/Jenny>
//! let intys = vec![
//! Inty::new(8),
//! Inty::new(0),
//! Inty::new(0),
//! Inty::new(8),
//! Inty::new(6),
//! Inty::new(7),
//! Inty::new(5),
//! Inty::new(3),
//! Inty::new(0),
//! Inty::new(9),
//! ];
//!
//! let inty_counts = intys.iter().collect::<Counter<_>>();
//! println!("{:?}", inty_counts);
//! // {Inty { i: 8 }: 2, Inty { i: 0 }: 3, Inty { i: 9 }: 1, Inty { i: 3 }: 1,
//! // Inty { i: 7 }: 1, Inty { i: 6 }: 1, Inty { i: 5 }: 1}
//! assert!(inty_counts.get(&Inty { i: 8 }) == Some(&2));
//! assert!(inty_counts.get(&Inty { i: 0 }) == Some(&3));
//! assert!(inty_counts.get(&Inty { i: 6 }) == Some(&1));
//! ```
//!
//! ## Use your own type for the count
//!
//! Sometimes [`usize`] just isn't enough. If you find yourself overflowing your
//! machine's native size, you can use your own type. Here, we use an [`i8`], but
//! you can use most numeric types, including bignums, as necessary.
//!
//! [`usize`]: https://doc.rust-lang.org/stable/std/primitive.usize.html
//! [`i8`]: https://doc.rust-lang.org/stable/std/primitive.i8.html
//!
//! ```rust
//! # use counter::Counter;
//! # use std::collections::HashMap;
//! let counter: Counter<_, i8> = "abbccc".chars().collect();
//! let expected: HashMap<char, i8> = [('a', 1), ('b', 2), ('c', 3)].iter().cloned().collect();
//! assert!(counter.into_map() == expected);
//! ```
mod impls;
use num_traits::{One, Zero};
use std::collections::{BinaryHeap, HashMap};
use std::hash::Hash;
use std::iter;
use std::ops::{AddAssign, SubAssign};
#[cfg(test)]
mod unit_tests;
// Alias for the underlying storage of a `Counter`.
type CounterMap<T, N> = HashMap<T, N>;
/// A hash-based multiset: maps items of type `T` to counts of type `N`
/// (defaulting to `usize`).
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Counter<T: Hash + Eq, N = usize> {
    // Item -> count storage.
    map: CounterMap<T, N>,
    // necessary for `Index::index` since we cannot declare generic `static` variables.
    zero: N,
}
impl<T, N> Counter<T, N>
where
T: Hash + Eq,
{
/// Consumes this counter and returns a [`HashMap`] mapping the items to the counts.
///
/// [`HashMap`]: https://doc.rust-lang.org/stable/std/collections/struct.HashMap.html
pub fn into_map(self) -> HashMap<T, N> {
self.map
}
/// Returns the sum of the counts.
///
/// Use [`len`] to get the number of elements in the counter and use `total` to get the sum of
/// their counts.
///
/// [`len`]: struct.Counter.html#method.len
///
/// # Examples
///
/// ```
/// # use counter::Counter;
/// let counter = Counter::init("abracadabra".chars());
/// assert_eq!(counter.total::<usize>(), 11);
/// assert_eq!(counter.len(), 5);
/// ```
pub fn total<'a, S>(&'a self) -> S
where
S: iter::Sum<&'a N>,
{
self.map.values().sum()
}
}
impl<T, N> Counter<T, N>
where
    T: Hash + Eq,
    N: AddAssign + Zero + One,
{
    /// Add the counts of the elements from the given iterable to this counter.
    ///
    /// Items not yet present are inserted with a zero count before being
    /// incremented.
    pub fn update<I>(&mut self, iterable: I)
    where
        I: IntoIterator<Item = T>,
    {
        iterable.into_iter().for_each(|item| {
            *self.map.entry(item).or_insert_with(N::zero) += N::one();
        });
    }
}
impl<T, N> Counter<T, N>
where
    T: Hash + Eq,
    N: PartialOrd + SubAssign + Zero + One,
{
    /// Remove the counts of the elements from the given iterable to this counter.
    ///
    /// Non-positive counts are automatically removed.
    ///
    /// ```rust
    /// # use counter::Counter;
    /// # use std::collections::HashMap;
    /// let mut counter = "abbccc".chars().collect::<Counter<_>>();
    /// counter.subtract("abba".chars());
    /// let expect = [('c', 3)].iter().cloned().collect::<HashMap<_, _>>();
    /// assert_eq!(counter.into_map(), expect);
    /// ```
    pub fn subtract<I>(&mut self, iterable: I)
    where
        I: IntoIterator<Item = T>,
    {
        for item in iterable {
            let mut remove = false;
            if let Some(entry) = self.map.get_mut(&item) {
                // Only strictly positive counts are decremented; a count at
                // (or below) zero is left unchanged by the subtraction step.
                if *entry > N::zero() {
                    *entry -= N::one();
                }
                // Drop the key once its count is exactly zero.
                // NOTE(review): with a signed `N`, a count that is already
                // negative is neither decremented nor removed here — confirm
                // this matches the "non-positive counts are removed" claim.
                remove = *entry == N::zero();
            }
            if remove {
                self.map.remove(&item);
            }
        }
    }
}
impl<T, N> Counter<T, N>
where
    T: Hash + Eq + Clone,
    N: Clone + Ord,
{
    /// Create a vector of `(elem, frequency)` pairs, sorted most to least common.
    ///
    /// ```rust
    /// # use counter::Counter;
    /// let mc = "pappaopolo".chars().collect::<Counter<_>>().most_common();
    /// let expected = vec![('p', 4), ('o', 3), ('a', 2), ('l', 1)];
    /// assert_eq!(mc, expected);
    /// ```
    ///
    /// Note that the ordering of duplicates is unstable.
    pub fn most_common(&self) -> Vec<(T, N)> {
        // Equal-frequency items keep an arbitrary relative order.
        self.most_common_tiebreaker(|_, _| std::cmp::Ordering::Equal)
    }
    /// Create a vector of `(elem, frequency)` pairs, sorted most to least common.
    ///
    /// In the event that two keys have an equal frequency, use the supplied ordering function
    /// to further arrange the results.
    ///
    /// For example, we can sort reverse-alphabetically:
    ///
    /// ```rust
    /// # use counter::Counter;
    /// let counter = "eaddbbccc".chars().collect::<Counter<_>>();
    /// let by_common = counter.most_common_tiebreaker(|&a, &b| b.cmp(&a));
    /// let expected = vec![('c', 3), ('d', 2), ('b', 2), ('e', 1), ('a', 1)];
    /// assert_eq!(by_common, expected);
    /// ```
    pub fn most_common_tiebreaker<F>(&self, mut tiebreaker: F) -> Vec<(T, N)>
    where
        F: FnMut(&T, &T) -> ::std::cmp::Ordering,
    {
        let mut pairs: Vec<(T, N)> = self
            .map
            .iter()
            .map(|(item, count)| (item.clone(), count.clone()))
            .collect();
        // Sort by descending count, falling back to the caller's tiebreaker
        // for equal counts.
        pairs.sort_unstable_by(|lhs, rhs| {
            rhs.1.cmp(&lhs.1).then_with(|| tiebreaker(&lhs.0, &rhs.0))
        });
        pairs
    }
}
impl<T, N> Counter<T, N>
where
    T: Hash + Eq + Clone + Ord,
    N: Clone + Ord,
{
    /// Create a vector of `(elem, frequency)` pairs, sorted most to least common.
    ///
    /// When two keys occur equally often, their natural ordering decides
    /// which comes first.
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use counter::Counter;
    /// let mc = "abracadabra".chars().collect::<Counter<_>>().most_common_ordered();
    /// let expect = vec![('a', 5), ('b', 2), ('r', 2), ('c', 1), ('d', 1)];
    /// assert_eq!(mc, expect);
    /// ```
    ///
    /// # Time complexity
    ///
    /// *O*(*n* \* log *n*), where *n* is the number of items in the counter. If all you want is
    /// the top *k* items and *k* < *n* then it can be more efficient to use
    /// [`k_most_common_ordered`].
    ///
    /// [`k_most_common_ordered`]: Counter::k_most_common_ordered
    pub fn most_common_ordered(&self) -> Vec<(T, N)> {
        self.most_common_tiebreaker(|a, b| a.cmp(b))
    }
    /// Returns the `k` most common items in decreasing order of their counts.
    ///
    /// The returned vector is the same as would be obtained by calling `most_common_ordered` and
    /// then truncating the result to length `k`. In particular, items with the same count are
    /// sorted in *increasing* order of their keys. Further, if `k` is greater than the length of
    /// the counter then the returned vector will have length equal to that of the counter, not
    /// `k`.
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use counter::Counter;
    /// let counter: Counter<_> = "abracadabra".chars().collect();
    /// let top3 = counter.k_most_common_ordered(3);
    /// assert_eq!(top3, vec![('a', 5), ('b', 2), ('r', 2)]);
    /// ```
    ///
    /// # Time complexity
    ///
    /// Much cheaper than [`most_common_ordered`] when *k* is small relative to the counter
    /// length *n*: for fixed *k* the comparison count scales as *n* \+ *O*(log *n*) and swaps
    /// as *O*(log *n*). As *k* approaches *n* this degrades into a heapsort,
    /// *O*(*n* \* log *n*), at which point [`most_common_ordered`]'s sort is usually faster by
    /// a small constant factor; benchmark if it matters.
    ///
    /// [`most_common_ordered`]: Counter::most_common_ordered
    pub fn k_most_common_ordered(&self, k: usize) -> Vec<(T, N)> {
        use std::cmp::Reverse;
        if k == 0 {
            return Vec::new();
        }
        // Sorting the whole map (quicksort-based) beats the heap selection
        // below when every item is requested anyway.
        if k >= self.map.len() {
            return self.most_common_ordered();
        }
        // Clone only the counts up front (cheap when `N: Copy`); keys are
        // borrowed here and cloned just for the final `k` survivors. The
        // `Reverse` wrapper turns the max-heap into a min-heap on counts,
        // with key order as tiebreaker.
        let mut candidates = self
            .map
            .iter()
            .map(|(key, count)| (Reverse(count.clone()), key));
        // Step 1. Seed a heap with the first `k` candidates: O(k) comparisons.
        let mut top_k: BinaryHeap<_> = candidates.by_ref().take(k).collect();
        // Step 2. For each of the remaining `n - k` candidates, displace the
        // heap root (then sift down) whenever the candidate orders below it.
        // At most n - k + k * (1 + log2(k)) * (H(n) - H(k)) comparisons,
        // where H(i) is the ith harmonic number; n + O(log n) for fixed k.
        for candidate in candidates {
            // `top_k` holds `k > 0` elements whenever this loop runs.
            let mut worst = top_k.peek_mut().expect("the heap is empty");
            if candidate < *worst {
                *worst = candidate;
            }
        }
        // Step 3. Drain the heap in sorted order (second phase of heapsort;
        // 2 * k * log2(k) + O(k) comparisons), cloning each surviving key.
        top_k
            .into_sorted_vec()
            .into_iter()
            .map(|(Reverse(count), key)| (key.clone(), count))
            .collect()
    }
}
impl<T, N> Counter<T, N>
where
    T: Hash + Eq,
    N: PartialOrd + Zero,
{
    /// Test whether this counter is a superset of another counter.
    /// This is true if for all elements in this counter and the other,
    /// the count in this counter is greater than or equal to the count in the other.
    ///
    /// `c.is_superset(&d);` -> `c.iter().all(|(x, n)| n >= d[x]) && d.iter().all(|(x, n)| c[x] >= n)`
    ///
    /// ```rust
    /// # use counter::Counter;
    /// # use std::collections::HashMap;
    /// let c = "aaabbc".chars().collect::<Counter<_>>();
    /// let mut d = "abb".chars().collect::<Counter<_>>();
    ///
    /// assert!(c.is_superset(&d));
    /// d[&'e'] = 1;
    /// assert!(!c.is_superset(&d));
    /// ```
    pub fn is_superset(&self, other: &Self) -> bool {
        // Keys from both counters matter: with a signed `N`, `self` may hold
        // a negative count for a key absent from `other` (for unsigned `N`,
        // `other`'s keys alone would suffice).
        // `!(a >= b)` is deliberately NOT rewritten as `a < b`: `N` is only
        // `PartialOrd`, so incomparable counts must fail, exactly as `>=` does.
        for key in self.keys().chain(other.keys()) {
            if !(self[key] >= other[key]) {
                return false;
            }
        }
        true
    }
    /// Test whether this counter is a subset of another counter.
    /// This is true if for all elements in this counter and the other,
    /// the count in this counter is less than or equal to the count in the other.
    ///
    /// `c.is_subset(&d);` -> `c.iter().all(|(x, n)| n <= d[x]) && d.iter().all(|(x, n)| c[x] <= n)`
    ///
    /// ```rust
    /// # use counter::Counter;
    /// # use std::collections::HashMap;
    /// let mut c = "abb".chars().collect::<Counter<_>>();
    /// let mut d = "aaabbc".chars().collect::<Counter<_>>();
    ///
    /// assert!(c.is_subset(&d));
    /// c[&'e'] = 1;
    /// assert!(!c.is_subset(&d));
    /// ```
    pub fn is_subset(&self, other: &Self) -> bool {
        // Mirror of `is_superset`: `other` may hold negative counts for keys
        // missing from `self`, so both key sets are scanned.
        for key in self.keys().chain(other.keys()) {
            if !(self[key] <= other[key]) {
                return false;
            }
        }
        true
    }
}
|
#![allow(dead_code, non_snake_case)]
use crate::base::{
alu::alu,
dff::Clock,
dff::ClockState::{Tick, Tock},
logic::bit::I,
logic::{and, bit, mux16, not, or, Word},
pc::PC,
register::Register,
};
/// A simulated CPU wiring together a program counter and the A/D registers
/// (apparently a Hack-architecture CPU in the nand2tetris style — the
/// instruction fields decoded in `run` match that layout).
pub struct CPU {
    // Program counter: supplies the address of the next instruction.
    pc: PC,
    // D (data) register.
    d_register: Register,
    // A (address) register; also doubles as a data register.
    a_register: Register,
}
impl CPU {
    /// Creates a CPU with a fresh program counter and registers.
    pub fn new() -> Self {
        CPU {
            pc: PC::new(),
            a_register: Register::new(),
            d_register: Register::new(),
        }
    }
    // -> outM, writeM, addressM[15], pc[15]
    /// Simulates one CPU step for the given clock phase.
    ///
    /// Takes the current memory value `in_m`, the 16-bit `instruction`, and
    /// the `reset` line; returns `(outM, writeM, addressM, pc)` — the ALU
    /// output for memory, the memory-write flag, and the 15-bit A-register
    /// and program-counter values after the update.
    pub fn run(
        &mut self,
        clock_t: &Clock,
        in_m: Word,
        instruction: Word,
        reset: bit,
    ) -> (Word, bit, [bit; 15], [bit; 15]) {
        // Build a clock in the opposite phase: register outputs sampled with
        // it yield the pre-update values here and the post-update values at
        // the end. (Presumably Tick -> Tock and Tock -> Tick — confirm
        // against `Clock::next`.)
        let clock_t_1 = match clock_t.state {
            Tick => {
                let mut c = Clock::new();
                c.next();
                c
            }
            Tock => Clock::new(),
        };
        let current_a_value = self.a_register.output(&clock_t_1);
        let current_d_value = self.d_register.output(&clock_t_1);
        // println!("d_reg: {}, a_reg: {}", current_d_value, current_a_value);
        // Instruction fields: i = instruction type, a = A/M operand select,
        // cccccc = ALU control bits, ddd = destination bits, jjj = jump bits.
        let (i, a, cccccc, ddd, jjj) = CPU::decode(instruction);
        // ALU operates on D and either A or M, chosen by the `a` bit.
        let (alu, zr, ng) = alu(
            current_d_value,
            mux16(current_a_value, in_m, a),
            cccccc[0],
            cccccc[1],
            cccccc[2],
            cccccc[3],
            cccccc[4],
            cccccc[5],
        );
        // A register loads the instruction word itself (A-instruction) or
        // the ALU result (C-instruction whose A-destination bit is set).
        self.a_register.input(
            clock_t,
            mux16(instruction, alu, i),
            or(/* A-instruction */ not(i), /* C-instruction */ ddd[0]),
        );
        // D register loads the ALU result only for a C-instruction with the
        // D-destination bit set.
        self.d_register.input(clock_t, alu, and(ddd[1], i));
        // Jump when any enabled jjj condition matches the ALU flags
        // (ng = negative, zr = zero, neither = positive).
        let is_jump = or(
            or(and(jjj[0], ng), and(jjj[1], zr)),
            and(jjj[2], not(or(zr, ng))),
        );
        self.pc
            .run(clock_t, current_a_value, I, and(is_jump, i), reset);
        // clock_t_1 = clock_t_+1
        // These two must read the values *after* the updates above.
        let next_a_value = self.a_register.output(&clock_t_1);
        let next_pc_value = self.pc.output(&clock_t_1);
        (
            alu,
            and(i, ddd[2]),
            // 15-bit address: element 0 of the 16-bit word is dropped.
            [
                next_a_value[1],
                next_a_value[2],
                next_a_value[3],
                next_a_value[4],
                next_a_value[5],
                next_a_value[6],
                next_a_value[7],
                next_a_value[8],
                next_a_value[9],
                next_a_value[10],
                next_a_value[11],
                next_a_value[12],
                next_a_value[13],
                next_a_value[14],
                next_a_value[15],
            ],
            [
                next_pc_value[1],
                next_pc_value[2],
                next_pc_value[3],
                next_pc_value[4],
                next_pc_value[5],
                next_pc_value[6],
                next_pc_value[7],
                next_pc_value[8],
                next_pc_value[9],
                next_pc_value[10],
                next_pc_value[11],
                next_pc_value[12],
                next_pc_value[13],
                next_pc_value[14],
                next_pc_value[15],
            ],
        )
    }
    /// Splits an instruction word into `(i, a, cccccc, ddd, jjj)`:
    /// type bit, A/M select, ALU control, destination, and jump fields.
    /// (Elements 1 and 2 of the word are never read here.)
    fn decode(word: Word) -> (bit, bit, [bit; 6], [bit; 3], [bit; 3]) {
        (
            word[0],
            word[3],
            [word[4], word[5], word[6], word[7], word[8], word[9]],
            [word[10], word[11], word[12]],
            [word[13], word[14], word[15]],
        )
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use bit::O;
    // Drives the CPU through an A-instruction, two D=A instructions, a
    // D=D-M computation, and finally a reset, checking outputs each tick.
    #[test]
    fn for_cpu() {
        let mut clock = Clock::new();
        let mut cpu = CPU::new();
        let word0 = Word::new([O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, O]);
        let word1 = Word::new([I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I]);
        // CLOCK: TICK
        let (outM, writeM, addressM, pc) = cpu.run(
            &clock,
            word0,
            /* A-instruction, addr: [O, I, I, O, O, O, O, O, O, I, I, I, O, O, I] */
            Word::new([O, O, I, I, O, O, O, O, O, O, I, I, I, O, O, I]),
            O,
        );
        assert_eq!(outM, word0);
        assert_eq!(writeM, O);
        assert_eq!(addressM, [O, I, I, O, O, O, O, O, O, I, I, I, O, O, I]); // A-instruction: the value loaded into the A register comes back
        assert_eq!(pc, [O, O, O, O, O, O, O, O, O, O, O, O, O, O, I]); // PC advances by one
        clock.next();
        clock.next();
        // // CLOCK: TICK
        let (outM, writeM, addressM, pc) = cpu.run(
            &clock,
            word0,
            /* comp: A, dest: D -> D=A */
            Word::new([I, I, I, O, I, I, O, O, O, O, O, I, O, O, O, O]),
            O,
        );
        assert_eq!(
            outM,
            Word::new([O, O, I, I, O, O, O, O, O, O, I, I, I, O, O, I]) // value of the A register
        );
        assert_eq!(writeM, O);
        assert_eq!(addressM, [O, I, I, O, O, O, O, O, O, I, I, I, O, O, I]);
        assert_eq!(pc, [O, O, O, O, O, O, O, O, O, O, O, O, O, I, O]);
        assert_eq!(
            cpu.a_register.output(&clock),
            Word::new([O, O, I, I, O, O, O, O, O, O, I, I, I, O, O, I])
        );
        clock.next();
        clock.next();
        // // CLOCK: TICK
        let (outM, writeM, addressM, pc) = cpu.run(
            &clock,
            word0,
            /* comp: A, dest: D -> D=A */
            Word::new([I, I, I, O, I, I, O, O, O, O, O, I, O, O, O, O]),
            O,
        );
        assert_eq!(
            outM,
            Word::new([O, O, I, I, O, O, O, O, O, O, I, I, I, O, O, I])
        );
        assert_eq!(writeM, O);
        assert_eq!(addressM, [O, I, I, O, O, O, O, O, O, I, I, I, O, O, I]);
        assert_eq!(pc, [O, O, O, O, O, O, O, O, O, O, O, O, O, I, I]);
        assert_eq!(
            cpu.a_register.output(&clock),
            Word::new([O, O, I, I, O, O, O, O, O, O, I, I, I, O, O, I])
        );
        clock.next();
        clock.next();
        // CLOCK: TICK
        let (outM, writeM, addressM, pc) = cpu.run(
            &clock,
            word1,
            /* comp: D-M, dest: D -> D=D-M */
            /* D=12345, M=word1=-1 */
            Word::new([I, I, I, I, O, I, O, O, I, I, O, I, O, O, O, O]),
            O,
        );
        assert_eq!(
            outM,
            // 12346
            Word::new([O, O, I, I, O, O, O, O, O, O, I, I, I, O, I, O]) // D-M=12345-(-1)
        );
        assert_eq!(writeM, O);
        assert_eq!(addressM, [O, I, I, O, O, O, O, O, O, I, I, I, O, O, I]);
        assert_eq!(pc, [O, O, O, O, O, O, O, O, O, O, O, O, I, O, O]);
        assert_eq!(
            cpu.a_register.output(&clock),
            Word::new([O, O, I, I, O, O, O, O, O, O, I, I, I, O, O, I])
        );
        clock.next();
        clock.next();
        // CLOCK: TICK
        let (outM, writeM, addressM, pc) = cpu.run(
            &clock,
            word1,
            /* comp: D-M, dest: D -> D=D-M */
            /* D=12345, M=word1=-1 */
            Word::new([I, I, I, I, O, I, O, O, I, I, O, I, O, O, O, O]),
            I,
        );
        assert_eq!(
            outM,
            Word::new([O, O, I, I, O, O, O, O, O, O, I, I, I, O, I, I])
        );
        assert_eq!(writeM, O);
        assert_eq!(addressM, [O, I, I, O, O, O, O, O, O, I, I, I, O, O, I]);
        assert_eq!(pc, [O, O, O, O, O, O, O, O, O, O, O, O, O, O, O]); // reset
        assert_eq!(
            cpu.a_register.output(&clock),
            Word::new([O, O, I, I, O, O, O, O, O, O, I, I, I, O, O, I])
        );
    }
}
|
use std::io::IoError;
use std::error::{Error, FromError};
/// Possible parser errors
// NOTE(review): this module targets pre-1.0 Rust (`IoError`, `FromError`,
// `#[derive(Show)]`); it will not build on a modern toolchain.
#[derive(Show, PartialEq)]
pub enum ParserErrorKind {
    /// Parser met EOF before parsing a proper datum
    UnexpectedEOF,
    /// Unexpected token: the first string describes expected token, and the second describes
    /// actual token
    UnexpectedToken(String, String),
    /// Lexer met character not allowed in source code
    InvalidCharacter(char),
    /// Parser met un-parseable token
    InvalidToken(String),
    /// Parser met IoError while reading the underlying stream
    UnderlyingError(IoError)
}
/// Parser error
///
/// Pairs a source position with the kind of failure.
#[derive(Show, PartialEq)]
pub struct ParserError {
    // NOTE(review): `from_error` below fills these with 0 — confirm whether
    // positions are 0- or 1-based elsewhere in the parser.
    pub line: usize,
    pub column: usize,
    pub kind: ParserErrorKind,
}
impl Error for ParserError {
    // NOTE(review): an empty description loses information; consider a
    // per-kind message.
    fn description(&self) -> &str {
        ""
    }
    fn detail(&self) -> Option<String> {
        None
    }
    // Exposes the wrapped IoError as the cause for UnderlyingError; every
    // other kind has no underlying cause.
    fn cause(&self) -> Option<&Error> {
        match self.kind {
            ParserErrorKind::UnderlyingError(ref e) => Some(e as &Error),
            _ => None
        }
    }
}
// Lets I/O failures be converted into ParserError automatically (e.g. via
// `try!` in this Rust era). The source position is unknown at the point of
// conversion, hence line/column are zeroed.
impl FromError<IoError> for ParserError {
    fn from_error(err: IoError) -> ParserError {
        ParserError {
            line: 0,
            column: 0,
            kind: ParserErrorKind::UnderlyingError(err)
        }
    }
}
/// Possible compiler errors
// All variants are unit-like, which is what makes the derived `Copy` valid.
#[derive(Show, PartialEq, Copy)]
pub enum CompileErrorKind {
    /// The syntax is not implemented yet
    NotImplemented,
    /// Trying to evaluate `()`
    NullEval,
    /// Trying to evaluate non-proper list, such as `(a b c . d)`
    DottedEval,
    /// Expression body is non-proper list, such as `(a b c . d)`
    DottedBody,
    /// Invalid lambda syntax
    BadLambdaSyntax,
    /// Trying to apply non-function constant
    NotCallable,
    /// Trying to refer a syntax variable
    SyntaxReference,
    /// Trying to refer an unbound variable
    UnboundVariable
}
/// Compiler error
///
/// Currently just wraps a `CompileErrorKind`; no source position is carried.
#[derive(Show, PartialEq, Copy)]
pub struct CompileError {
    pub kind: CompileErrorKind
}
/// Errors raised in runtime
// Unit-only enum, hence the derived `Copy` + `Clone`.
#[derive(Show, PartialEq, Copy, Clone)]
pub enum RuntimeErrorKind {
    /// Number of arguments did not match
    NumArgs,
    /// Argument type did not match
    InvalidType
}
/// Errors raised in runtime
#[derive(Show, PartialEq, Clone)]
pub struct RuntimeError {
    pub kind: RuntimeErrorKind,
    // Human-readable detail accompanying the kind.
    pub desc: String
}
|
pub struct Solution;
impl Solution {
    /// LeetCode 16 "3Sum Closest": returns the sum of the three elements of
    /// `nums` whose sum is closest to `target`.
    ///
    /// Classic sort + two-pointer scan: O(n^2) time, O(1) extra space.
    ///
    /// # Panics
    ///
    /// Panics if `nums.len() < 3` (the problem guarantees at least three
    /// elements).
    pub fn three_sum_closest(mut nums: Vec<i32>, target: i32) -> i32 {
        // `sort_unstable` avoids the allocation of a stable merge sort;
        // equal elements are interchangeable here.
        nums.sort_unstable();
        let len = nums.len();
        let mut closest = nums[0] + nums[1] + nums[2];
        let mut distance = (closest - target).abs();
        for i in 0..len {
            // An exact match can never be improved upon.
            if distance == 0 {
                break;
            }
            // Every remaining sum is at least 3 * nums[i] (array is sorted),
            // so once that lower bound reaches target + distance no closer
            // sum exists. NOTE: `3 * nums[i]` and `target + distance` assume
            // LeetCode-sized inputs; they could overflow for values near
            // i32::MAX / 3.
            if 3 * nums[i] >= target + distance {
                break;
            }
            // Duplicate anchors reproduce already-examined sums.
            if i > 0 && nums[i] == nums[i - 1] {
                continue;
            }
            let (mut j, mut k) = (i + 1, len - 1);
            while j < k {
                let sum = nums[i] + nums[j] + nums[k];
                if (sum - target).abs() < distance {
                    closest = sum;
                    distance = (closest - target).abs();
                }
                // Move the pointer that brings the sum toward the target.
                if sum < target {
                    j += 1;
                } else {
                    k -= 1;
                }
            }
        }
        closest
    }
}
#[test]
fn test0016() {
    // LeetCode 16 example: -1 + 2 + 1 = 2 is the sum closest to target 1.
    assert_eq!(Solution::three_sum_closest(vec![-1, 2, 1, -4], 1), 2)
}
|
//! `fsopen` and related functions in Linux's `mount` API.
use crate::backend::mount::types::{
FsMountFlags, FsOpenFlags, FsPickFlags, MountAttrFlags, MoveMountFlags, OpenTreeFlags,
};
use crate::fd::{BorrowedFd, OwnedFd};
use crate::{backend, io, path};
/// `fsopen(fs_name, flags)`
///
/// # Errors
///
/// Propagates any error from the underlying
/// `backend::mount::syscalls::fsopen` call.
///
/// # References
/// - [Unfinished draft]
///
/// [Unfinished draft]: https://github.com/sunfishcode/linux-mount-api-documentation/blob/main/fsopen.md
#[inline]
pub fn fsopen<Fs: path::Arg>(fs_name: Fs, flags: FsOpenFlags) -> io::Result<OwnedFd> {
    // `into_with_c_str` hands the raw syscall wrapper a NUL-terminated copy.
    fs_name.into_with_c_str(|fs_name| backend::mount::syscalls::fsopen(fs_name, flags))
}
/// `fsmount(fs_fd, flags, attr_flags)`
///
/// # Errors
///
/// Propagates any error from `backend::mount::syscalls::fsmount`.
///
/// # References
/// - [Unfinished draft]
///
/// [Unfinished draft]: https://github.com/sunfishcode/linux-mount-api-documentation/blob/main/fsmount.md
#[inline]
pub fn fsmount(
    fs_fd: BorrowedFd<'_>,
    flags: FsMountFlags,
    attr_flags: MountAttrFlags,
) -> io::Result<()> {
    backend::mount::syscalls::fsmount(fs_fd, flags, attr_flags)
}
/// `move_mount(from_dfd, from_pathname, to_dfd, to_pathname, flags)`
///
/// This is not the same as `mount` with the `MS_MOVE` flag. If you want to
/// use that, use [`mount_move`] instead.
///
/// # Errors
///
/// Propagates any error from `backend::mount::syscalls::move_mount`.
///
/// # References
/// - [Unfinished draft]
///
/// [`mount_move`]: crate::mount::mount_move
/// [Unfinished draft]: https://github.com/sunfishcode/linux-mount-api-documentation/blob/main/move_mount.md
#[inline]
pub fn move_mount<From: path::Arg, To: path::Arg>(
    from_dfd: BorrowedFd<'_>,
    from_pathname: From,
    to_dfd: BorrowedFd<'_>,
    to_pathname: To,
    flags: MoveMountFlags,
) -> io::Result<()> {
    // Both paths must be NUL-terminated C strings; the nested closures keep
    // each temporary alive for the duration of the syscall.
    from_pathname.into_with_c_str(|from_pathname| {
        to_pathname.into_with_c_str(|to_pathname| {
            backend::mount::syscalls::move_mount(
                from_dfd,
                from_pathname,
                to_dfd,
                to_pathname,
                flags,
            )
        })
    })
}
/// `open_tree(dfd, filename, flags)`
///
/// # Errors
///
/// Propagates any error from `backend::mount::syscalls::open_tree`.
///
/// # References
/// - [Unfinished draft]
///
/// [Unfinished draft]: https://github.com/sunfishcode/linux-mount-api-documentation/blob/main/open_tree.md
#[inline]
pub fn open_tree<Path: path::Arg>(
    dfd: BorrowedFd<'_>,
    filename: Path,
    flags: OpenTreeFlags,
) -> io::Result<OwnedFd> {
    filename.into_with_c_str(|filename| backend::mount::syscalls::open_tree(dfd, filename, flags))
}
/// `fspick(dfd, path, flags)`
///
/// # Errors
///
/// Propagates any error from `backend::mount::syscalls::fspick`.
///
/// # References
/// - [Unfinished draft]
///
/// [Unfinished draft]: https://github.com/sunfishcode/linux-mount-api-documentation/blob/main/fspick.md
#[inline]
pub fn fspick<Path: path::Arg>(
    dfd: BorrowedFd<'_>,
    path: Path,
    flags: FsPickFlags,
) -> io::Result<OwnedFd> {
    path.into_with_c_str(|path| backend::mount::syscalls::fspick(dfd, path, flags))
}
/// `fsconfig(fs_fd, FSCONFIG_SET_FLAG, key, NULL, 0)`
///
/// # Errors
///
/// Propagates any error from `backend::mount::syscalls::fsconfig_set_flag`.
///
/// # References
/// - [Unfinished draft]
///
/// [Unfinished draft]: https://github.com/sunfishcode/linux-mount-api-documentation/blob/main/fsconfig.md
#[inline]
#[doc(alias = "fsconfig")]
pub fn fsconfig_set_flag<Key: path::Arg>(fs_fd: BorrowedFd<'_>, key: Key) -> io::Result<()> {
    key.into_with_c_str(|key| backend::mount::syscalls::fsconfig_set_flag(fs_fd, key))
}
/// `fsconfig(fs_fd, FSCONFIG_SET_STRING, key, value, 0)`
///
/// # Errors
///
/// Propagates any error from `backend::mount::syscalls::fsconfig_set_string`.
///
/// # References
/// - [Unfinished draft]
///
/// [Unfinished draft]: https://github.com/sunfishcode/linux-mount-api-documentation/blob/main/fsconfig.md
#[inline]
#[doc(alias = "fsconfig")]
pub fn fsconfig_set_string<Key: path::Arg, Value: path::Arg>(
    fs_fd: BorrowedFd<'_>,
    key: Key,
    value: Value,
) -> io::Result<()> {
    // Key and value are both converted to NUL-terminated C strings; the
    // nested closures keep both temporaries alive across the syscall.
    key.into_with_c_str(|key| {
        value.into_with_c_str(|value| {
            backend::mount::syscalls::fsconfig_set_string(fs_fd, key, value)
        })
    })
}
/// `fsconfig(fs_fd, FSCONFIG_SET_BINARY, key, value, value.len())`
///
/// # Errors
///
/// Propagates any error from `backend::mount::syscalls::fsconfig_set_binary`.
///
/// # References
/// - [Unfinished draft]
///
/// [Unfinished draft]: https://github.com/sunfishcode/linux-mount-api-documentation/blob/main/fsconfig.md
#[inline]
#[doc(alias = "fsconfig")]
pub fn fsconfig_set_binary<Key: path::Arg>(
    fs_fd: BorrowedFd<'_>,
    key: Key,
    value: &[u8],
) -> io::Result<()> {
    key.into_with_c_str(|key| backend::mount::syscalls::fsconfig_set_binary(fs_fd, key, value))
}
/// `fsconfig(fs_fd, FSCONFIG_SET_PATH, key, path, fd)`
///
/// # Errors
///
/// Propagates any error from `backend::mount::syscalls::fsconfig_set_path`.
///
/// # References
/// - [Unfinished draft]
///
/// [Unfinished draft]: https://github.com/sunfishcode/linux-mount-api-documentation/blob/main/fsconfig.md
#[inline]
#[doc(alias = "fsconfig")]
pub fn fsconfig_set_path<Key: path::Arg, Path: path::Arg>(
    fs_fd: BorrowedFd<'_>,
    key: Key,
    path: Path,
    fd: BorrowedFd<'_>,
) -> io::Result<()> {
    // Key and path both become NUL-terminated C strings for the syscall.
    key.into_with_c_str(|key| {
        path.into_with_c_str(|path| {
            backend::mount::syscalls::fsconfig_set_path(fs_fd, key, path, fd)
        })
    })
}
/// `fsconfig(fs_fd, FSCONFIG_SET_PATH_EMPTY, key, "", fd)`
///
/// # Errors
///
/// Propagates any error from
/// `backend::mount::syscalls::fsconfig_set_path_empty`.
///
/// # References
/// - [Unfinished draft]
///
/// [Unfinished draft]: https://github.com/sunfishcode/linux-mount-api-documentation/blob/main/fsconfig.md
#[inline]
#[doc(alias = "fsconfig")]
pub fn fsconfig_set_path_empty<Key: path::Arg>(
    fs_fd: BorrowedFd<'_>,
    key: Key,
    fd: BorrowedFd<'_>,
) -> io::Result<()> {
    key.into_with_c_str(|key| backend::mount::syscalls::fsconfig_set_path_empty(fs_fd, key, fd))
}
/// `fsconfig(fs_fd, FSCONFIG_SET_FD, key, NULL, fd)`
///
/// # Errors
///
/// Propagates any error from `backend::mount::syscalls::fsconfig_set_fd`.
///
/// # References
/// - [Unfinished draft]
///
/// [Unfinished draft]: https://github.com/sunfishcode/linux-mount-api-documentation/blob/main/fsconfig.md
#[inline]
#[doc(alias = "fsconfig")]
pub fn fsconfig_set_fd<Key: path::Arg>(
    fs_fd: BorrowedFd<'_>,
    key: Key,
    fd: BorrowedFd<'_>,
) -> io::Result<()> {
    key.into_with_c_str(|key| backend::mount::syscalls::fsconfig_set_fd(fs_fd, key, fd))
}
/// `fsconfig(fs_fd, FSCONFIG_CMD_CREATE, key, NULL, 0)`
///
/// # Errors
///
/// Propagates any error from `backend::mount::syscalls::fsconfig_create`.
///
/// # References
/// - [Unfinished draft]
///
/// [Unfinished draft]: https://github.com/sunfishcode/linux-mount-api-documentation/blob/main/fsconfig.md
#[inline]
#[doc(alias = "fsconfig")]
pub fn fsconfig_create(fs_fd: BorrowedFd<'_>) -> io::Result<()> {
    backend::mount::syscalls::fsconfig_create(fs_fd)
}
/// `fsconfig(fs_fd, FSCONFIG_CMD_RECONFIGURE, key, NULL, 0)`
///
/// # Errors
///
/// Propagates any error from `backend::mount::syscalls::fsconfig_reconfigure`.
///
/// # References
/// - [Unfinished draft]
///
/// [Unfinished draft]: https://github.com/sunfishcode/linux-mount-api-documentation/blob/main/fsconfig.md
#[inline]
#[doc(alias = "fsconfig")]
pub fn fsconfig_reconfigure(fs_fd: BorrowedFd<'_>) -> io::Result<()> {
    backend::mount::syscalls::fsconfig_reconfigure(fs_fd)
}
|
// use std::process::Command;
/// Prints the full path of each video file to be converted, e.g. as input
/// for an ffmpeg invocation like:
/// ffmpeg -i "001_Minhajulmuslim Bab Adab Pasal Ke-1 Adab Niat.MP4" -b:a 192K -vn "001_Minhajul-muslim-Bab-Adab Pasal Ke-1 Adab Niat.mp3"
fn main() {
    let files = vec![
        "001_Minhajulmuslim Bab Adab Pasal Ke-1 Adab Niat.MP4",
        "002_Minhajul Muslim Bab Adab Pasal Ke-2 Adab Terhadap Allah SWT.MP4",
        "003_Minhajulmuslim, Bab Adab, Pasal 3- Adab Terhadap Kalamullah (Al-Qur'anul Karim).MP4",
        "004_Minhajulmuslim, Bab Adab, Pasal Ke-4- Adab Terhadap Rasulullah SAW.MKV",
        "005_Minhjulmuslim Bab Adab Pasal Ke-5 Adab Terhadap Diri Sendiri.MP4",
    ];
    let path = "C:/Users/susilo/Downloads/Video/Others/Minhajul Muslim";
    for x in files {
        // BUG FIX: the directory and file name were concatenated without a
        // separator, producing e.g. ".../Minhajul Muslim001_..."; join them
        // with an explicit '/'.
        println!("{}/{}", path, x);
    }
    // let mut child = Command::new("ls")
    //     .arg("D:/")
    //     .spawn()
    //     .expect("failed to execute child");
    // let ecode = child.wait().expect("failed to wait on child");
    // assert!(ecode.success());
}
//! Subcommand implementations and dispatch function `run()`.
pub mod info;
pub mod list;
pub mod scan;
pub mod staged;
pub mod stashed;
pub mod status;
pub mod unstaged;
use config::Config;
use errors::{GitGlobalError, Result};
use report::Report;
/// Dispatch to the subcommand named by `command`, producing its `Report`.
pub fn run(command: &str, config: Config) -> Result<Report> {
    // Arm order is irrelevant: the names are disjoint string literals.
    match command {
        "unstaged" => unstaged::execute(config),
        "status" => status::execute(config),
        "stashed" => stashed::execute(config),
        "staged" => staged::execute(config),
        "scan" => scan::execute(config),
        "list" => list::execute(config),
        "info" => info::execute(config),
        unknown => Err(GitGlobalError::BadSubcommand(unknown.to_string())),
    }
}
/// Return the list of all subcommand names and descriptions.
///
/// Consumed by the cli module when constructing the clap::App.
pub fn get_subcommands() -> Vec<(&'static str, &'static str)> {
    // Static table; `(&str, &str)` is Copy, so `to_vec` is a cheap copy.
    [
        ("info", "Shows meta-information about git-global"),
        ("list", "Lists all known repos"),
        ("scan", "Updates cache of known repos"),
        (
            "staged",
            "Show git index status for repos with staged changes",
        ),
        ("stashed", "Shows repos with stashed changes"),
        (
            "status",
            "Shows status (`git status -s`) for repos with any changes",
        ),
        (
            "unstaged",
            "Show working dir status for repos with unstaged changes",
        ),
    ]
    .to_vec()
}
|
mod block;
mod blocks;
mod chunk;
mod chunks;
mod succinct_bit_vector;
mod succinct_bit_vector_builder;
use super::bit_string::BitString;
use super::internal_data_structure::popcount_table::PopcountTable;
use super::internal_data_structure::raw_bit_vector::RawBitVector;
use std::collections::HashSet;
/// Succinct bit vector.
///
/// This class can handle bit sequence of virtually **arbitrary length.**
///
/// In fact, _N_ (bit vector's length) is designed to be limited to: _N <= 2^64_.<br>
/// It should be enough for almost all use cases since a binary data of length of _2^64_ consumes _2^21 = 2,097,152_ TB (terabyte), which is hard to handle by state-of-the-art computer architecture.
///
/// # Examples
/// ```
/// extern crate succinct_rs;
///
/// use succinct_rs::{BitString, SuccinctBitVectorBuilder};
///
/// // Construction -------------------------
/// // `01001` built by `from_bit_string()`
/// let bv = SuccinctBitVectorBuilder::from_bit_string(BitString::new("0100_1")).build(); // Tips: BitString::new() ignores '_'.
///
/// // `01001` built by `from_length()` and `add_bit()`
/// let bv = SuccinctBitVectorBuilder::from_length(0)
/// .add_bit(false)
/// .add_bit(true)
/// .add_bit(false)
/// .add_bit(false)
/// .add_bit(true)
/// .build();
///
/// // Basic operations ---------------------
/// assert_eq!(bv.access(0), false); // [0]1001; 0th bit is '0' (false)
/// assert_eq!(bv.access(1), true); // 0[1]001; 1st bit is '1' (true)
/// assert_eq!(bv.access(4), true); // 0100[1]; 4th bit is '1' (true)
///
/// assert_eq!(bv.rank(0), 0); // [0]1001; Range [0, 0] has no '1'
/// assert_eq!(bv.rank(3), 1); // [0100]1; Range [0, 3] has 1 '1'
/// assert_eq!(bv.rank(4), 2); // [01001]; Range [0, 4] has 2 '1's
///
/// assert_eq!(bv.select(0), Some(0)); // []01001; Minimum i where range [0, i] has 0 '1's is i=0
/// assert_eq!(bv.select(1), Some(1)); // 0[1]001; Minimum i where range [0, i] has 1 '1's is i=1
/// assert_eq!(bv.select(2), Some(4)); // 0100[1]; Minimum i where range [0, i] has 2 '1's is i=4
/// assert_eq!(bv.select(3), None); // There is no i where range [0, i] has 3 '1's
///
/// // rank0, select0 -----------------------
/// assert_eq!(bv.rank0(0), 1); // [0]1001; Range [0, 0] has no '0'
/// assert_eq!(bv.rank0(3), 3); // [0100]1; Range [0, 3] has 3 '0's
/// assert_eq!(bv.rank0(4), 3); // [01001]; Range [0, 4] has 3 '0's
///
/// assert_eq!(bv.select0(0), Some(0)); // []01001; Minimum i where range [0, i] has 0 '0's is i=0
/// assert_eq!(bv.select0(1), Some(0)); // [0]1001; Minimum i where range [0, i] has 1 '0's is i=0
/// assert_eq!(bv.select0(2), Some(2)); // 01[0]01; Minimum i where range [0, i] has 2 '0's is i=2
/// assert_eq!(bv.select0(4), None); // There is no i where range [0, i] has 4 '0's
/// ```
///
/// # Complexity
/// See [README](https://github.com/laysakura/succinct.rs/blob/master/README.md#succinct-bit-vector-complexity).
///
/// # Implementation detail
/// [access()](#method.access)'s implementation is trivial.
///
/// [select()](#method.select) just uses binary search of `rank()` results.
///
/// [rank()](#method.rank)'s implementation is standard but non-trivial.
/// So here explains implementation of _rank()_.
///
/// ## [rank()](#method.rank)'s implementation
/// Say you have the following bit vector.
///
/// ```text
/// 00001000 01000001 00000100 11000000 00100000 00000101 10100000 00010000 001 ; (N=67)
/// ```
///
/// Answer _rank(48)_ in _O(1)_ time-complexity and _o(N)_ space-complexity.
///
/// Naively, you can count the number of '1' from left to right.
/// You will find _rank(48) == 10_ but it took _O(N)_ time-complexity.
///
/// To reduce time-complexity to _O(1)_, you can use the _memoization_ technique.<br>
/// Of course, you can memoize results of _rank(i)_ for every _i ([0, N-1])_.
///
/// ```text
/// Bit vector; 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 1 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 [1] 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 ; (N=67)
/// Memo rank(i); 0 0 0 0 1 1 1 1 1 2 2 2 2 2 2 3 3 3 3 3 3 4 4 4 5 6 6 6 6 6 6 6 6 6 7 7 7 7 7 7 7 7 7 7 7 8 8 9 10 10 11 11 11 11 11 11 11 11 11 12 12 12 12 12 12 12 13
/// ```
///
/// From this memo, you can answer _rank(48) == 10_ in constant time, although space-complexity for this memo is _O(N) > o(N)_.
///
/// To reduce space-complexity while still using memoization, we divide the bit vector into **Chunk** and **Block**.
///
/// ```text
/// Bit vector; 00001000 01000001 00000100 11000000 00100000 00000101 [1]0100000 00010000 001 ; (N=67)
/// Chunk; | 7 | 12 | ; (size = (log N)^2 = 36)
/// Block; |0 |1 |1 |2 |2 |3 |3 |4 |6 |6 |6 |7 |0 |0 |0 |2 |4 |4 |4 |5 |5 |5 |6| ; (size = (log N) / 2 = 3)
/// ```
///
/// - A **Chunk** has size of _(log N)^2_. Its value is _rank(<u>index of the last bit of the chunk</u>)_.
/// - A **Block** has size of _(log N) / 2_. A chunk has many blocks. Block's value is the number of '1's in _[<u>index of the first bit of the chunk the block belongs to</u>, <u>index of the last bit of the block</u>]_ (note that the value is reset to 0 at the first bit of a chunk).
///
/// Now you want to answer _rank(48)_. 48-th bit is in the 2nd chunk, and in the 5th block in the chunk.<br>
/// So the _rank(48)_ is at least:
///
/// _<u>7 (value of 1st chunk)</u> + <u>2 (value of 4th block in the 2nd chunk)</u>_
///
/// Then, focus on 3 bits in 5th block in the 2nd chunk; `[1]01`.<br>
/// As you can see, only 1 '1' is included up to 48-th bit (`101` has 2 '1's but 2nd '1' is 50-th bit, irrelevant to _rank(48)_).
///
/// Therefore, the _rank(48)_ is calculated as:
///
/// _<u>7 (value of 1st chunk)</u> + <u>2 (value of 4th block in the 2nd chunk)</u> + <u>1 ('1's in 5th block up to 48-th bit)</u>_
///
/// OK. That's all... Wait!<br>
/// _rank()_ must be in _O(1)_ time-complexity.
///
/// - _<u>7 (value of 1st chunk)</u>_: _O(1)_ if you store chunk value in array structure.
/// - _<u>2 (value of 4th block in the 2nd chunk)</u>_: Same as above.
/// - _<u>1 ('1's in 5th block up to 48-th bit)</u>_: **_O(<u>length of block</u>) = O(log N)_** !
///
/// Counting '1's in a block must also be _O(1)_, while using _o(N)_ space.<br>
/// We use **Table** for this purpose.
///
/// | Block content | Number of '1's in block |
/// |---------------|-------------------------|
/// | `000` | 0 |
/// | `001` | 1 |
/// | `010` | 1 |
/// | `011` | 2 |
/// | `100` | 1 |
/// | `101` | 2 |
/// | `110` | 2 |
/// | `111` | 3 |
///
/// This table is constructed in `build()`. So we can find the number of '1's in block in _O(1)_ time.<br>
/// Note that this table has _O(log N) = o(N)_ length.
///
/// In summary:
///
/// _rank() = (value of left chunk) + (value of left block) + (value of table keyed by inner block bits)_.
pub struct SuccinctBitVector {
    /// Raw data.
    rbv: RawBitVector,
    /// Total popcount of _[0, <u>last bit of the chunk</u>]_.
    ///
    /// Each chunk takes _2^64_ at max (when every bit is '1' for bit vector of length of _2^64_).
    /// A chunk has blocks.
    chunks: Chunks,
    /// Table to calculate inner-block `rank()` in _O(1)_.
    /// (Maps each possible block bit-pattern to its popcount; see the
    /// "Table" section of the struct-level docs.)
    table: PopcountTable,
}
/// Builder of [SuccinctBitVector](struct.SuccinctBitVector.html).
pub struct SuccinctBitVectorBuilder {
    // Starting content: an explicit bit string or an all-zero vector of a
    // given length (see `SuccinctBitVectorSeed`).
    seed: SuccinctBitVectorSeed,
    // Indices of bits set to '1' through the builder API before `build()`.
    // NOTE(review): inferred from the type; confirm against the builder impl.
    bits_set: HashSet<u64>,
}
// Seed from which a builder starts: either a plain length or a parsed
// bit string.
enum SuccinctBitVectorSeed {
    Length(u64),
    BitStr(BitString),
}
/// Collection of Chunk.
struct Chunks {
    chunks: Vec<Chunk>,
    // Number of chunks, cached separately from `chunks.len()`.
    chunks_cnt: u64,
}
/// Total popcount of _[0, <u>last bit of the chunk</u>]_ of a bit vector.
///
/// Each chunk takes _2^64_ at max (when every bit is '1' for SuccinctBitVector of length of _2^64_).
struct Chunk {
    // Cumulative popcount up to and including this chunk's last bit.
    value: u64, // popcount
    // The blocks subdividing this chunk.
    blocks: Blocks,
    #[allow(dead_code)]
    length: u16,
}
/// Collection of Block in a Chunk.
struct Blocks {
    blocks: Vec<Block>,
    // Number of blocks, cached separately from `blocks.len()`.
    blocks_cnt: u16,
}
/// Total popcount of _[_first bit of the chunk which the block belongs to_, _last bit of the block_]_ of a bit vector.
///
/// Each block takes (log 2^64)^2 = 64^2 = 4096 = 2^12 at max (when every bit in a chunk is 1
/// for a SuccinctBitVector of length of 2^64), which fits in `u16`.
struct Block {
    value: u16, // popcount
    length: u8,
}
|
pub struct Solution;
impl Solution {
    /// LeetCode 401 "Binary Watch": returns every time readable on a binary
    /// watch with exactly `num` LEDs lit, formatted as "h:mm".
    pub fn read_binary_watch(num: i32) -> Vec<String> {
        // Delegates to the recursive `Solver` defined below.
        Solver::new().solve(num)
    }
}
// Accumulator for the recursive enumeration of watch readings.
#[derive(Default)]
struct Solver {
    // Collected "h:mm" strings.
    res: Vec<String>,
}
impl Solver {
    fn new() -> Self {
        Self::default()
    }
    /// Enumerates all readings with exactly `num` LEDs lit and returns the
    /// accumulated "h:mm" strings.
    fn solve(mut self, num: i32) -> Vec<String> {
        if num == 0 {
            // No LEDs lit: the only reading is 0:00.
            self.add(0, 0);
        } else {
            // Readings with hour 0 (all `num` bits among the minute LEDs)…
            self.rec_minute(0, 0, 1, num);
            // …and readings with at least one hour LED lit.
            self.rec_hour(0, 1, num);
        }
        self.res
    }
    // Chooses which hour LEDs (bit = 1, 2, 4, 8) are lit, keeping hour < 12,
    // and hands the remaining `num` LEDs to the minute recursion.
    fn rec_hour(&mut self, hour: i32, bit: i32, num: i32) {
        if hour + bit < 12 && num > 0 {
            if num == 1 {
                // This bit is the last LED: the minutes are 0.
                self.add(hour + bit, 0);
            } else if num - 1 <= 6 {
                // Only 6 minute LEDs exist, so recursing with more than 6
                // remaining LEDs could never produce a reading.
                self.rec_minute(hour + bit, 0, 1, num - 1);
            }
            // Try higher positions for this LED, without and with the
            // current bit taken.
            self.rec_hour(hour, bit << 1, num);
            self.rec_hour(hour + bit, bit << 1, num - 1);
        }
    }
    // Chooses which minute LEDs (bit = 1..32) are lit, keeping minute < 60;
    // emits a reading whenever the final LED is placed (num == 1).
    fn rec_minute(&mut self, hour: i32, minute: i32, bit: i32, num: i32) {
        if minute + bit < 60 && num > 0 {
            if num == 1 {
                // Record this placement; the recursion below still tries the
                // final LED at higher bit positions.
                self.add(hour, minute + bit);
            }
            self.rec_minute(hour, minute, bit << 1, num);
            self.rec_minute(hour, minute + bit, bit << 1, num - 1);
        }
    }
    // Formats a reading with zero-padded two-digit minutes.
    fn add(&mut self, hour: i32, minute: i32) {
        self.res.push(format!("{}:{:02}", hour, minute));
    }
}
#[test]
fn test0401() {
    // Output order is unspecified, so compare sorted copies.
    fn case(num: i32, want: Vec<&str>) {
        let mut got = Solution::read_binary_watch(num);
        let mut want = want.iter().map(|s| s.to_string()).collect::<Vec<String>>();
        got.sort();
        want.sort();
        assert_eq!(got, want);
    }
    case(
        1,
        vec![
            "1:00", "2:00", "4:00", "8:00", "0:01", "0:02", "0:04", "0:08", "0:16", "0:32",
        ],
    );
    case(
        2,
        vec![
            "0:03", "0:05", "0:06", "0:09", "0:10", "0:12", "0:17", "0:18", "0:20", "0:24", "0:33",
            "0:34", "0:36", "0:40", "0:48", "1:01", "1:02", "1:04", "1:08", "1:16", "1:32", "2:01",
            "2:02", "2:04", "2:08", "2:16", "2:32", "3:00", "4:01", "4:02", "4:04", "4:08", "4:16",
            "4:32", "5:00", "6:00", "8:01", "8:02", "8:04", "8:08", "8:16", "8:32", "9:00",
            "10:00",
        ],
    );
}
|
// Also used on DragonflyBSD.
mod acpi;
mod device;
mod iterator;
mod manager;
pub use self::device::IoCtlDevice;
pub use self::iterator::IoCtlIterator;
pub use self::manager::IoCtlManager;
|
// Copyright © 2019 George Burton
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software
// and associated documentation files (the "Software"), to deal in the Software without restriction,
// including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or substantial
// portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
// LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#![warn(unused_imports, missing_docs)]
#![deny(bare_trait_objects)]
//! # About this library
//!
//! A small library for loading and manipulating BioVision motion files.
//!
//! ## The bvh file format
//!
//! The `Bvh` file format is composed of two main sections: the 'Hierarchy' section,
//! which defines the joints of the skeleton, and the 'Motion' section, which defines
//! the motion values for each channel.
//!
//! This project contains some samples in the [`data` directory][`data` directory].
//!
//! ### Hierarchy
//!
//! The 'Hierarchy' section defines the skeleton as a tree of joints, where there is
//! a single root joint, and a chain of child joints extending out from each joint,
//! terminated by an 'End Site' section.
//!
//! Each joint has:
//!
//! * A list of channels, which are the degrees of freedom in which the joint may move.
//! Channels are listed in the order in which the transformation should be applied
//! to the global transform for the root.
//! * An offset, which is the vector distance from the parent joint.
//! * An optional end site, which is used to cap off a chain of joints. This is only used
//! to calculate the length of the final bone in the chain.
//!
//! ```text
//! HIERARCHY
//! ROOT <Root-name>
//! {
//! OFFSET <Root-offset-x> <Root-offset-y> <Root-offset-z>
//! CHANNELS <Num-root-joint-channels> Xposition Yposition <other-root-channels ...>
//! JOINT <Joint-1-name>
//! {
//! OFFSET <Joint-1-offset-x> <Joint-1-offset-y> <Joint-1-offset-z>
//! CHANNELS <Num-joint-1-channels> <Joint-1-channels ...>
//! JOINT <Joint-2-name>
//! {
//! OFFSET <Joint-2-offset-x> <Joint-2-offset-y> <Joint-2-offset-z>
//! CHANNELS <Num-joint-2-channels> <Joint-2-channels ...>
//! JOINT <Joint-with-end-site>
//! {
//! OFFSET ...
//! CHANNELS ...
//! End Site
//! {
//! OFFSET <end-site-offset-x> <end-site-offset-y> <end-site-offset-z>
//! }
//! }
//! ... More child joints
//! }
//! JOINT <Joint-3-name>
//! {
//! OFFSET <Joint-3-offset-x> <Joint-3-offset-y> <Joint-3-offset-z>
//! CHANNELS <Num-joint-3-channels> <Joint-3-channels ...>
//! ... More child joints
//! }
//! ... More child joints
//! }
//! ... More child joints
//! }
//! ```
//!
//! Note that the bvh data is defined in terms of a right-handed coordinate system, where
//! the positive y-axis is the up vector.
//!
//! ### Motion
//!
//! The `MOTION` section of the bvh file records the number of frames, the frame time, and
//! defines the full range of motions for each channel, frame by frame.
//!
//! ```text
//! MOTION
//! Frames: <num-frames>
//! Frame Time: <frame-time>
//! <frame-0-channel-0-value> <frame-0-channel-1-value> <frame-0-channel-2-value> ...
//! <frame-1-channel-0-value> <frame-1-channel-1-value> <frame-1-channel-2-value> ...
//! <frame-2-channel-0-value> <frame-2-channel-1-value> <frame-2-channel-2-value> ...
//! ⋮
//! ```
//!
//! The frame time is recorded in seconds, and tells the animation system how long each frame
//! of the animation should last for. This value is usually around 0.033333333, which is close
//! to 30 frames per second.
//!
//! The list of motion values is a matrix, where each row represents a frame. Each column of
//! the row represents a transformation along or around the channel axis - for example, a
//! motion value of 130.0 for an `Xrotation` channel would correspond to a rotation of 130.0
//! degrees around the x-axis, while the same value for an `Xposition` channel would
//! correspond to a translation of 130.0 units along the x-axis.
//!
//! Note that rotations are conventionally in degrees, although it will be up to your application
//! how to interpret each motion's value.
//!
//! ## Using this library.
//!
//! ### Creating a [`Bvh`][`Bvh`] struct:
//!
//! There are a few ways to create a [`Bvh`][`Bvh`] struct:
//!
//! * You can use the [`from_reader`][`from_reader`] function, which will parse a `BufRead`
//! as a bvh file. The [`from_bytes`][`from_bytes`] function is a convenient wrapper function
//! to parse an in-memory slice of bytes as a `bvh` file. Note that the file does not need to
//! be strictly UTF-8, although it should be an ascii-compatible encoding. These functions are
//! also available as associated methods on the `Bvh` type directly as [`Bvh::from_reader`]
//! [`Bvh::from_reader`] and [`Bvh::from_bytes`][`Bvh::from_bytes`]
//!
//! * You can use the [`bvh!`][`bvh!`] macro to construct a [`Bvh`][`Bvh`] instance in your source files
//! using the same syntax as you would use for a standard bvh file.
//!
//! * You can use the [`builder`][`builder`] module to dynamically construct a bvh. This is useful
//! for converting data from other formats into a [`Bvh`][`Bvh`] struct.
//!
//! * You can create an empty [`Bvh`][`Bvh`] using the [`Bvh::new`][`Bvh::new`] or [`Default::default`]
//! [`Default::default`] methods.
//!
//! ### Other operations:
//!
//! Once you have a valid [`Bvh`][`Bvh`] struct, there are a number of ways you can inspect and
//! manipulate it:
//!
//! * The [`Bvh::joints`][`Bvh::joints`] method can be used to iterate through each [`Joint`][`Joint`]
//! of the [`Bvh`][`Bvh`]. Each [`Joint`][`Joint`] can be inspected through its [`JointData`]
//! [`JointData`], which can be obtained with the [`Joint::data`][`Joint::data`] method.
//!
//! * The [`Bvh::frames`][`Bvh::frames`] method returns a [`Frames`][`Frames`] iterator over each
//! frame of the animation. A [`Frame`][`Frame`] can only be indexed by a [`Channel`][`Channel`]
//! belonging to an associated [`Joint`][`Joint`] of the [`Bvh`][`Bvh`], although you can convert
//! it into an [`&[`][`slice`][`f32`][`f32`][`]`][`slice`] using the [`Frame::as_slice`][`Frame::as_slice`] method.
//!
//! * You can serialise the [`Bvh`][`Bvh`] into a [`Write`][`Write`] type using the [`Bvh::write_to`]
//! [`Bvh::write_to`] method. There is also the [`Bvh::to_bstring`][`Bvh::to_bstring`] method, which
//! converts the [`Bvh`][`Bvh`] into a [`BString`][`BString`]. Various aspects of the formatting
//! can be customised using the [`WriteOptions`][`WriteOptions`] type, such as the line termination
//! style, indentation method, and floating point accuracy.
//!
//! ## Examples
//!
//! This library comes with some example applications, which can be viewed on [Github][Github].
//!
//! ## Other resources
//!
//! * More information on this file format can be found [here][bvh_html].
//! * A large library of bvh files is freely available from [CMU's motion capture database]
//! [CMU's motion capture database].
//!
//! [`data` directory]: https://github.com/burtonageo/bvh_anim/tree/master/data
//! [`bvh`]: struct.Bvh.html
//! [`from_reader`]: fn.from_reader.html
//! [`from_bytes`]: fn.from_bytes.html
//! [`Bvh::from_reader`]: struct.Bvh.html#method.from_reader
//! [`Bvh::from_bytes`]: struct.Bvh.html#method.from_bytes
//! [`bvh!`]: macro.bvh.html
//! [`builder`]: builder/index.html
//! [`Bvh::new`]: struct.Bvh.html#method.new
//! [`Default::default`]: https://doc.rust-lang.org/stable/std/default/trait.Default.html#tymethod.default
//! [`Bvh::joints`]: struct.Bvh.html#method.joints
//! [`Joint`]: struct.Joint.html
//! [`JointData`]: enum.JointData.html
//! [`Joint::data`]: struct.Joint.html#method.data
//! [`Bvh::frames`]: struct.Bvh.html#method.frames
//! [`Frames`]: struct.Frames.html
//! [`Frame`]: struct.Frame.html
//! [`slice`]: https://doc.rust-lang.org/std/primitive.slice.html
//! [`f32`]: https://doc.rust-lang.org/stable/std/primitive.f32.html
//! [`Channel`]: struct.Channel.html
//! [`Frame::as_slice`]: struct.Frame.html#method.as_slice
//! [`Write`]: https://doc.rust-lang.org/stable/std/io/trait.Write.html
//! [`Bvh::write_to`]: struct.Bvh.html#method.write_to
//! [`Bvh::to_bstring`]: struct.Bvh.html#method.to_bstring
//! [`BString`]: https://docs.rs/bstr/0.1.2/bstr/struct.BString.html
//! [`WriteOptions`]: write/struct.WriteOptions.html
//! [Github]: https://github.com/burtonageo/bvh_anim/tree/master/examples
//! [bvh_html]: https://research.cs.wisc.edu/graphics/Courses/cs-838-1999/Jeff/BVH.html
//! [CMU's motion capture database]: https://sites.google.com/a/cgspeed.com/cgspeed/motion-capture/daz-friendly-release
#[macro_use]
mod macros;
pub mod builder;
pub mod errors;
#[cfg(feature = "ffi")]
pub mod ffi;
pub mod write;
mod joint;
mod parse;
use bstr::{
io::{BufReadExt, ByteLines},
BStr, BString, ByteSlice,
};
use mint::Vector3;
use num_traits::{one, zero, One, Zero};
use std::{
convert::TryFrom,
fmt,
io::{self, Cursor, Write},
iter::Enumerate,
mem,
ops::{Index, IndexMut, Range},
slice::{Iter, IterMut},
str::{self, FromStr},
time::Duration,
};
pub use joint::{Joint, JointName};
#[doc(hidden)]
pub use macros::BvhLiteralBuilder;
use errors::{LoadError, ParseChannelError, SetMotionError};
/// An `Enumerate` adaptor which remembers the index of the item most
/// recently yielded, so callers can query it after iteration has advanced.
struct CachedEnumerate<I> {
    /// The underlying enumerated iterator.
    iter: Enumerate<I>,
    /// Index yielded by the most recent `next()` call, or `None` before
    /// the first call.
    last_enumerator: Option<usize>,
}
impl<I> CachedEnumerate<I> {
#[inline]
fn new(iter: Enumerate<I>) -> Self {
CachedEnumerate {
iter,
last_enumerator: None,
}
}
#[inline]
fn last_enumerator(&self) -> Option<usize> {
self.last_enumerator
}
}
impl<I: Iterator> Iterator for CachedEnumerate<I> {
    type Item = <Enumerate<I> as Iterator>::Item;
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        // Cache the enumerator of every item handed out so that
        // `last_enumerator` can report it later.
        self.iter.next().map(|(idx, item)| {
            self.last_enumerator = Some(idx);
            (idx, item)
        })
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}
/// Byte-line iterator over a `BufReadExt`, tagged with line indices and
/// remembering the last index it produced.
type EnumeratedLines<'a> = CachedEnumerate<ByteLines<&'a mut dyn BufReadExt>>;
impl EnumeratedLines<'_> {
    /// Skips blank (whitespace-only) lines, returning the next line with
    /// content, the next I/O error, or `None` at end of input.
    pub fn next_non_empty_line(&mut self) -> Option<<Self as Iterator>::Item> {
        while let Some((idx, result)) = self.next() {
            match result {
                // Propagate I/O errors immediately, keeping the line index.
                Err(e) => return Some((idx, Err(e))),
                Ok(line) => {
                    if !line.trim().is_empty() {
                        return Some((idx, Ok(line)));
                    }
                    // Blank line: keep scanning.
                }
            }
        }
        None
    }
}
/// Loads the `Bvh` from the `reader`.
///
/// Free-function convenience wrapper around
/// [`Bvh::from_reader`](struct.Bvh.html#method.from_reader).
#[inline]
pub fn from_reader<R: BufReadExt>(data: R) -> Result<Bvh, LoadError> {
    Bvh::from_reader(data)
}
/// Parse a sequence of bytes as if it were an in-memory `Bvh` file.
///
/// Free-function convenience wrapper around
/// [`Bvh::from_bytes`](struct.Bvh.html#method.from_bytes).
///
/// # Examples
///
/// ```
/// # use bvh_anim::{self, from_bytes};
/// let bvh_string = br#"
/// HIERARCHY
/// ROOT Hips
/// {
/// OFFSET 0.0 0.0 0.0
/// CHANNELS 3 Xposition Yposition Zposition
/// End Site
/// {
/// OFFSET 0.0 0.0 0.0
/// }
/// }
/// MOTION
/// Frames: 1
/// Frame Time: 0.033333333
/// 0.0 0.0 0.0
/// "#;
///
/// let bvh = from_bytes(&bvh_string[..])?;
/// # let _ = bvh;
/// # Result::<(), bvh_anim::errors::LoadError>::Ok(())
/// ```
#[inline]
pub fn from_bytes<B: AsRef<[u8]>>(bytes: B) -> Result<Bvh, LoadError> {
    Bvh::from_bytes(bytes)
}
/// Parse a `str` as if it were an in-memory `Bvh` file.
///
/// Equivalent to the `FromStr` implementation on [`Bvh`](struct.Bvh.html).
///
/// # Examples
///
/// ```
/// # use bvh_anim::{self, from_str};
/// let bvh_string = "
/// HIERARCHY
/// ROOT Hips
/// {
/// OFFSET 0.0 0.0 0.0
/// CHANNELS 3 Xposition Yposition Zposition
/// End Site
/// {
/// OFFSET 0.0 0.0 0.0
/// }
/// }
/// MOTION
/// Frames: 1
/// Frame Time: 0.033333333
/// 0.0 0.0 0.0
/// ";
///
/// let bvh = from_str(bvh_string)?;
/// # let _ = bvh;
/// # Result::<(), bvh_anim::errors::LoadError>::Ok(())
/// ```
#[inline]
pub fn from_str(string: &str) -> Result<Bvh, LoadError> {
    Bvh::from_str(string)
}
/// A complete `bvh` file.
///
/// See the [module documentation](index.html#using-this-library)
/// for more information.
#[derive(Clone, Default, Debug, PartialEq)]
pub struct Bvh {
    /// The list of joints. If the root joint exists, it is always at
    /// index `0`.
    joints: Vec<Joint>,
    /// The motion values of the `Frame`, laid out frame-major
    /// (expected to hold `num_frames * num_channels` values — see `frames`).
    motion_values: Vec<f32>,
    /// The number of frames in the bvh.
    num_frames: usize,
    /// The number of `Channel`s in the bvh.
    num_channels: usize,
    /// The total time it takes to play one frame.
    frame_time: Duration,
}
impl Bvh {
    /// Create an empty `Bvh`.
    #[inline]
    pub const fn new() -> Self {
        Self {
            joints: Vec::new(),
            motion_values: Vec::new(),
            num_frames: 0,
            num_channels: 0,
            frame_time: Duration::from_secs(0),
        }
    }
    /// Parse a sequence of bytes as if it were an in-memory `Bvh` file.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bvh_anim::{self, Bvh};
    /// let bvh_string = br#"
    /// HIERARCHY
    /// ROOT Hips
    /// {
    /// OFFSET 0.0 0.0 0.0
    /// CHANNELS 3 Xposition Yposition Zposition
    /// End Site
    /// {
    /// OFFSET 0.0 0.0 0.0
    /// }
    /// }
    /// MOTION
    /// Frames: 1
    /// Frame Time: 0.033333333
    /// 0.0 0.0 0.0
    /// "#;
    ///
    /// let bvh = Bvh::from_bytes(&bvh_string[..])?;
    /// # let _ = bvh;
    /// # Result::<(), bvh_anim::errors::LoadError>::Ok(())
    /// ```
    #[inline]
    pub fn from_bytes<B: AsRef<[u8]>>(bytes: B) -> Result<Self, LoadError> {
        // A `Cursor` adapts the in-memory buffer to the reader interface.
        Bvh::from_reader(Cursor::new(bytes))
    }
    /// Loads the `Bvh` from the `reader`.
    pub fn from_reader<R: BufReadExt>(mut reader: R) -> Result<Self, LoadError> {
        // Forward to a single dyn-dispatch implementation so the parser
        // is not monomorphised per reader type.
        Self::from_reader_(reader.by_ref())
    }
    fn from_reader_(reader: &mut dyn BufReadExt) -> Result<Self, LoadError> {
        let mut lines = CachedEnumerate::new(reader.byte_lines().enumerate());
        let mut bvh = Bvh::default();
        // A bvh file is the joint hierarchy followed by the motion data.
        bvh.read_joints(&mut lines)?;
        bvh.read_motion(&mut lines)?;
        Ok(bvh)
    }
    /// Writes the `Bvh` using the `bvh` file format to the `writer`, with
    /// the default formatting options.
    ///
    /// # Notes
    ///
    /// To customise the formatting, see the [`WriteOptions`][`WriteOptions`] type.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// # use bvh_anim::bvh;
    /// # use std::io;
    /// # use std::fs::File;
    /// let bvh = bvh! {
    ///     // fields unspecified
    /// };
    ///
    /// let mut out_file = File::create("./out_file.bvh")?;
    /// bvh.write_to(&mut out_file)?;
    /// # Result::<(), io::Error>::Ok(())
    /// ```
    ///
    /// [`WriteOptions`]: write/struct.WriteOptions.html
    #[inline]
    pub fn write_to<W: Write>(&self, writer: &mut W) -> io::Result<()> {
        write::WriteOptions::default().write(self, writer)
    }
    /// Writes the `Bvh` using the `bvh` file format into a `BString` with
    /// the default formatting options.
    ///
    /// # Notes
    ///
    /// To customise the formatting, see the [`WriteOptions`][`WriteOptions`] type.
    ///
    /// [`WriteOptions`]: write/struct.WriteOptions.html
    #[inline]
    pub fn to_bstring(&self) -> BString {
        write::WriteOptions::default().write_to_string(self)
    }
    /// Returns the root joint if it exists, or `None` if the skeleton is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// # use bvh_anim::{Bvh, bvh};
    /// let bvh = Bvh::new();
    /// assert!(bvh.root_joint().is_none());
    ///
    /// let bvh = bvh! {
    ///     HIERARCHY
    ///     ROOT Hips
    ///     {
    /// #       OFFSET 0.0 0.0 0.0
    /// #       CHANNELS 0
    /// #       End Site
    /// #       {
    /// #           OFFSET 0.0 0.0 0.0
    /// #       }
    ///         // Joints...
    ///     }
    ///     MOTION
    /// #   Frames: 0
    /// #   Frame Time: 0.033333333
    ///     // Frames...
    /// };
    ///
    /// assert!(bvh.root_joint().is_some());
    /// ```
    #[inline]
    pub fn root_joint(&self) -> Option<&Joint> {
        // The root joint, when present, is always stored at index 0.
        self.joints.get(0)
    }
    /// Returns an iterator over all the `Joint`s in the `Bvh`.
    #[inline]
    pub fn joints(&self) -> Iter<'_, Joint> {
        self.joints.iter()
    }
    /// Returns a mutable iterator over all the joints in the `Bvh`.
    pub fn joints_mut(&mut self) -> IterMut<'_, Joint> {
        self.joints.iter_mut()
    }
    /// Returns a `Frames` iterator over the frames of the bvh.
    #[inline]
    pub fn frames(&self) -> Frames<'_> {
        Frames {
            motion_values: &self.motion_values[..],
            num_channels: self.num_channels,
            num_frames: self.num_frames,
            curr_frame: 0,
        }
    }
    /// Returns a mutable iterator over the frames of the bvh.
    #[inline]
    pub fn frames_mut(&mut self) -> FramesMut<'_> {
        FramesMut {
            motion_values: &mut self.motion_values[..],
            num_channels: self.num_channels,
            num_frames: self.num_frames,
            curr_frame: 0,
        }
    }
    /// Gets the motion value at `frame` and `Channel`.
    ///
    /// # Panics
    ///
    /// This method will panic if `frame` is greater than or equal to
    /// `self.num_frames()`, or if the channel's motion index is out of
    /// bounds for the frame.
    #[inline]
    pub fn get_motion(&self, frame: usize, channel: &Channel) -> f32 {
        *self.frames().nth(frame).unwrap().index(channel)
    }
    /// Returns the motion value at `frame` and `channel` if they are in bounds,
    /// `None` otherwise.
    #[inline]
    pub fn try_get_motion(&self, frame: usize, channel: &Channel) -> Option<f32> {
        self.frames()
            .nth(frame)
            .and_then(|f| f.get(channel))
            .map(|m| *m)
    }
    /// Updates the `motion` value at `frame` and `channel` to `new_motion`.
    ///
    /// # Panics
    ///
    /// This method will panic if `frame` is greater than or equal to
    /// `self.num_frames()`, or if `channel` is out of bounds.
    #[inline]
    pub fn set_motion(&mut self, frame: usize, channel: &Channel, new_motion: f32) {
        self.try_set_motion(frame, channel, new_motion).unwrap();
    }
    /// Updates the `motion` value at `frame` and `channel` to `new_motion`.
    ///
    /// # Notes
    ///
    /// Returns `Ok(())` if the `motion` value was successfully set, and `Err(_)` if
    /// the operation was out of bounds.
    #[inline]
    pub fn try_set_motion<'a>(
        &mut self,
        frame: usize,
        channel: &'a Channel,
        new_motion: f32,
    ) -> Result<(), SetMotionError<'a>> {
        // Distinguish a bad frame index from a bad channel in the error.
        let m = self
            .frames_mut()
            .nth(frame)
            .ok_or(SetMotionError::BadFrame(frame))
            .and_then(|f| {
                f.get_mut(channel)
                    .ok_or(SetMotionError::BadChannel(channel))
            })?;
        *m = new_motion;
        Ok(())
    }
    /// Get the number of frames in the `Bvh`.
    #[inline]
    pub const fn num_frames(&self) -> usize {
        self.num_frames
    }
    /// Get the number of channels in the `Bvh`.
    #[inline]
    pub const fn num_channels(&self) -> usize {
        self.num_channels
    }
    /// Get the duration each frame should play for in the `Bvh`.
    #[inline]
    pub const fn frame_time(&self) -> &Duration {
        &self.frame_time
    }
    /// Set the duration each frame should play for in the `Bvh` to `new_frame_time`.
    #[inline]
    pub fn set_frame_time(&mut self, new_frame_time: Duration) {
        self.frame_time = new_frame_time;
    }
}
impl fmt::Display for Bvh {
    /// Formats the `Bvh` as bvh file text, using the default write options.
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&self.to_bstring(), f)
    }
}
impl FromStr for Bvh {
    type Err = LoadError;
    /// Parses the string's bytes as a bvh file.
    #[inline]
    fn from_str(string: &str) -> Result<Self, Self::Err> {
        Bvh::from_bytes(string.as_bytes())
    }
}
impl TryFrom<&'_ str> for Bvh {
    type Error = LoadError;
    /// Delegates to the `FromStr` impl above.
    #[inline]
    fn try_from(string: &'_ str) -> Result<Self, Self::Error> {
        FromStr::from_str(string)
    }
}
impl TryFrom<&'_ BStr> for Bvh {
    type Error = LoadError;
    /// Parses the byte string as a bvh file.
    #[inline]
    fn try_from(string: &'_ BStr) -> Result<Self, Self::Error> {
        Bvh::from_bytes(string.as_bytes())
    }
}
impl TryFrom<&'_ [u8]> for Bvh {
    type Error = LoadError;
    /// Parses the byte slice as a bvh file.
    #[inline]
    fn try_from(bytes: &'_ [u8]) -> Result<Self, Self::Error> {
        Bvh::from_bytes(bytes)
    }
}
/// A `Channel` composed of a `ChannelType` and an index into the
/// corresponding motion data.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct Channel {
    /// The type of the `Channel`.
    channel_type: ChannelType,
    /// The index into the `Frame` which corresponds to this `Channel`.
    motion_index: usize,
}
impl Channel {
    /// Creates a `Channel` from its type and motion index (crate-internal).
    #[inline]
    const fn new(channel_type: ChannelType, motion_index: usize) -> Self {
        Channel {
            channel_type,
            motion_index,
        }
    }
    /// Returns the `ChannelType` to which this `Channel` corresponds.
    #[inline]
    pub const fn channel_type(&self) -> ChannelType {
        self.channel_type
    }
    /// Returns the index of the motion value to which this `Channel` corresponds.
    #[inline]
    pub const fn motion_index(&self) -> usize {
        self.motion_index
    }
}
/// The available degrees of freedom along which a `Joint` may be manipulated.
///
/// Parsed from / written as the bvh channel keywords (`Xrotation`,
/// `Xposition`, etc.) — see `ChannelType::from_bytes` and
/// `ChannelType::as_str`.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum ChannelType {
    /// Can be rotated along the `x` axis.
    RotationX,
    /// Can be rotated along the `y` axis.
    RotationY,
    /// Can be rotated along the `z` axis.
    RotationZ,
    /// Can be translated along the `x` axis.
    PositionX,
    /// Can be translated along the `y` axis.
    PositionY,
    /// Can be translated along the `z` axis.
    PositionZ,
}
impl ChannelType {
/// Attempt to parse a bvh channel byte string into a `ChannelType`.
/// Returns `Err` if the string cannot be parsed.
///
/// # Examples
///
/// ```
/// # use bvh_anim::ChannelType;
/// assert_eq!(
/// ChannelType::from_bytes("Xrotation").unwrap(),
/// ChannelType::RotationX);
///
/// let err = ChannelType::from_bytes("Hello").unwrap_err();
/// assert_eq!(err.into_inner(), "Hello");
/// ```
#[inline]
pub fn from_bytes<B>(s: &B) -> Result<Self, ParseChannelError>
where
B: AsRef<[u8]> + ?Sized,
{
let s = s.as_ref();
match s {
b"Xrotation" => Ok(ChannelType::RotationX),
b"Yrotation" => Ok(ChannelType::RotationY),
b"Zrotation" => Ok(ChannelType::RotationZ),
b"Xposition" => Ok(ChannelType::PositionX),
b"Yposition" => Ok(ChannelType::PositionY),
b"Zposition" => Ok(ChannelType::PositionZ),
_ => Err(ParseChannelError::from(s)),
}
}
/// Returns `true` if this channel corresponds to a rotational
/// transform, otherwise `false`.
///
/// # Example
///
/// ```
/// # use bvh_anim::ChannelType;
/// let channel_type = ChannelType::RotationX;
/// assert!(channel_type.is_rotation());
/// ```
#[inline]
pub fn is_rotation(&self) -> bool {
match *self {
ChannelType::RotationX | ChannelType::RotationY | ChannelType::RotationZ => true,
_ => false,
}
}
/// Returns `true` if this channel corresponds to a positional
/// transform, otherwise `false`.
///
/// # Example
///
/// ```
/// # use bvh_anim::ChannelType;
/// let channel_type = ChannelType::PositionZ;
/// assert!(channel_type.is_position());
/// ```
#[inline]
pub fn is_position(&self) -> bool {
!self.is_rotation()
}
/// Get the `Axis` about which this `Channel` transforms.
///
/// # Example
///
/// ```
/// # use bvh_anim::{Axis, ChannelType};
/// let channel_type = ChannelType::PositionX;
/// assert_eq!(channel_type.axis(), Axis::X);
/// ```
#[inline]
pub fn axis(&self) -> Axis {
match *self {
ChannelType::RotationX | ChannelType::PositionX => Axis::X,
ChannelType::RotationY | ChannelType::PositionY => Axis::Y,
ChannelType::RotationZ | ChannelType::PositionZ => Axis::Z,
}
}
/// Returns the `Vector3` of the channel axis. See the [`Axis::vector`]
/// [`Axis::vector`] method for more info.
///
/// [`Axis::vector`]: enum.Axis.html#method.vector
#[inline]
// @TODO: remove `Clone` bound when
// https://github.com/kvark/mint/commit/8c6c501e442152e776a17322dff10e723bf0eeda
// is published
pub fn axis_vector<T: Clone + One + Zero>(&self) -> Vector3<T> {
self.axis().vector::<T>()
}
/// Returns the string representation of the `ChannelType`.
#[inline]
pub fn as_str(&self) -> &'static str {
match *self {
ChannelType::RotationX => "Xrotation",
ChannelType::RotationY => "Yrotation",
ChannelType::RotationZ => "Zrotation",
ChannelType::PositionX => "Xposition",
ChannelType::PositionY => "Yposition",
ChannelType::PositionZ => "Zposition",
}
}
/// Returns the string representation of the `ChannelType`.
#[inline]
pub fn as_bstr(&self) -> &'static BStr {
<&BStr>::from(self.as_str())
}
}
impl TryFrom<&'_ BStr> for ChannelType {
    type Error = ParseChannelError;
    /// Parses the byte string as a bvh channel keyword.
    #[inline]
    fn try_from(string: &BStr) -> Result<Self, Self::Error> {
        ChannelType::from_bytes(string)
    }
}
impl TryFrom<&'_ [u8]> for ChannelType {
    type Error = ParseChannelError;
    /// Parses the byte slice as a bvh channel keyword.
    #[inline]
    fn try_from(string: &[u8]) -> Result<Self, Self::Error> {
        ChannelType::from_bytes(string)
    }
}
impl TryFrom<&'_ str> for ChannelType {
    type Error = ParseChannelError;
    /// Delegates to the `FromStr` impl below.
    #[inline]
    fn try_from(string: &str) -> Result<Self, Self::Error> {
        ChannelType::from_str(string)
    }
}
impl FromStr for ChannelType {
    type Err = ParseChannelError;
    /// Parses the string's bytes as a bvh channel keyword.
    #[inline]
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        ChannelType::from_bytes(s)
    }
}
impl fmt::Display for ChannelType {
    /// Writes the bvh keyword for this channel (e.g. `Xrotation`).
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(self.as_str())
    }
}
/// An enum which represents an axis along a direction in 3D space.
///
/// See `ChannelType::axis` for the mapping from bvh channels to axes.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum Axis {
    /// `x` axis.
    X,
    /// `y` axis.
    Y,
    /// `z` axis.
    Z,
}
impl Axis {
/// Returns the `Vector3` which represents the axis.
///
/// # Examples
///
/// ```
/// # use bvh_anim::Axis;
/// assert_eq!(Axis::X.vector(), [1.0, 0.0, 0.0].into());
/// assert_eq!(Axis::Y.vector(), [0.0, 1.0, 0.0].into());
/// assert_eq!(Axis::Z.vector(), [0.0, 0.0, 1.0].into());
/// ```
#[inline]
// @TODO: remove `Clone` bound when
// https://github.com/kvark/mint/commit/8c6c501e442152e776a17322dff10e723bf0eeda
// is published
pub fn vector<T: Clone + One + Zero>(&self) -> Vector3<T> {
let (_1, _0) = (one, zero);
match *self {
Axis::X => [_1(), _0(), _0()].into(),
Axis::Y => [_0(), _1(), _0()].into(),
Axis::Z => [_0(), _0(), _1()].into(),
}
}
}
impl fmt::Display for Axis {
    /// Writes the axis as a lowercase letter: `x`, `y` or `z`.
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(match *self {
            Axis::X => "x",
            Axis::Y => "y",
            Axis::Z => "z",
        })
    }
}
/// An iterator over the frames of a `Bvh`.
///
/// Yields a `&Frame` view of each row of the motion matrix in order.
#[derive(Debug)]
pub struct Frames<'a> {
    /// All motion values of the `Bvh`, laid out frame-major.
    motion_values: &'a [f32],
    /// Number of motion values per frame.
    num_channels: usize,
    /// Total number of frames in the animation.
    num_frames: usize,
    /// Index of the next frame to yield.
    curr_frame: usize,
}
impl Frames<'_> {
    /// Returns the number of `Frame`s left to iterate over.
    #[inline]
    pub const fn len(&self) -> usize {
        self.num_frames - self.curr_frame
    }
    /// Returns `true` if the number of `Frame`s left to iterate over is `0`.
    #[inline]
    pub const fn is_empty(&self) -> bool {
        self.len() == 0
    }
}
impl<'a> Iterator for Frames<'a> {
    type Item = &'a Frame;
    fn next(&mut self) -> Option<Self::Item> {
        // `frames_iter_logic` returns the motion-value range of the
        // current frame, or `None` once all frames have been yielded.
        frames_iter_logic(self.num_channels, self.num_frames, self.curr_frame).map(|range| {
            self.curr_frame += 1;
            Frame::from_slice(&self.motion_values[range])
        })
    }
}
/// A mutable iterator over the frames of a `Bvh`.
///
/// Yields a `&mut Frame` view of each row of the motion matrix in order.
#[derive(Debug)]
pub struct FramesMut<'a> {
    /// All motion values of the `Bvh`, laid out frame-major.
    motion_values: &'a mut [f32],
    /// Number of motion values per frame.
    num_channels: usize,
    /// Total number of frames in the animation.
    num_frames: usize,
    /// Index of the next frame to yield.
    curr_frame: usize,
}
impl FramesMut<'_> {
    /// Returns the number of `Frame`s left to iterate over.
    #[inline]
    pub const fn len(&self) -> usize {
        self.num_frames - self.curr_frame
    }
    /// Returns `true` if the number of `Frame`s left to iterate over is `0`.
    #[inline]
    pub const fn is_empty(&self) -> bool {
        self.len() == 0
    }
}
impl<'a> Iterator for FramesMut<'a> {
    type Item = &'a mut Frame;
    fn next(&mut self) -> Option<Self::Item> {
        let range = frames_iter_logic(self.num_channels, self.num_frames, self.curr_frame)?;
        self.curr_frame += 1;
        unsafe {
            // Cast the anonymous lifetime to the 'a lifetime to avoid E0495.
            //
            // NOTE(review): this extends a borrow of `self.motion_values` to
            // `'a`. It appears sound because `curr_frame` strictly increases,
            // so `frames_iter_logic` never hands out overlapping ranges and
            // no two returned `&mut Frame`s alias — but confirm.
            // @TODO: is this safe?
            Some(mem::transmute::<&mut Frame, &'a mut Frame>(
                Frame::from_mut_slice(&mut self.motion_values[range]),
            ))
        }
    }
}
/// Shared indexing logic of `Frames` and `FramesMut`: the half-open range
/// of motion values covering frame `curr_frame`, or `None` when the frame
/// index is past the end of the animation.
#[inline(always)]
fn frames_iter_logic(
    num_channels: usize,
    num_frames: usize,
    curr_frame: usize,
) -> Option<Range<usize>> {
    // A frame index past the end (which also covers an empty animation)
    // yields no range.
    if curr_frame >= num_frames {
        return None;
    }
    // Frame `i` occupies the contiguous run of `num_channels` motion
    // values starting at `i * num_channels`.
    let begin = curr_frame * num_channels;
    Some(begin..begin + num_channels)
}
/// A wrapper for a slice of motion values, so that they can be indexed by `Channel`.
#[derive(PartialEq)]
pub struct Frame([f32]);
impl fmt::Debug for Frame {
    /// Debug-formats the frame as its underlying `f32` slice.
    #[inline]
    fn fmt(&self, fmtr: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&self.0, fmtr)
    }
}
impl Frame {
    /// Reinterprets a borrowed slice of motion values as a `Frame`.
    #[inline]
    fn from_slice<'a>(frame_motions: &'a [f32]) -> &'a Frame {
        // SAFETY: `Frame` is a newtype wrapper around `[f32]`, so the
        // pointer cast reinterprets the same memory with the same lifetime.
        // NOTE(review): this relies on `Frame`'s layout matching `[f32]`;
        // a `#[repr(transparent)]` attribute on `Frame` would guarantee it.
        unsafe { &*(frame_motions as *const [f32] as *const Frame) }
    }
    /// Reinterprets a mutable slice of motion values as a `Frame`.
    #[inline]
    fn from_mut_slice<'a>(frame_motions: &'a mut [f32]) -> &'a mut Frame {
        // SAFETY: as `from_slice`, for the unique-borrow case.
        unsafe { &mut *(frame_motions as *mut [f32] as *mut Frame) }
    }
    /// Returns the number of motion values in the `Frame`.
    #[inline]
    pub fn len(&self) -> usize {
        self.0.len()
    }
    /// Returns true if the `Frame` has a length of 0.
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
    /// Returns a reference to the motion element corresponding to `Channel`, or `None`
    /// if out of bounds.
    #[inline]
    pub fn get(&self, channel: &Channel) -> Option<&f32> {
        self.0.get(channel.motion_index)
    }
    /// Returns a mutable reference to the motion element corresponding to `Channel`,
    /// or `None` if out of bounds.
    #[inline]
    pub fn get_mut(&mut self, channel: &Channel) -> Option<&mut f32> {
        self.0.get_mut(channel.motion_index)
    }
    /// Get the `Frame` as a slice of `f32` values.
    pub fn as_slice(&self) -> &[f32] {
        &self.0[..]
    }
    /// Get the `Frame` as a mutable slice of `f32` values.
    pub fn as_mut_slice(&mut self) -> &mut [f32] {
        &mut self.0[..]
    }
}
impl Index<&Channel> for Frame {
    type Output = f32;
    /// Indexes into the frame by the channel's motion index.
    ///
    /// # Panics
    ///
    /// Panics if the channel's motion index is out of bounds for this frame.
    #[inline]
    fn index(&self, channel: &Channel) -> &Self::Output {
        self.0.index(channel.motion_index)
    }
}
impl IndexMut<&Channel> for Frame {
    /// Mutable counterpart of `Index`; same panic conditions.
    fn index_mut(&mut self, channel: &Channel) -> &mut Self::Output {
        self.0.index_mut(channel.motion_index)
    }
}
/// Nanoseconds per second, as `f64`.
const NSEC_FACTOR: f64 = 1_000_000_000.0;
/// Converts a frame time in (possibly fractional) seconds into a `Duration`.
///
/// Negative or non-finite inputs saturate through the `as u64` cast.
#[inline]
fn fraction_seconds_to_duration(x: f64) -> Duration {
    Duration::from_nanos((x * NSEC_FACTOR) as u64)
}
/// Converts a `Duration` into fractional seconds.
///
/// BUGFIX: the previous implementation returned only the sub-second part
/// (`subsec_nanos / 1e9`), silently dropping whole seconds for any
/// duration of one second or more. Include `as_secs` in the result.
#[inline]
fn duation_to_fractional_seconds(duration: &Duration) -> f64 {
    duration.as_secs() as f64 + f64::from(duration.subsec_nanos()) / NSEC_FACTOR
}
|
use std::path::Path;
use gfapi_sys::gluster::*;
use libc::{
timespec, O_APPEND, O_CREAT, O_RDWR, O_TRUNC, SEEK_SET, S_IRGRP, S_IROTH, S_IRUSR, S_IWUSR,
S_IXUSR,
};
// Exercises the gfapi bindings end to end: connect, mkdir, create/write/
// seek/read a file, set its timestamps, then list the directory. Each
// step prints and returns early on failure (except mkdir, which may fail
// if the directory already exists).
fn main() {
    // Connect to the "test" gluster volume via the local management
    // daemon on port 24007.
    let cluster = match Gluster::connect("test", "localhost", 24007) {
        Ok(c) => c,
        Err(e) => {
            println!("connection failed: {:?}", e);
            return;
        }
    };
    // mkdir with 0744 permissions (user read + write + execute,
    // group read, other read).
    match cluster.mkdir(&Path::new("gfapi"), S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH | S_IXUSR) {
        Ok(_) => println!("mkdir gfapi success"),
        Err(e) => {
            println!("mkdir failed: {:?}", e);
        }
    }
    // Create (or truncate) the test file, 0644 permissions.
    let file_handle = match cluster.create(
        &Path::new("gfapi/test"),
        O_CREAT | O_RDWR | O_TRUNC,
        S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH,
    ) {
        Ok(file_handle) => file_handle,
        Err(e) => {
            println!("create file failed: {:?}", e);
            return;
        }
    };
    match file_handle.write(b"hello world", O_APPEND) {
        Ok(bytes_written) => {
            println!("Wrote {} bytes", bytes_written);
        }
        Err(e) => {
            println!("writing to file failed: {:?}", e);
            return;
        }
    };
    // Rewind so the read below sees the data just written.
    match file_handle.lseek(0, SEEK_SET) {
        Ok(_) => {
            println!("Seek back to 0");
        }
        Err(e) => {
            println!("Seeking in file failed: {:?}", e);
            return;
        }
    };
    let mut read_buff: Vec<u8> = Vec::with_capacity(1024);
    match file_handle.read(&mut read_buff, 1024, 0) {
        Ok(bytes_read) => {
            println!("Read {} bytes", bytes_read);
            read_buff.truncate(bytes_read as usize);
            println!("Contents: {:?}", read_buff);
        }
        Err(e) => {
            // NOTE(review): this message says "writing" but this is the
            // read path — looks copy-pasted from the write arm above.
            println!("writing to file failed: {:?}", e);
            return;
        }
    };
    // Zero out the access and modified times
    println!("Setting access and modified times");
    // utimens takes [atime, mtime]; both set to the epoch here.
    let file_times = [
        timespec {
            tv_sec: 0,
            tv_nsec: 0,
        },
        timespec {
            tv_sec: 0,
            tv_nsec: 0,
        },
    ];
    cluster
        .utimens(&Path::new("gfapi/test"), &file_times)
        .unwrap();
    // Finally, enumerate the directory entries we created.
    let d = cluster.opendir(&Path::new("gfapi")).unwrap();
    for dir_entry in d {
        println!("Dir_entry: {:?}", dir_entry);
    }
}
|
/// The severity level of a log message.
#[derive(Copy, Clone, Debug, Serialize, Deserialize)]
pub enum LogLevel {
    /// Fine-grained tracing output.
    Trace,
    /// Debugging output.
    Debug,
    /// Informational messages.
    Info,
    /// Warnings.
    Warn,
    /// Errors.
    Error,
}
use crate::geometry::*;
use ordered_float::OrderedFloat;
use approx::relative_eq;
/// Slices a geometric primitive into the pieces that fall between a set of
/// axis-aligned parallel planes.
trait SliceUp: Sized {
    /// Iterator over the pieces of `Self` that fall within one slice.
    type PerSlice: Iterator<Item = Self>;
    /// Iterator producing one `PerSlice` iterator per slice.
    type Out: Iterator<Item = Self::PerSlice>;
    /// Slices with planes perpendicular to the x axis.
    fn slice_up_x(&self, planes: PlaneSet) -> Self::Out;
    /// Slices with planes perpendicular to the y axis.
    fn slice_up_y(&self, planes: PlaneSet) -> Self::Out;
}
/// A line contributes at most one sub-segment per slice.
type LineIter = ::std::option::IntoIter<Line>;
/// Iterator state for slicing a line segment against `planes`.
///
/// The segment's coordinate along the slicing axis is parameterised as
/// `d(t) = m * t + c` for `t` in `[0, 1]`.
#[derive(Debug)]
struct LineSliceIter {
    /// The line being sliced.
    l: Line,
    /// Slope along the slicing axis (`p[1] - p[0]` on that axis).
    m: f32,
    /// Offset along the slicing axis (`p[0]` on that axis).
    c: f32,
    /// The set of slicing planes.
    planes: PlaneSet,
    /// Index of the next slice to produce.
    i: usize,
}
impl Iterator for LineSliceIter {
    type Item = LineIter;
    fn next(&mut self) -> Option<LineIter> {
        // One item per slice; done after `count` slices.
        if self.i >= self.planes.count {
            return None;
        }
        if self.m == 0.0 {
            // Segment is parallel to the planes: yield it whole for every
            // slice rather than dividing by zero below.
            self.i += 1;
            return Some(Some(self.l).into_iter());
        }
        // Positions of the two planes bounding slice `i`.
        let lower = self.i as f32;
        let upper = lower + 1.0;
        let lower_d = self.planes.start + self.planes.step * lower;
        let upper_d = self.planes.start + self.planes.step * upper;
        // Parameters where the segment crosses each bounding plane,
        // clamped to the segment's [0, 1] span.
        let mut lower_t = (lower_d - self.c) / self.m;
        let mut upper_t = (upper_d - self.c) / self.m;
        lower_t = lower_t.max(0.0).min(1.0);
        upper_t = upper_t.max(0.0).min(1.0);
        if self.m < 0.0 {
            // Segment runs backwards along the axis: crossings come in
            // reverse order.
            ::std::mem::swap(&mut lower_t, &mut upper_t);
        }
        self.i += 1;
        if !relative_eq!(lower_t, upper_t) {
            // Re-parameterise the clipped span back into endpoints.
            let p = &self.l.p;
            let v = p[1] - p[0];
            Some(
                Some(Line {
                    p: [p[0] + v * lower_t, p[0] + v * upper_t],
                })
                .into_iter(),
            )
        } else {
            // Degenerate span: no part of the segment lies in this slice.
            Some(None.into_iter())
        }
    }
}
impl SliceUp for Line {
    type PerSlice = LineIter;
    type Out = LineSliceIter;
    /// Slices against planes perpendicular to the x axis, parameterising
    /// the segment's x coordinate as `m * t + c`.
    fn slice_up_x(&self, planes: PlaneSet) -> LineSliceIter {
        LineSliceIter {
            l: *self,
            m: self.p[1].x - self.p[0].x,
            c: self.p[0].x,
            planes,
            i: 0,
        }
    }
    /// Slices against planes perpendicular to the y axis, parameterising
    /// the segment's y coordinate as `m * t + c`.
    fn slice_up_y(&self, planes: PlaneSet) -> LineSliceIter {
        LineSliceIter {
            l: *self,
            m: self.p[1].y - self.p[0].y,
            c: self.p[0].y,
            planes,
            i: 0,
        }
    }
}
/// A quadratic curve may contribute up to two sub-curves per slice.
type CurveIter = arrayvec::IntoIter<[Curve; 2]>;
/// Iterator state for slicing a quadratic curve against `planes`.
///
/// The curve's coordinate along the slicing axis is the quadratic
/// `d(t) = a * t^2 + b * t + c_shift` for `t` in `[0, 1]`.
struct CurveSliceIter {
    /// The curve being sliced.
    curve: Curve,
    /// The set of slicing planes.
    planes: PlaneSet,
    /// Index of the next slice to produce.
    i: usize,
    /// Quadratic coefficient along the slicing axis.
    a: f32,
    /// Linear coefficient along the slicing axis.
    b: f32,
    /// Constant coefficient along the slicing axis.
    c_shift: f32,
}
impl Iterator for CurveSliceIter {
    type Item = CurveIter;
    fn next(&mut self) -> Option<Self::Item> {
        use crate::geometry::solve_quadratic_real as solve;
        use crate::geometry::Cut;
        use crate::geometry::RealQuadraticSolution as RQS;
        use arrayvec::ArrayVec;
        // One item per slice; done after `count` slices.
        if self.i >= self.planes.count {
            return None;
        }
        let lower = self.i as f32;
        self.i += 1;
        // BUGFIX: the upper bounding plane of slice `i` is the plane at
        // index `i + 1` (`lower + 1.0`), mirroring `LineSliceIter::next`.
        // The previous `lower + self.planes.step` evaluated to
        // `start + step * (i + step)`, which is only correct when
        // `step == 1.0`.
        let upper = lower + 1.0;
        let lower_d = self.planes.start + self.planes.step * lower;
        let upper_d = self.planes.start + self.planes.step * upper;
        // Curve parameters where the quadratic crosses each bounding plane.
        let l_sol = solve(self.a, self.b, self.c_shift - lower_d);
        let u_sol = solve(self.a, self.b, self.c_shift - upper_d);
        let mut result = ArrayVec::<[Curve; 2]>::new();
        match (l_sol.in_order(), u_sol.in_order()) {
            (RQS::Two(a, b), RQS::Two(c, d)) => {
                // Two pieces: the curve enters and leaves the slab twice.
                // Pair the crossings according to the parabola's direction.
                let (a, b, c, d) = if self.a > 0.0 {
                    (c, a, b, d)
                } else {
                    (a, c, d, b)
                };
                let (a, b, c, d) = (
                    a.min(1.0).max(0.0),
                    b.min(1.0).max(0.0),
                    c.min(1.0).max(0.0),
                    d.min(1.0).max(0.0),
                );
                if !relative_eq!(a, b) {
                    result.push(self.curve.cut_from_to(a, b));
                }
                if !relative_eq!(c, d) {
                    result.push(self.curve.cut_from_to(c, d));
                }
            }
            (RQS::Two(a, b), RQS::None)
            | (RQS::Two(a, b), RQS::Touch(_))
            | (RQS::None, RQS::Two(a, b))
            | (RQS::Touch(_), RQS::Two(a, b))
            | (RQS::One(a), RQS::One(b)) => {
                // One piece: a single span of the curve lies in the slab.
                let (a, b) = if a > b { (b, a) } else { (a, b) };
                let a = a.min(1.0).max(0.0);
                let b = b.min(1.0).max(0.0);
                if !relative_eq!(a, b) {
                    result.push(self.curve.cut_from_to(a, b));
                }
            }
            (RQS::All, RQS::None) | (RQS::None, RQS::All) => {
                // coincident with one plane
                result.push(self.curve);
            }
            (RQS::None, RQS::None) => {
                if self.a == 0.0
                    && self.b == 0.0
                    && self.c_shift >= lower_d
                    && self.c_shift <= upper_d
                {
                    // parallel to planes, inbetween
                    result.push(self.curve);
                }
            }
            _ => unreachable!(), // impossible
        }
        Some(result.into_iter())
    }
}
/// An evenly spaced family of axis-aligned slicing planes: plane `i` sits
/// at `start + step * i`. Consecutive planes bound a "slab".
#[derive(Debug)]
struct PlaneSet {
    /// Position of the first plane along the slicing axis.
    start: f32,
    /// Spacing between consecutive planes.
    step: f32,
    /// Number of slabs (plane intervals) to produce.
    count: usize,
}
impl SliceUp for Curve {
    type PerSlice = CurveIter;
    type Out = CurveSliceIter;

    /// Slice against planes perpendicular to the x axis. The quadratic
    /// coefficients are those of the Bezier expanded in the monomial basis.
    fn slice_up_x(&self, planes: PlaneSet) -> CurveSliceIter {
        CurveSliceIter {
            curve: *self,
            a: self.p[0].x - 2.0 * self.p[1].x + self.p[2].x,
            b: 2.0 * (self.p[1].x - self.p[0].x),
            c_shift: self.p[0].x,
            planes,
            i: 0,
        }
    }

    /// Slice against planes perpendicular to the y axis.
    fn slice_up_y(&self, planes: PlaneSet) -> CurveSliceIter {
        CurveSliceIter {
            curve: *self,
            a: self.p[0].y - 2.0 * self.p[1].y + self.p[2].y,
            b: 2.0 * (self.p[1].y - self.p[0].y),
            c_shift: self.p[0].y,
            planes,
            i: 0,
        }
    }
}
/// Rasterizes line and quadratic-curve segments onto a `width` x `height`
/// pixel grid, calling `output(x, y, coverage)` exactly once per pixel,
/// in scanline order (all x for y = 0, then y = 1, ...).
///
/// Works by a two-level sweep: segments are sliced into one-pixel-tall
/// scanline strips (y sweep), each strip is sliced into one-pixel-wide
/// cells (x sweep), and each cell piece contributes a signed area to the
/// pixel plus a running accumulator (`acc`) carried to all pixels to its
/// right. `output` receives the absolute value of the accumulated signed
/// coverage. NOTE(review): values appear intended to be 0..=1 coverage,
/// but this is not clamped here — confirm against callers.
pub fn rasterize<O: FnMut(u32, u32, f32)>(
    lines: &[Line],
    curves: &[Curve],
    width: u32,
    height: u32,
    mut output: O,
) {
    // Pair each segment with its bounding box and sort by top edge so the
    // y sweep can activate segments in order with a single moving index.
    let mut lines: Vec<_> = lines.iter().map(|&l| (l, l.bounding_box())).collect();
    lines.sort_by_key(|&(_, ref a)| OrderedFloat(a.min.y));
    let mut curves: Vec<_> = curves.iter().map(|&c| (c, c.bounding_box())).collect();
    curves.sort_by_key(|&(_, ref a)| OrderedFloat(a.min.y));
    let mut y = 0;
    // Index of the first not-yet-activated segment in each sorted list.
    let mut next_line = 0;
    let mut next_curve = 0;
    // Per-scanline slicing iterators for currently overlapping segments.
    let mut active_lines_y = Vec::new();
    let mut active_curves_y = Vec::new();
    // Per-pixel slicing iterators, reused across scanlines.
    let mut active_lines_x = Vec::new();
    let mut active_curves_x = Vec::new();
    // Segment pieces (with x bounds) belonging to the current scanline.
    let mut scanline_lines = Vec::new();
    let mut lines_to_remove = Vec::new();
    let mut scanline_curves = Vec::new();
    let mut curves_to_remove = Vec::new();
    // Sweep scanlines while any segment remains pending or active.
    while y < height
        && (next_line != lines.len()
            || next_curve != curves.len()
            || !active_lines_y.is_empty()
            || !active_curves_y.is_empty())
    {
        let lower = y as f32;
        let upper = (y + 1) as f32;
        // Add newly active segments: everything whose bounding box starts
        // above the bottom of this scanline.
        for &(ref line, ref bb) in lines[next_line..].iter().take_while(|p| p.1.min.y < upper) {
            let planes = PlaneSet {
                start: lower,
                step: 1.0,
                // One slab per scanline the segment can touch, at least 1.
                count: (bb.max.y.ceil() - lower).max(1.0) as usize,
            };
            active_lines_y.push(line.slice_up_y(planes));
            next_line += 1;
        }
        for &(ref curve, ref bb) in curves[next_curve..]
            .iter()
            .take_while(|p| p.1.min.y < upper)
        {
            let planes = PlaneSet {
                start: lower,
                step: 1.0,
                count: (bb.max.y.ceil() - lower).max(1.0) as usize,
            };
            active_curves_y.push(curve.slice_up_y(planes));
            next_curve += 1;
        }
        // get y sliced segments for this scanline
        scanline_lines.clear();
        scanline_curves.clear();
        for (k, itr) in active_lines_y.iter_mut().enumerate() {
            if let Some(itr) = itr.next() {
                for line in itr {
                    scanline_lines.push((line, line.x_bounds()))
                }
            } else {
                // Slicer exhausted: segment no longer reaches this scanline.
                lines_to_remove.push(k);
            }
        }
        for (k, itr) in active_curves_y.iter_mut().enumerate() {
            if let Some(itr) = itr.next() {
                for curve in itr {
                    scanline_curves.push((curve, curve.x_bounds()))
                }
            } else {
                curves_to_remove.push(k);
            }
        }
        // remove deactivated segments (reverse order keeps pending
        // swap_remove indices valid)
        for k in lines_to_remove.drain(..).rev() {
            active_lines_y.swap_remove(k);
        }
        for k in curves_to_remove.drain(..).rev() {
            active_curves_y.swap_remove(k);
        }
        // sort scanline for traversal (by left edge of x bounds)
        scanline_lines.sort_by_key(|a| OrderedFloat((a.1).0));
        scanline_curves.sort_by_key(|a| OrderedFloat((a.1).0));
        // Iterate through x, slice scanline segments into each cell.
        // Evaluate, accumulate and output.
        {
            // Same sweep structure as the y loop, but over pixel columns.
            let mut next_line = 0;
            let mut next_curve = 0;
            let mut x = 0;
            // Signed winding carried from cells to the left of this pixel.
            let mut acc = 0.0;
            active_lines_x.clear();
            active_curves_x.clear();
            while x < width
                && (next_line != scanline_lines.len()
                    || next_curve != scanline_curves.len()
                    || !active_lines_x.is_empty()
                    || !active_curves_x.is_empty())
            {
                // Pixel origin; pieces are translated into local cell space.
                let offset = vector(x as f32, y as f32);
                let lower = x as f32;
                let upper = (x + 1) as f32;
                //add newly active segments
                for &(ref line, (_, ref max)) in scanline_lines[next_line..]
                    .iter()
                    .take_while(|p| (p.1).0 < upper)
                {
                    let planes = PlaneSet {
                        start: lower,
                        step: 1.0,
                        count: (max.ceil() - lower).max(1.0) as usize,
                    };
                    active_lines_x.push(line.slice_up_x(planes));
                    next_line += 1;
                }
                for &(ref curve, (_, ref max)) in scanline_curves[next_curve..]
                    .iter()
                    .take_while(|p| (p.1).0 < upper)
                {
                    let planes = PlaneSet {
                        start: lower,
                        step: 1.0,
                        count: (max.ceil() - lower).max(1.0) as usize,
                    };
                    active_curves_x.push(curve.slice_up_x(planes));
                    next_curve += 1;
                }
                //process x sliced segments for this pixel
                let mut pixel_value = acc;
                // Winding contributed by this pixel's pieces, to be carried
                // rightwards via `acc` after output.
                let mut pixel_acc = 0.0;
                for (k, itr) in active_lines_x.iter_mut().enumerate() {
                    if let Some(itr) = itr.next() {
                        for mut line in itr {
                            let p = &mut line.p;
                            p[0] = p[0] - offset;
                            p[1] = p[1] - offset;
                            // Signed height of the piece (winding direction).
                            let a = p[0].y - p[1].y;
                            // Signed area between the piece and the cell's
                            // right edge (trapezoid via midpoint x).
                            let v = (1.0 - (p[0].x + p[1].x) * 0.5) * a;
                            pixel_value += v;
                            pixel_acc += a;
                        }
                    } else {
                        lines_to_remove.push(k);
                    }
                }
                for (k, itr) in active_curves_x.iter_mut().enumerate() {
                    if let Some(itr) = itr.next() {
                        for mut curve in itr {
                            let p = &mut curve.p;
                            p[0] = p[0] - offset;
                            p[1] = p[1] - offset;
                            p[2] = p[2] - offset;
                            let a = p[0].y - p[2].y;
                            let b = p[0].y - p[1].y;
                            let c = p[1].y - p[2].y;
                            // Closed-form signed area between the quadratic
                            // piece and the cell's right edge.
                            let v = (b * (6.0 - 3.0 * p[0].x - 2.0 * p[1].x - p[2].x)
                                + c * (6.0 - p[0].x - 2.0 * p[1].x - 3.0 * p[2].x))
                                / 6.0;
                            pixel_value += v;
                            pixel_acc += a;
                        }
                    } else {
                        curves_to_remove.push(k);
                    }
                }
                //output
                output(x, y, pixel_value.abs());
                acc += pixel_acc;
                // remove deactivated segments
                for k in lines_to_remove.drain(..).rev() {
                    active_lines_x.swap_remove(k);
                }
                for k in curves_to_remove.drain(..).rev() {
                    active_curves_x.swap_remove(k);
                }
                x += 1;
            }
            // fill remaining pixels (right of the last segment) with the
            // carried winding value
            for x in x..width {
                output(x, y, acc.abs());
            }
        }
        y += 1;
    }
    // fill remaining scanlines with 0.0
    for y in y..height {
        for x in 0..width {
            output(x, y, 0.0);
        }
    }
}
|
#![cfg_attr(not(feature = "std"), no_std)]
//! **WARNING**: v0.1.x is incompatible with v0.2.x onwards.
//!
//! API to use files as a lock. Supports non-std crates by disabling feature
//! `std`.
//!
//! # Types
//! Currently, only one type is provided: [`LockFile`]. It does not destroy the
//! file after closed. Locks are per-handle and not by per-process in any
//! platform. On Unix, however, under `fork` file descriptors might be
//! duplicated sharing the same lock, but `fork` is usually `unsafe` in Rust.
//!
//! # Example
//! ```
//! use fslock::LockFile;
//! fn main() -> Result<(), fslock::Error> {
//!
//! let mut file = LockFile::open("testfiles/mylock.lock")?;
//! file.lock()?;
//! do_stuff();
//! file.unlock()?;
//!
//! Ok(())
//! }
//! # fn do_stuff() {
//! # // doing stuff here.
//! # }
//! ```
#[cfg(test)]
mod test;
#[cfg(unix)]
mod unix;
#[cfg(unix)]
use crate::unix as sys;
mod string;
mod fmt;
#[cfg(windows)]
mod windows;
#[cfg(windows)]
use crate::windows as sys;
pub use crate::{
string::{EitherOsStr, IntoOsString, ToOsStr},
sys::{Error, OsStr, OsString},
};
#[derive(Debug)]
/// A handle to a file that is lockable. Does not delete the file. On both
/// Unix and Windows, the lock is held by an individual handle, and not by the
/// whole process. On Unix, however, under `fork` file descriptors might be
/// duplicated sharing the same lock, but `fork` is usually `unsafe` in Rust.
///
/// # Example
/// ```
/// # fn main() -> Result<(), fslock::Error> {
/// use fslock::LockFile;
///
/// let mut file = LockFile::open("testfiles/mylock.lock")?;
/// file.lock()?;
/// do_stuff();
/// file.unlock()?;
///
/// # Ok(())
/// # }
/// # fn do_stuff() {
/// #    // doing stuff here.
/// # }
/// ```
pub struct LockFile {
    /// Whether this handle currently owns the lock.
    locked: bool,
    /// Platform-specific descriptor/handle of the open lock file.
    desc: sys::FileDesc,
}
impl LockFile {
    /// Opens a file for locking, with OS-dependent locking behavior. On Unix,
    /// if the path is nul-terminated (ends with 0), no extra allocation will be
    /// made.
    ///
    /// # Compatibility
    ///
    /// This crate used to behave differently in regards to Unix and Windows,
    /// when locks on Unix were per-process and not per-handle. However, the
    /// current version locks per-handle on any platform. On Unix, however,
    /// under `fork` file descriptors might be duplicated sharing the same lock,
    /// but `fork` is usually `unsafe` in Rust.
    ///
    /// # Panics
    /// Panics if the path contains a nul-byte in a place other than the end.
    ///
    /// # Example
    ///
    /// ```
    /// # fn main() -> Result<(), fslock::Error> {
    /// use fslock::LockFile;
    ///
    /// let mut file = LockFile::open("testfiles/regular.lock")?;
    ///
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// # Panicking Example
    ///
    /// ```should_panic
    /// # fn main() -> Result<(), fslock::Error> {
    /// use fslock::LockFile;
    ///
    /// let mut file = LockFile::open("my\0lock")?;
    ///
    /// # Ok(())
    /// # }
    /// ```
    pub fn open<P>(path: &P) -> Result<Self, Error>
    where
        P: ToOsStr + ?Sized,
    {
        let path = path.to_os_str()?;
        let desc = sys::open(path.as_ref())?;
        Ok(Self { locked: false, desc })
    }

    /// Locks this file. Blocks while it is not possible to lock (i.e. someone
    /// else already owns a lock). After locked, if no attempt to unlock is
    /// made, it will be automatically unlocked on the file handle drop.
    ///
    /// # Panics
    /// Panics if this handle already owns the file.
    ///
    /// # Example
    ///
    /// ```
    /// # fn main() -> Result<(), fslock::Error> {
    /// use fslock::LockFile;
    ///
    /// let mut file = LockFile::open("testfiles/target.lock")?;
    /// file.lock()?;
    /// do_stuff();
    /// file.unlock()?;
    ///
    /// # Ok(())
    /// # }
    /// # fn do_stuff() {
    /// #    // doing stuff here.
    /// # }
    /// ```
    ///
    /// # Panicking Example
    ///
    /// ```should_panic
    /// # fn main() -> Result<(), fslock::Error> {
    /// use fslock::LockFile;
    ///
    /// let mut file = LockFile::open("testfiles/panicking.lock")?;
    /// file.lock()?;
    /// file.lock()?;
    ///
    /// # Ok(())
    /// # }
    /// ```
    pub fn lock(&mut self) -> Result<(), Error> {
        if self.locked {
            panic!("Cannot lock if already owning a lock");
        }
        sys::lock(self.desc)?;
        self.locked = true;
        Ok(())
    }

    /// Locks this file and writes this process's PID into the file, which will
    /// be erased on unlock. Like [`LockFile::lock`], blocks while it is not
    /// possible to lock. After locked, if no attempt to unlock is made, it will
    /// be automatically unlocked on the file handle drop.
    ///
    /// # Panics
    /// Panics if this handle already owns the file.
    ///
    /// # Example
    ///
    /// ```
    /// # fn main() -> Result<(), fslock::Error> {
    /// use fslock::LockFile;
    /// # #[cfg(feature = "std")]
    /// use std::fs::read_to_string;
    ///
    /// let mut file = LockFile::open("testfiles/withpid.lock")?;
    /// file.lock_with_pid()?;
    /// # #[cfg(feature = "std")]
    /// # {
    /// do_stuff()?;
    /// # }
    /// file.unlock()?;
    ///
    /// # #[cfg(feature = "std")]
    /// fn do_stuff() -> Result<(), fslock::Error> {
    ///     let mut content = read_to_string("testfiles/withpid.lock")?;
    ///     assert!(content.trim().len() > 0);
    ///     assert!(content.trim().chars().all(|ch| ch.is_ascii_digit()));
    ///     Ok(())
    /// }
    ///
    /// # Ok(())
    /// # }
    /// ```
    pub fn lock_with_pid(&mut self) -> Result<(), Error> {
        self.lock()?;
        // Truncate before writing, so a stale, longer PID left behind by a
        // previous holder that crashed (and therefore never unlocked and
        // truncated) cannot leave trailing garbage after our shorter PID.
        // This mirrors `try_lock_with_pid`, which already truncates.
        let result = sys::truncate(self.desc)
            .and_then(|_| writeln!(fmt::Writer(self.desc), "{}", sys::pid()));
        if result.is_err() {
            // Best-effort rollback: don't stay locked after a failed write.
            let _ = self.unlock();
        }
        result
    }

    /// Locks this file. Does NOT block if it is not possible to lock (i.e.
    /// someone else already owns a lock). After locked, if no attempt to
    /// unlock is made, it will be automatically unlocked on the file handle
    /// drop.
    ///
    /// # Panics
    /// Panics if this handle already owns the file.
    ///
    /// # Example
    ///
    /// ```
    /// # fn main() -> Result<(), fslock::Error> {
    /// use fslock::LockFile;
    ///
    /// let mut file = LockFile::open("testfiles/attempt.lock")?;
    /// if file.try_lock()? {
    ///     do_stuff();
    ///     file.unlock()?;
    /// }
    ///
    /// # Ok(())
    /// # }
    /// # fn do_stuff() {
    /// #    // doing stuff here.
    /// # }
    /// ```
    ///
    /// # Panicking Example
    ///
    /// ```should_panic
    /// # fn main() -> Result<(), fslock::Error> {
    /// use fslock::LockFile;
    ///
    /// let mut file = LockFile::open("testfiles/attempt_panic.lock")?;
    /// file.lock()?;
    /// file.try_lock()?;
    ///
    /// # Ok(())
    /// # }
    /// ```
    pub fn try_lock(&mut self) -> Result<bool, Error> {
        if self.locked {
            panic!("Cannot lock if already owning a lock");
        }
        let lock_result = sys::try_lock(self.desc);
        if let Ok(true) = lock_result {
            self.locked = true;
        }
        lock_result
    }

    /// Locks this file and writes this process's PID into the file, which will
    /// be erased on unlock. Does NOT block if it is not possible to lock (i.e.
    /// someone else already owns a lock). After locked, if no attempt to
    /// unlock is made, it will be automatically unlocked on the file handle
    /// drop.
    ///
    /// # Panics
    /// Panics if this handle already owns the file.
    ///
    /// # Example
    ///
    /// ```
    /// # #[cfg(feature = "std")]
    /// # use std::fs::read_to_string;
    /// # fn main() -> Result<(), fslock::Error> {
    /// use fslock::LockFile;
    ///
    /// let mut file = LockFile::open("testfiles/pid_attempt.lock")?;
    /// if file.try_lock_with_pid()? {
    ///     # #[cfg(feature = "std")]
    ///     # {
    ///     do_stuff()?;
    ///     # }
    ///     file.unlock()?;
    /// }
    ///
    /// # Ok(())
    /// # }
    /// # #[cfg(feature = "std")]
    /// fn do_stuff() -> Result<(), fslock::Error> {
    ///     let mut content = read_to_string("testfiles/pid_attempt.lock")?;
    ///     assert!(content.trim().len() > 0);
    ///     assert!(content.trim().chars().all(|ch| ch.is_ascii_digit()));
    ///     Ok(())
    /// }
    /// ```
    ///
    /// # Panicking Example
    ///
    /// ```should_panic
    /// # fn main() -> Result<(), fslock::Error> {
    /// use fslock::LockFile;
    ///
    /// let mut file = LockFile::open("testfiles/pid_attempt_panic.lock")?;
    /// file.lock_with_pid()?;
    /// file.try_lock_with_pid()?;
    ///
    /// # Ok(())
    /// # }
    /// ```
    pub fn try_lock_with_pid(&mut self) -> Result<bool, Error> {
        if !self.try_lock()? {
            return Ok(false);
        }
        // Truncate first: the file may contain a stale PID from a previous
        // holder that crashed without unlocking.
        let result = sys::truncate(self.desc)
            .and_then(|_| writeln!(fmt::Writer(self.desc), "{}", sys::pid()));
        if result.is_err() {
            // Best-effort rollback: don't stay locked after a failed write.
            let _ = self.unlock();
        }
        result.map(|_| true)
    }

    /// Returns whether this file handle owns the lock.
    ///
    /// # Example
    /// ```
    /// use fslock::LockFile;
    /// # fn main() -> Result<(), fslock::Error> {
    ///
    /// let mut file = LockFile::open("testfiles/maybeowned.lock")?;
    /// do_stuff_with_lock(&mut file);
    /// if !file.owns_lock() {
    ///     file.lock()?;
    ///     do_stuff();
    ///     file.unlock()?;
    /// }
    ///
    /// # Ok(())
    /// # }
    /// # fn do_stuff_with_lock(_lock: &mut LockFile) {
    /// #    // doing stuff here.
    /// # }
    /// # fn do_stuff() {
    /// #    // doing stuff here.
    /// # }
    /// ```
    pub fn owns_lock(&self) -> bool {
        self.locked
    }

    /// Unlocks this file. This file handle must own the file lock. If not
    /// called manually, it is automatically called on `drop`.
    ///
    /// # Panics
    /// Panics if this handle does not own the file.
    ///
    /// # Example
    ///
    /// ```
    /// # fn main() -> Result<(), fslock::Error> {
    /// use fslock::LockFile;
    ///
    /// let mut file = LockFile::open("testfiles/endinglock.lock")?;
    /// file.lock()?;
    /// do_stuff();
    /// file.unlock()?;
    ///
    /// # Ok(())
    /// # }
    /// # fn do_stuff() {
    /// #    // doing stuff here.
    /// # }
    /// ```
    ///
    /// # Panicking Example
    ///
    /// ```should_panic
    /// # fn main() -> Result<(), fslock::Error> {
    /// use fslock::LockFile;
    ///
    /// let mut file = LockFile::open("testfiles/endinglock.lock")?;
    /// file.unlock()?;
    ///
    /// # Ok(())
    /// # }
    /// ```
    pub fn unlock(&mut self) -> Result<(), Error> {
        if !self.locked {
            panic!("Attempted to unlock already unlocked lockfile");
        }
        // Mark as unlocked first so a failing syscall below doesn't leave
        // the handle believing it still owns the lock.
        self.locked = false;
        sys::unlock(self.desc)?;
        // Erase any PID written by `lock_with_pid`/`try_lock_with_pid`.
        sys::truncate(self.desc)?;
        Ok(())
    }
}
impl Drop for LockFile {
    /// Releases the lock if this handle owns it, then closes the file.
    /// Unlock errors are discarded, since `drop` has no way to report them.
    fn drop(&mut self) {
        if self.owns_lock() {
            let _ = self.unlock();
        }
        sys::close(self.desc);
    }
}
// SAFETY: sharing/sending a `LockFile` across threads on Windows is sound
// because:
// 1. We never actually access the contents of the pointer that represents the
// Windows Handle.
//
// 2. We require a mutable reference to actually mutate the file
// system.
#[cfg(windows)]
unsafe impl Send for LockFile {}
#[cfg(windows)]
unsafe impl Sync for LockFile {}
|
// Submodules loaded from the `with_async_true` directory via explicit
// `#[path]` attributes — presumably one module per configuration of the
// `info` flag (false / true / absent); confirm against the module sources.
#[path = "with_async_true/with_info_false.rs"]
mod with_info_false;
#[path = "with_async_true/with_info_true.rs"]
mod with_info_true;
#[path = "with_async_true/without_info.rs"]
mod without_info;
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.