text stringlengths 8 4.13M |
|---|
use rustler::schedule::SchedulerFlags::*;
mod atoms;
mod backend;
// Exports the NIF functions to the BEAM under the Elixir module `Elixir.Backend`.
// Both NIFs run on dirty schedulers because image decoding/resizing can exceed
// the time budget of a normal BEAM scheduler.
rustler::rustler_exports_nifs!(
    "Elixir.Backend",
    {
        {"from_bytes", 1, backend::from_bytes, DirtyIo},
        // Fixed: was `{"resize", 3, mirage::resizem DirtyCpu}` — a missing comma,
        // a typo'd function name, and a module path that does not match the
        // declared modules (`atoms`, `backend`). This did not compile.
        {"resize", 3, backend::resize, DirtyCpu},
    },
    Some(load)
);
/// Called by the BEAM when the NIF library is loaded; delegates to the
/// `backend` module so it can perform its own initialization on `env`.
/// Returning `false` would abort loading the NIF library.
fn load(env: rustler::Env, _info: rustler::Term) -> bool {
    backend::load(env)
}
use argh::FromArgs;
use env_logger::{Builder, Target};
use serde::{Deserialize, Serialize};
/// Server options
/// Server options.
///
/// Loaded from the `remits` confy config file, then overridden by any
/// command-line flags (see `update_from_flags`). Every field is `Option`
/// so "not provided" is distinguishable from a configured value.
/// NOTE: the `///` field docs below double as argh's `--help` text.
#[derive(Clone, Debug, Serialize, Deserialize, FromArgs)]
pub struct RemitsConfig {
    #[argh(option, short = 'p')]
    /// what port to start remits on
    pub port: Option<String>,
    // v can change dont care
    #[argh(option, short = 'v')]
    /// verbosity of logs
    pub log_level: Option<String>,
    #[argh(option, short = 'd')]
    /// directory that contains the db
    pub db_path: Option<String>,
}
impl RemitsConfig {
    /// Loads configuration from the `remits` confy store (falls back to
    /// `Default` when no config file exists yet).
    ///
    /// Panics if the config file exists but cannot be parsed.
    fn new() -> Self {
        confy::load("remits").expect("could not load config")
    }

    /// Overrides file-based values with any command-line flags and returns a
    /// clone of the final configuration.
    fn update_from_flags(&mut self) -> Self {
        let flags: RemitsConfig = argh::from_env();
        // This one must be first so debug logs work the rest of the way down
        setup_logger(self.log_level.clone(), flags.log_level);
        if let Some(port) = flags.port {
            // Fixed: previously `self.port.as_ref().unwrap()` panicked when
            // the config file contained no port at all.
            debug!(
                "Replacing config option \"port\":{} with flag \"-p/--port\":{}",
                self.port.as_deref().unwrap_or("<unset>"),
                port
            );
            self.port = Some(port);
        }
        if flags.db_path.is_some() {
            self.db_path = flags.db_path;
        }
        self.clone()
    }

    /// Socket address to bind, e.g. `0.0.0.0:4242`.
    ///
    /// Panics if no port is configured (the `Default` impl always sets one).
    pub fn addr(&self) -> String {
        // `as_ref` avoids the previous `self.clone()` that copied the entire
        // struct just to read one field.
        format!("0.0.0.0:{}", self.port.as_ref().expect("no port defined"))
    }
}
impl ::std::default::Default for RemitsConfig {
fn default() -> Self {
Self {
port: Some("4242".into()),
log_level: Some("info".into()),
db_path: Some("/var/lib/remits".into()),
}
}
}
/// Initializes the global `env_logger` with the effective log level.
///
/// Precedence: command-line flag, then config-file value, then `"info"`.
/// Panics if called twice (env_logger's `init` contract).
fn setup_logger(config_level: Option<String>, flag_level: Option<String>) {
    // `or`/`unwrap_or_else` replace the previous `unwrap_or(&"info".to_owned())`,
    // which allocated the default string even when a level was present.
    let log_level = flag_level
        .or(config_level)
        .unwrap_or_else(|| "info".to_string());
    Builder::new()
        .parse_filters(&log_level)
        .target(Target::Stdout)
        .format_timestamp_nanos()
        .init();
    info!("log level set to {}", log_level);
}
/// Builds the effective runtime configuration: config-file values first,
/// then command-line flag overrides applied on top.
pub fn load() -> RemitsConfig {
    let mut config = RemitsConfig::new();
    config.update_from_flags()
}
|
use failure::Error;
use std::io::{self, BufRead};
pub fn get_text() -> Result<String, Error> {
println!("Text to encrypt ?");
let mut buffer = String::new();
let stdin = io::stdin();
let mut handle = stdin.lock();
handle.read_line(&mut buffer)?;
Ok(buffer.trim_right().to_string())
}
pub fn get_cipher_text() -> Result<String, Error> {
println!("Encrypted text ?");
let mut buffer = String::new();
let stdin = io::stdin();
let mut handle = stdin.lock();
handle.read_line(&mut buffer)?;
Ok(buffer.trim_right().to_string())
}
pub fn get_key() -> Result<String, Error> {
println!("Encryption key ?");
let mut buffer = String::new();
let stdin = io::stdin();
let mut handle = stdin.lock();
handle.read_line(&mut buffer)?;
Ok(buffer.trim_right().to_string())
}
|
extern crate procfs;
use std::collections::HashMap;
/// Collects CPU and memory facts from /proc and POSTs them as JSON to the
/// collector endpoint at http://0.0.0.0:9090.
fn main() {
    let mut host = HashMap::new();
    // cpuinfo keys we want to report.
    let cpu_keys = vec![
        "model name",
        "cpu cores",
        "cache size",
    ];
    let _mem_keys = vec![
        "mem_total",
        "swap_total",
    ];
    let mut cpu_details = HashMap::new();
    let mut mem_details = HashMap::new();
    let cpu = procfs::CpuInfo::new();
    match cpu {
        Ok(x) => cpu_fields(&mut cpu_details, &cpu_keys, &x.fields),
        Err(_e) => println!("{:?}", "Couldn't fetch CPU info!"),
    }
    let mem = procfs::Meminfo::new();
    match mem {
        Ok(x) => mem_info(&mut mem_details, &x),
        // Fixed copy-paste: this previously reported "CPU info" when the
        // MEMORY info fetch failed.
        Err(_e) => println!("{:?}", "Couldn't fetch memory info!"),
    }
    host.insert("cpu", cpu_details);
    host.insert("mem", mem_details);
    println!("{:#?}", host);
    let client = reqwest::blocking::Client::new();
    let res = client
        .post("http://0.0.0.0:9090")
        .json(&host)
        .send();
    // Best effort: failures are printed, not fatal.
    println!("{:?}", res);
}
/// Copies the entries named in `keys` from the parsed cpuinfo map `cf` into
/// `host_info`. Missing keys are reported on stdout and skipped.
///
/// The slice parameter replaces the previous `&Vec<&str>`; existing callers
/// passing `&Vec<&str>` still work via deref coercion.
fn cpu_fields(host_info: &mut HashMap<String, String>, keys: &[&str], cf: &HashMap<String, String>) {
    for key in keys {
        // A single `get` replaces the previous `contains_key` + `get` pair,
        // which hashed every key twice. (The old `None =>` insert arm was
        // unreachable: `get` cannot fail right after `contains_key`.)
        match cf.get(*key) {
            Some(value) => {
                host_info.insert(key.to_string(), value.to_string());
            }
            None => println!("{:?}", "No matching keys found!"),
        }
    }
}
/// Records total RAM and total swap from the parsed /proc/meminfo data into
/// `host_info` under fixed keys (values stringified as-is).
fn mem_info(host_info: &mut HashMap<String, String>, cf: &procfs::Meminfo) {
    host_info.insert("mem_total".to_string(), cf.mem_total.to_string());
    host_info.insert("swap_total".to_string(), cf.swap_total.to_string());
}
|
pub use markers::*;
pub use ops::*;
/// Marker traits identifying the arity of a tuple type.
///
/// `TupleN` is implemented exactly for the N-element tuple, so generic code
/// can bound on tuple arity without naming the element types.
mod markers {
    /// Implemented only for the unit tuple `()`.
    pub trait Tuple0 {}
    impl Tuple0 for () {}
    /// Implemented only for 1-tuples `(E0,)`.
    pub trait Tuple1 {}
    impl<E0> Tuple1 for (E0,) {}
    /// Implemented only for 2-tuples.
    pub trait Tuple2 {}
    impl<E0, E1> Tuple2 for (E0, E1) {}
    /// Implemented only for 3-tuples.
    pub trait Tuple3 {}
    impl<E0, E1, E2> Tuple3 for (E0, E1, E2) {}
    /// Implemented only for 4-tuples.
    pub trait Tuple4 {}
    impl<E0, E1, E2, E3> Tuple4 for (E0, E1, E2, E3) {}
}
/// Positional element-type access for tuples: `GetN::Output` names the type
/// of a tuple's N-th element. Implemented for arities 1 through 4.
mod ops {
    /// Type of element 0.
    pub trait Get0 {
        type Output;
    }
    /// Type of element 1.
    pub trait Get1 {
        type Output;
    }
    /// Type of element 2.
    pub trait Get2 {
        type Output;
    }
    /// Type of element 3.
    pub trait Get3 {
        type Output;
    }
    // 1-tuple
    impl<E0> Get0 for (E0,) {
        type Output = E0;
    }
    // 2-tuple
    impl<E0, E1> Get0 for (E0, E1) {
        type Output = E0;
    }
    impl<E0, E1> Get1 for (E0, E1) {
        type Output = E1;
    }
    // 3-tuple
    impl<E0, E1, E2> Get0 for (E0, E1, E2) {
        type Output = E0;
    }
    impl<E0, E1, E2> Get1 for (E0, E1, E2) {
        type Output = E1;
    }
    impl<E0, E1, E2> Get2 for (E0, E1, E2) {
        type Output = E2;
    }
    // 4-tuple
    impl<E0, E1, E2, E3> Get0 for (E0, E1, E2, E3) {
        type Output = E0;
    }
    impl<E0, E1, E2, E3> Get1 for (E0, E1, E2, E3) {
        type Output = E1;
    }
    impl<E0, E1, E2, E3> Get2 for (E0, E1, E2, E3) {
        type Output = E2;
    }
    impl<E0, E1, E2, E3> Get3 for (E0, E1, E2, E3) {
        type Output = E3;
    }
}
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![deny(warnings)]
extern crate build_helper;
extern crate cc;
use build_helper::native_lib_boilerplate;
use std::env;
use std::fs::File;
/// Build script entry point: optionally builds libbacktrace, then emits the
/// per-target `cargo:rustc-link-lib` directives for std's native dependencies.
fn main() {
    let target = env::var("TARGET").expect("TARGET was not set");
    // Emits one `cargo:rustc-link-lib` directive.
    let link = |name: &str| println!("cargo:rustc-link-lib={}", name);
    // libbacktrace is only built where the backtrace feature is on and the
    // target can actually use it.
    if cfg!(feature = "backtrace")
        && !target.contains("cloudabi")
        && !target.contains("emscripten")
        && !target.contains("msvc")
        && !target.contains("wasm32")
    {
        let _ = build_libbacktrace(&target);
    }
    if target.contains("linux") {
        if target.contains("android") {
            link("dl");
            link("log");
            link("gcc");
        } else if !target.contains("musl") {
            link("dl");
            link("rt");
            link("pthread");
        }
    } else if target.contains("freebsd") {
        link("execinfo");
        link("pthread");
    } else if target.contains("dragonfly")
        || target.contains("bitrig")
        || target.contains("netbsd")
        || target.contains("openbsd")
    {
        link("pthread");
    } else if target.contains("solaris") {
        link("socket");
        link("posix4");
        link("pthread");
        link("resolv");
    } else if target.contains("apple-darwin") {
        link("System");
        // res_init and friends require -lresolv on macOS/iOS.
        // See #41582 and http://blog.achernya.com/2013/03/os-x-has-silly-libsystem.html
        link("resolv");
    } else if target.contains("apple-ios") {
        link("System");
        link("objc");
        link("framework=Security");
        link("framework=Foundation");
        link("resolv");
    } else if target.contains("windows") {
        link("advapi32");
        link("ws2_32");
        link("userenv");
        link("shell32");
    } else if target.contains("fuchsia") {
        link("zircon");
        link("fdio");
    } else if target.contains("cloudabi") {
        if cfg!(feature = "backtrace") {
            link("unwind");
        }
        link("c");
        link("compiler_rt");
    }
}
/// Compiles the vendored libbacktrace C sources for `target` into a static
/// library named `backtrace`, returning `Err(())` if the boilerplate decides
/// the build should be skipped.
fn build_libbacktrace(target: &str) -> Result<(), ()> {
    let native = native_lib_boilerplate("libbacktrace", "libbacktrace", "backtrace", "")?;
    let mut build = cc::Build::new();
    build
        .flag("-fvisibility=hidden")
        .include("../libbacktrace")
        .include(&native.out_dir)
        .out_dir(&native.out_dir)
        .warnings(false);
    // Sources common to every platform.
    for source in &[
        "alloc.c", "backtrace.c", "dwarf.c", "fileline.c",
        "posix.c", "read.c", "sort.c", "state.c",
    ] {
        build.file(format!("../libbacktrace/{}", source));
    }
    // Object-format-specific reader.
    if target.contains("darwin") {
        build.file("../libbacktrace/macho.c");
    } else if target.contains("windows") {
        build.file("../libbacktrace/pecoff.c");
    } else {
        build.file("../libbacktrace/elf.c");
        let elf_size = if target.contains("64") { "64" } else { "32" };
        build.define("BACKTRACE_ELF_SIZE", elf_size);
    }
    // libbacktrace expects these headers to exist; create them empty and
    // supply the configuration through -D defines instead.
    File::create(native.out_dir.join("backtrace-supported.h")).unwrap();
    build.define("BACKTRACE_SUPPORTED", "1");
    build.define("BACKTRACE_USES_MALLOC", "1");
    build.define("BACKTRACE_SUPPORTS_THREADS", "0");
    build.define("BACKTRACE_SUPPORTS_DATA", "0");
    File::create(native.out_dir.join("config.h")).unwrap();
    if !target.contains("apple-ios")
        && !target.contains("solaris")
        && !target.contains("redox")
        && !target.contains("android")
        && !target.contains("haiku")
    {
        build.define("HAVE_DL_ITERATE_PHDR", "1");
    }
    build.define("_GNU_SOURCE", "1");
    build.define("_LARGE_FILES", "1");
    build.compile("backtrace");
    Ok(())
}
|
// This file is part of linux-epoll. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/linux-epoll/master/COPYRIGHT. No part of linux-epoll, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file.
// Copyright © 2019 The developers of linux-epoll. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/linux-epoll/master/COPYRIGHT.
/// Header flags extracted from a validated DNS response message.
#[derive(Default, Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub(crate) struct ResponseHeader
{
	/// AD bit: the server authenticated the (DNSSEC) data.
	is_authenticated_data: bool,
	/// AA bit: the answer came from an authoritative server.
	is_authoritative_answer: bool,
	/// RA bit: the server supports recursive queries.
	is_recursion_available: bool,
}
/// Overall classification of a parsed DNS response (see
/// `validate_message_response_code!` for how each variant is chosen).
pub enum Outcome
{
	/// Response parsed and validated normally.
	Normal,
	/// `SERVFAIL` on a response whose data was not authenticated.
	DnsSecDataFailedAuthentication,
	/// `NXDOMAIN` reported by an authoritative server; not validated here.
	AuthoritativeServerReportsNoDomainButThisIsNotValidated,
}
/// Per-query state kept while a DNS request is in flight.
struct RequestQuery<'a>
{
	/// Visitor to apply to each resource record in the answer section.
	answer_section: Box<dyn ResourceRecordVisitor<'a>>,
}
/// All queries sent but not yet answered, keyed by the DNS message
/// identifier so responses can be matched back to their request.
pub struct OutstandingRequests<'a>
{
	requests_by_identifier: HashMap<MessageIdentifier, (RequestQueryIdentification, RequestQuery<'a>)>
}
/// Rejects responses whose QDCOUNT is not exactly 1; this resolver only ever
/// sends single-question queries, so anything else is a protocol violation.
macro_rules! validate_number_of_entries_in_the_question_section_is_one
{
	($message_header: ident) =>
	{
		{
			let number_of_entries_in_the_question_section = $message_header.number_of_entries_in_the_question_section();
			if unlikely!(number_of_entries_in_the_question_section != 1)
			{
				return Err(ResponseDoesNotContainExactlyOneQuestion(number_of_entries_in_the_question_section))
			}
		}
	}
}
/// Rejects messages whose QR bit says "query" — we only ever parse responses.
macro_rules! validate_is_response
{
	($message_header: ident) =>
	{
		if unlikely!($message_header.is_query())
		{
			return Err(ResponseWasAQuery)
		}
	}
}
/// Rejects every opcode except the standard Query opcode.
///
/// NOTE(review): this match mixes `MessageOpcode::…` arms with raw integer
/// patterns (`3`, `7 ... 15`), which only compiles if those `MessageOpcode`
/// names are associated constants of an integer type — confirm against the
/// `MessageOpcode` definition. The `...` range syntax is also deprecated in
/// favour of `..=`.
macro_rules! validate_opcode
{
	($message_header: ident) =>
	{
		match $message_header.raw_opcode()
		{
			MessageOpcode::Query => (),
			MessageOpcode::InverseQuery => return Err(InvalidResponseOpcode(MessageOpcode::InverseQuery)),
			MessageOpcode::Status => return Err(InvalidResponseOpcode(MessageOpcode::Status)),
			opcode @ 3 => return Err(UnassignedResponseOpcode(opcode)),
			MessageOpcode::Notify => return Err(InvalidResponseOpcode(MessageOpcode::Notify)),
			MessageOpcode::Update => return Err(InvalidResponseOpcode(MessageOpcode::Update)),
			MessageOpcode::DnsStatefulOperations => return Err(InvalidResponseOpcode(MessageOpcode::DnsStatefulOperations)),
			opcode @ 7 ... 15 => return Err(UnassignedResponseOpcode(opcode)),
			_ => unreachable!(),
		}
	}
}
/// Rejects responses that use the reserved (Z) header bits.
///
/// NOTE(review): the negation assumes `z()` returns `true` when the Z bits
/// are all zero (i.e. it is a "z-is-clear" predicate) — confirm against the
/// `MessageHeader::z` implementation.
macro_rules! validate_reserved_header_bits_are_zero
{
	($message_header: ident) =>
	{
		if unlikely!(!$message_header.z())
		{
			return Err(ResponseUsedReservedHeaderBits)
		}
	}
}
/// Rejects responses with the truncation (TC) bit set; a truncated response
/// cannot be parsed and should be retried over TCP.
macro_rules! validate_response_is_not_truncated
{
	($message_header: ident) =>
	{
		// Fixed inverted condition: the previous `!is_truncated()` rejected
		// every message that was NOT truncated and accepted truncated ones.
		if unlikely!($message_header.is_truncated())
		{
			return Err(ResponseIsTruncated)
		}
	}
}
/// Rejects responses where the RD bit (always 1 in our queries) was not
/// copied back by the server.
macro_rules! validate_recursion_desired_bit_was_copied_from_query_and_is_one
{
	($message_header: ident) =>
	{
		if unlikely!(!$message_header.recursion_desired())
		{
			return Err(ResponseFailedToCopyRecursionDesiredBit)
		}
	}
}
/// Rejects responses where the checking-disabled (CD) bit is set; our
/// queries always send it as zero, so it must come back zero.
macro_rules! validate_checking_bit_was_copied_from_query_and_is_zero
{
	($message_header: ident) =>
	{
		// Fixed copy-paste from the recursion-desired macro above: this
		// previously tested `recursion_desired()` (with the polarity of a
		// must-be-one bit) while reporting a checking-disabled error.
		// Assumes `MessageHeader::checking_disabled()` exists alongside the
		// other bit accessors used here — confirm the accessor name.
		if unlikely!($message_header.checking_disabled())
		{
			return Err(ResponseFailedToCopyCheckingDisabledBit)
		}
	}
}
/// Rejects responses that claim to be both authoritative (AA) and
/// DNSSEC-authenticated (AD); evaluates to the `(is_authoritative_answer,
/// is_authenticated_data)` pair for later use.
macro_rules! validate_authentic_answers_do_not_have_authoritative_data_bit_set
{
	($message_header: ident) =>
	{
		{
			let is_authoritative_answer = $message_header.authoritative_answer();
			let is_authenticated_data = $message_header.authentic_data();
			if unlikely!(is_authoritative_answer)
			{
				if unlikely!(is_authenticated_data)
				{
					return Err(ResponseWasAuthoritativeButHasTheAuthoritativeDataBitSet)
				}
			}
			(is_authoritative_answer, is_authenticated_data)
		}
	}
}
/// Maps the response code (RCODE) to an `Outcome`, rejecting every code a
/// resolver should never accept. Evaluates to an `Outcome` value.
macro_rules! validate_message_response_code
{
	($message_header: ident, $is_authoritative_answer: ident, $is_authenticated_data: ident) =>
	{
		{
			use self::Outcome::*;
			// Fixed macro hygiene bug: this previously matched on a bare
			// `message_header`, a name not in scope at the expansion site;
			// it must use the `$message_header` metavariable.
			match $message_header.raw_response_code()
			{
				MessageResponseCode::NoError => Normal,
				MessageResponseCode::FormatError => return Err(MessageResponseCodeWasFormatError),
				// SERVFAIL on unauthenticated data is treated as a DNSSEC
				// validation failure rather than a hard error.
				MessageResponseCode::ServerFailure => if unlikely!(!$is_authenticated_data)
				{
					return Ok(DnsSecDataFailedAuthentication)
				}
				else
				{
					return Err(MessageResponseCodeWasServerFailure)
				},
				// NXDOMAIN is only acceptable from an authoritative server.
				MessageResponseCode::NonExistentDomain => if unlikely!($is_authoritative_answer)
				{
					AuthoritativeServerReportsNoDomainButThisIsNotValidated
				}
				else
				{
					return Err(MessageResponseCodeWasNonExistentDomainForANonAuthoritativeServer)
				},
				MessageResponseCode::NotImplemented => return Err(MessageResponseCodeWasNotImplemented),
				MessageResponseCode::Refused => return Err(MessageResponseCodeWasRefused),
				MessageResponseCode::NameExistsWhenItShouldNot => return Err(MessageResponseCodeShouldNotBeDynamicDnsAssociated(MessageResponseCode::NameExistsWhenItShouldNot)),
				MessageResponseCode::ResourceRecordSetExistsWhenItShouldNot => return Err(MessageResponseCodeShouldNotBeDynamicDnsAssociated(MessageResponseCode::ResourceRecordSetExistsWhenItShouldNot)),
				MessageResponseCode::ResourceRecordSetThatShouldExistDoesNot => return Err(MessageResponseCodeShouldNotBeDynamicDnsAssociated(MessageResponseCode::ResourceRecordSetThatShouldExistDoesNot)),
				MessageResponseCode::ServerNotAuthoritativeForZoneOrNotAuthorized => return Err(MessageResponseCodeShouldNotBeDynamicDnsAssociated(MessageResponseCode::ServerNotAuthoritativeForZoneOrNotAuthorized)),
				MessageResponseCode::NameNotContainedInZone => return Err(MessageResponseCodeShouldNotBeDynamicDnsAssociated(MessageResponseCode::NameNotContainedInZone)),
				MessageResponseCode::DnsStatefulOperationsTypeNotImplemented => return Err(MessageResponseCodeShouldNotBeDnsStatefulOperationsTypeNotImplemented),
				response_code @ 12 ... 15 => return Err(MessageResponseCodeUnassigned(response_code)),
				_ => unreachable!(),
			}
		}
	}
}
impl<'message> OutstandingRequests<'message>
{
	/// Parses and validates one DNS response (TCP length prefix already
	/// stripped), matching it against an outstanding request by identifier.
	///
	/// NOTE(review): the `'message` lifetime parameter on this method
	/// shadows the impl-level `'message` (E0496) — one of the two must go.
	pub fn parse_slice_after_trimming_tcp_message_size_bytes<'message>(&mut self, raw_message: &'message mut [u8]) -> Result<Outcome, DnsProtocolError>
	{
		let message = raw_message.cast_mut::<Message>(0);
		let message_header = message.message_header();
		let identifier = message_header.identifier();
		// The matching request is removed (consumed) here; unknown
		// identifiers are rejected outright.
		let (request_query_identification, request_query) = match self.requests_by_identifier.remove(&identifier)
		{
			// TODO: This MAY be possible for timed-out queries we later throw away, but I suspect a better technique if a query times out is to just discard the entire set of outstanding requests and re-init the connection.
			// RFC 2308 Section 7.1: "In either case a resolver MAY cache a server failure response.
			// If it does so it MUST NOT cache it for longer than five (5) minutes, and it MUST be cached against the specific query tuple <query name, type, class, server IP address>".
			// RFC 2308 Section 7.2: "A server may be deemed to be dead or unreachable if it has not responded to an outstanding query within 120 seconds.
			// ...
			// A server MAY cache a dead server indication.
			// If it does so it MUST NOT be deemed dead for longer than five (5) minutes".
			None => return Err(ResponseWasForAnUnknownRequest(identifier)),
			Some((request_query_identification, request_query)) => (request_query_identification, request_query),
		};
		// Header validation, using the macros defined above.
		validate_is_response!(message_header);
		validate_number_of_entries_in_the_question_section_is_one!(message_header);
		validate_opcode!(message_header);
		validate_reserved_header_bits_are_zero!(message_header);
		validate_response_is_not_truncated!(message_header);
		validate_recursion_desired_bit_was_copied_from_query_and_is_one!(message_header);
		validate_checking_bit_was_copied_from_query_and_is_zero!(message_header);
		let (is_authoritative_answer, is_authenticated_data) = validate_authentic_answers_do_not_have_authoritative_data_bit_set!(message_header);
		let outcome = validate_message_response_code!(message_header, is_authoritative_answer, is_authenticated_data);
		let start_of_message_pointer = raw_message.start_pointer();
		let end_of_message_pointer = raw_message.end_pointer();
		let mut parsed_labels = ParsedLabels::new(start_of_message_pointer);
		// Parse the (single) question section entry, then the three resource
		// record sections.
		let (next_resource_record_pointer, data_type) = message.message_body_as_query_section_entry().parse_response(&mut parsed_labels, end_of_message_pointer, request_query_identification)?;
		Self::response_record_section_parsing(end_of_message_pointer, next_resource_record_pointer, message_header, &mut parsed_labels, data_type)?;
		// NOTE(review): `response_header` is constructed but never used or
		// returned — presumably it should be part of the `Ok` value.
		let response_header = ResponseHeader
		{
			is_authenticated_data,
			is_authoritative_answer,
			is_recursion_available: message_header.recursion_available(),
		};
		Ok(outcome)
	}
	/*
	Request, outbound
	- EDNS(0) with DO bit set
	No EDNS options.
	- Zero CD bit
	- One AD bit
	- One RD bit
	- Zero RA bit
	- Zero TC bit
	- error code always zero.
	- QCLASS always IN
	- Always one query and no additional records.
	- May need a client certificate.
	*/
	/// Parses the answer, authority and additional sections in order,
	/// threading the resource-record pointer and parsing state through.
	fn response_record_section_parsing(end_of_message_pointer: usize, next_resource_record_pointer: usize, message_header: &MessageHeader, parsed_labels: &mut ParsedLabels, data_type: DataType) -> Result<AnswerOutcome, DnsProtocolError>
	{
		let mut response_parsing_state = ResponseParsingState::default();
		let (next_resource_record_pointer, canonical_name_chain) = Self::parse_answer_section(end_of_message_pointer, next_resource_record_pointer, message_header, parsed_labels, &mut response_parsing_state, data_type)?;
		let (next_resource_record_pointer, answer_outcome) = Self::parse_authority_section(end_of_message_pointer, next_resource_record_pointer, message_header, parsed_labels, &mut response_parsing_state, canonical_name_chain)?;
		Self::parse_additional_section(end_of_message_pointer, next_resource_record_pointer, message_header, parsed_labels, response_parsing_state)?;
		Ok(answer_outcome)
	}
	/// Parses the answer section.
	///
	/// NOTE(review): this method is unfinished — `XXXX` placeholders, the
	/// record count reads the AUTHORITY count rather than the answer count,
	/// `Ok(a, b)` is missing tuple parentheses, and the closure takes three
	/// parameters while `loop_over_resource_records` calls it with two.
	#[inline(always)]
	fn parse_answer_section<'message>(end_of_message_pointer: usize, next_resource_record_pointer: usize, message_header: &MessageHeader, parsed_labels: &mut ParsedLabels, response_parsing_state: &mut ResponseParsingState, data_type: DataType) -> Result<(usize, CanonicalNameChain<'message>), DnsProtocolError>
	{
		let number_of_resource_records = message_header.number_of_resource_records_in_the_authority_records_section();
		// TODO: Fix this.
		let resource_record_visitor = XXXX;
		let canonical_name_chain = XXXX;
		let next_resource_record_pointer = Self::loop_over_resource_records(end_of_message_pointer, next_resource_record_pointer, number_of_resource_records, |response_record_section_parsing, resource_record, end_of_message_pointer| resource_record.parse_answer_section_resource_record_in_response(data_type, end_of_message_pointer, parsed_labels, &mut resource_record_visitor, response_parsing_state))?;
		Ok(next_resource_record_pointer, canonical_name_chain)
	}
	/// Parses the authority section and derives the answer outcome.
	///
	/// NOTE(review): `is_authoritative_answer`, `has_nxdomain_error_code`
	/// and `answer_section_has_at_least_one_record_of_requested_data_type`
	/// are not defined in this scope — they need to be passed in.
	#[inline(always)]
	fn parse_authority_section<'message>(end_of_message_pointer: usize, next_resource_record_pointer: usize, message_header: &MessageHeader, parsed_labels: &mut ParsedLabels, response_parsing_state: &mut ResponseParsingState, canonical_name_chain: CanonicalNameChain<'message>) -> Result<(usize, AnswerOutcome), DnsProtocolError>
	{
		let number_of_resource_records = message_header.number_of_resource_records_in_the_authority_records_section();
		let mut authority_resource_record_visitor = AuthorityResourceRecordVisitor::new(canonical_name_chain);
		let next_resource_record_pointer = Self::loop_over_resource_records(end_of_message_pointer, next_resource_record_pointer, number_of_resource_records, |response_record_section_parsing, resource_record, end_of_message_pointer| resource_record.parse_authority_section_resource_record_in_response(end_of_message_pointer, parsed_labels, &mut authority_resource_record_visitor, response_parsing_state))?;
		let answer_outcome = authority_resource_record_visitor.answer_outcome(is_authoritative_answer, has_nxdomain_error_code, answer_section_has_at_least_one_record_of_requested_data_type);
		Ok((next_resource_record_pointer, answer_outcome))
	}
	/// Parses (and discards) the additional section, then enforces that the
	/// response carried an EDNS OPT record with the DO bit honoured.
	#[inline(always)]
	fn parse_additional_section(end_of_message_pointer: usize, next_resource_record_pointer: usize, message_header: &MessageHeader, parsed_labels: &mut ParsedLabels, mut response_parsing_state: ResponseParsingState) -> Result<(), DnsProtocolError>
	{
		let number_of_resource_records = message_header.number_of_resource_records_in_the_additional_records_section();
		let mut discarding_resource_record_visitor = DiscardingResourceRecordVisitor::default();
		let next_resource_record_pointer = Self::loop_over_resource_records(end_of_message_pointer, next_resource_record_pointer, number_of_resource_records, |response_record_section_parsing, resource_record, end_of_message_pointer| resource_record.parse_additional_section_resource_record_in_response(end_of_message_pointer, parsed_labels, &mut discarding_resource_record_visitor, &mut response_parsing_state))?;
		if unlikely!(response_parsing_state.have_yet_to_see_an_edns_opt_resource_record)
		{
			return Err(ResponseDidNotContainAnExtendedDnsOptMetaResourceRecord)
		}
		match response_parsing_state.dnssec_ok
		{
			None => Err(ResponseDoesNotSupportExtendedDns),
			Some(false) => Err(ResponseIgnoredDnsSec),
			Some(true) => Ok(()),
		}
	}
	/// Walks `number_of_resource_records` records starting at
	/// `next_resource_record_pointer`, applying `parse_method` to each and
	/// returning the pointer one past the last record.
	///
	/// NOTE(review): the `Fn` bound declares two parameters, but every
	/// caller passes a three-parameter, state-mutating closure — the bound
	/// likely needs to be `FnMut` with a matching arity.
	#[inline(always)]
	fn loop_over_resource_records(end_of_message_pointer: usize, next_resource_record_pointer: usize, number_of_resource_records: u16, parse_method: impl for<'a> Fn(&mut ResourceRecord, usize) -> Result<usize, DnsProtocolError>) -> Result<usize, DnsProtocolError>
	{
		let mut next_resource_record_pointer = next_resource_record_pointer;
		for _ in 0 .. number_of_resource_records
		{
			// Running off the end of the message before consuming the
			// declared record count is a malformed message.
			if unlikely!(next_resource_record_pointer == end_of_message_pointer)
			{
				return Err(ResourceRecordsOverflowAnswerSection)
			}
			let resource_record = next_resource_record_pointer.unsafe_cast_mut::<ResourceRecord>();
			next_resource_record_pointer = parse_method(resource_record, end_of_message_pointer)?;
		}
		Ok(next_resource_record_pointer)
	}
}
|
/**
* Copyright © 2019
* Sami Shalayel <sami.shalayel@tutamail.com>,
* Carl Schwan <carl@carlschwan.eu>,
* Daniel Freiermuth <d_freiermu14@cs.uni-kl.de>
*
* This work is free. You can redistribute it and/or modify it under the
* terms of the Do What The Fuck You Want To Public License, Version 2,
* as published by Sam Hocevar. See the LICENSE file for more details.
*
* This program is free software. It comes without any warranty, to
* the extent permitted by applicable law. You can redistribute it
* and/or modify it under the terms of the Do What The Fuck You Want
* To Public License, Version 2, as published by Sam Hocevar. See the LICENSE
* file for more details. **/
use crate::ray::Ray;
use crate::shader::Shader;
use crate::world::World;
use nalgebra::{Unit, Vector2, Vector3};
/// Phong-style specular highlight shader.
pub struct SpecularShader {
    /// Shininess exponent; larger values give a tighter highlight.
    pub alpha: f64,
}
impl SpecularShader {
pub fn new(alpha: f64) -> Box<Shader> {
Box::new(SpecularShader { alpha })
}
}
impl Shader for SpecularShader {
    /// Phong specular term: sums `max(r·v, 0)^alpha * light.color` over all
    /// lights that actually reach `intersection_pos` (shadow-tested).
    fn get_appearance_for(
        &self,
        intersection_pos: Vector3<f64>,
        ray_dir: Vector3<f64>,
        _surface_normal: Vector3<f64>,
        world: &World,
        _surface_pos: Vector2<f64>,
        _recursion_depth: f64,
    ) -> Vector3<f64> {
        let mut i_specular = Vector3::new(0.0, 0.0, 0.0);
        for light in &world.lights {
            // Shadow ray cast FROM the light TOWARDS the intersection point.
            let shade_ray = Ray {
                dir: Unit::new_normalize(intersection_pos - light.pos),
                start: light.pos,
            };
            if let Some(shade_intersection) = world.next_intersection(&shade_ray) {
                // Lit only if the first surface the light reaches is (within
                // tolerance 0.1) this intersection; otherwise it is shadowed.
                if (shade_intersection.pos - intersection_pos).norm() < 0.1 {
                    // l_m: unit vector towards the light.
                    let l_m = -shade_ray.dir.normalize();
                    // n_hat: unit surface normal at the hit point.
                    let n_hat = shade_intersection.normal_at_surface.normalize();
                    // r_hat: l_m mirrored about the normal (perfect reflection).
                    let r_hat = (2.0 * l_m.dot(&n_hat) * n_hat - l_m).normalize();
                    // v_hat: unit vector towards the viewer.
                    let v_hat = -ray_dir.normalize();
                    //TODO: put shininess(Reflektionsfaktor) in intersection
                    // Clamp r·v at zero so back-facing reflections add nothing.
                    let rv = r_hat.dot(&v_hat);
                    i_specular += (if rv > 0.0 { rv } else { 0.0 }).powf(self.alpha) * light.color;
                }
            }
        }
        i_specular
    }
}
|
//! Hardware timer demo
//!
#![no_main]
#![no_std]
#![feature(alloc_error_handler)]
extern crate alloc;
use alloc_cortex_m::CortexMHeap;
use bluepill::clocks::ClockExt;
use bluepill::hal::delay::Delay;
use bluepill::hal::gpio::gpioc::PC13;
use bluepill::hal::gpio::{Output, PushPull};
use bluepill::hal::pac::{TIM1, TIM2};
use bluepill::hal::prelude::*;
use bluepill::hal::serial::Config;
use bluepill::hal::timer::CountDownTimer;
use bluepill::hal::timer::Timer;
use bluepill::led::*;
use bluepill::stdio::{self, *};
use bluepill::{sprint, sprintln};
use core::cell::RefCell;
use core::fmt::Write;
use cortex_m::{asm::wfi, interrupt::Mutex};
use cortex_m_rt::entry;
use embedded_hal::timer::Cancel;
use panic_semihosting as _;
use stm32f1xx_hal::pac::interrupt;
use stm32f1xx_hal::pac::Interrupt;
use stm32f1xx_hal::timer::Event;
/// Heap allocator for the `alloc` crate on this bare-metal target.
#[global_allocator]
static ALLOCATOR: CortexMHeap = CortexMHeap::empty();
/// Heap size: 16 KiB.
const HEAP_SIZE: usize = 16384;
/// Entry point: sets up the heap, 72 MHz clocks, USART1 logging, the PC13
/// LED and two count-down timers (TIM1 at 1 Hz driving the LED via TIM2's
/// handler sibling), then alternately enables and disables the TIM1 update
/// interrupt every five seconds.
#[entry]
fn main() -> ! {
    // Initialise the heap once, before any allocation.
    unsafe {
        ALLOCATOR.init(cortex_m_rt::heap_start() as usize, HEAP_SIZE);
    }
    let p = bluepill::Peripherals::take().unwrap(); // core and peripheral devices
    let mut flash = p.device.FLASH.constrain(); // Flash
    let mut rcc = p.device.RCC.constrain(); // RCC
    let mut afio = p.device.AFIO.constrain(&mut rcc.apb2);
    let clocks = rcc.cfgr.clocks_72mhz(&mut flash.acr); // configure the full-speed (72 MHz) clocks
    //let mut delay = Delay::new(cp.SYST, clocks); // configure the delay provider
    let mut gpioa = p.device.GPIOA.split(&mut rcc.apb2);
    let mut gpioc = p.device.GPIOC.split(&mut rcc.apb2);
    //////////////// device initialisation ///////////////////
    let mut delay1 = Delay::new(p.core.SYST, clocks); // SysTick delay used by the main loop
    let (mut tx, _) = bluepill::hal::serial::Serial::usart1(
        p.device.USART1,
        (
            gpioa.pa9.into_alternate_push_pull(&mut gpioa.crh),
            gpioa.pa10,
        ),
        &mut afio.mapr,
        Config::default().baudrate(115200.bps()),
        clocks,
        &mut rcc.apb2,
    )
    .split();
    // Route sprint!/sprintln! output through USART1 TX.
    stdio::use_tx1(tx);
    let mut led = Led(gpioc.pc13).ppo(&mut gpioc.crh); // configure the LED pin as push-pull output
    let mut timer = Timer::tim1(p.device.TIM1, &clocks, &mut rcc.apb2).start_count_down(1.hz());
    let mut delay = Timer::tim2(p.device.TIM2, &clocks, &mut rcc.apb1).start_count_down(1.hz());
    timer.listen(Event::Update);
    delay.listen(Event::Update);
    // Hand the peripherals to the interrupt handlers via the shared cells.
    cortex_m::interrupt::free(|cs| {
        *BLINK.borrow(cs).borrow_mut() = Some(led);
        *DELAY.borrow(cs).borrow_mut() = Some(delay);
        *TIMER.borrow(cs).borrow_mut() = Some(timer);
    });
    bluepill::enable_interrupt(Interrupt::TIM1_UP);
    bluepill::enable_interrupt(Interrupt::TIM2);
    sprintln!("hello timer led");
    loop {
        sprintln!("listen timer");
        cortex_m::interrupt::free(|cs| {
            // Re-enable the TIM1 update interrupt.
            TIMER
                .borrow(cs)
                .borrow_mut()
                .as_mut()
                .unwrap()
                .listen(Event::Update);
        });
        delay1.delay_ms(5000u32);
        sprintln!("unlisten timer");
        cortex_m::interrupt::free(|cs| {
            // Disable the TIM1 update interrupt again.
            TIMER
                .borrow(cs)
                .borrow_mut()
                .as_mut()
                .unwrap()
                .unlisten(Event::Update);
        });
        delay1.delay_ms(5000u32);
        //cortex_m::asm::wfi();
    }
}
/// TIM2 update interrupt: toggles the LED once per timer period.
///
/// On first entry the LED and timer are moved out of the shared Mutex cells
/// into handler-local statics, so subsequent entries need no critical
/// section to reach them.
#[interrupt]
unsafe fn TIM2() {
    static mut LED: Option<Led<PC13<Output<PushPull>>>> = None;
    static mut TIM: Option<CountDownTimer<TIM2>> = None;
    let led = LED.get_or_insert_with(|| {
        cortex_m::interrupt::free(|cs| {
            // Move the LED here, leaving a None in its place
            BLINK.borrow(cs).replace(None).unwrap()
        })
    });
    let tim = TIM.get_or_insert_with(|| {
        cortex_m::interrupt::free(|cs| {
            // Move the TIM2 count-down timer here, leaving a None in its place
            DELAY.borrow(cs).replace(None).unwrap()
        })
    });
    led.toggle();
    // Clears the pending update flag; without this the interrupt refires
    // immediately on return.
    tim.wait().ok();
}
// Counter written from the TIM1_UP handler.
// NOTE(review): `static mut` access — consider `AtomicU32` instead.
static mut COUNT: u32 = 0;
/// LED handed from `main` to the TIM2 handler.
static BLINK: Mutex<RefCell<Option<Led<PC13<Output<PushPull>>>>>> = Mutex::new(RefCell::new(None));
/// TIM2 count-down timer handed from `main` to the TIM2 handler.
static DELAY: Mutex<RefCell<Option<CountDownTimer<TIM2>>>> = Mutex::new(RefCell::new(None));
/// TIM1 timer shared between `main` (listen/unlisten) and the TIM1_UP handler.
static TIMER: Mutex<RefCell<Option<CountDownTimer<TIM1>>>> = Mutex::new(RefCell::new(None));
/// TIM1 update interrupt: fires once per second while `main` has the timer
/// listening; clears the update flag so it can fire again.
#[interrupt]
unsafe fn TIM1_UP() {
    // static mut TIM: Option<CountDownTimer<TIM1>> = None;
    // let tim = TIM.get_or_insert_with(|| {
    // cortex_m::interrupt::free(|cs| {
    // // Move TIMER pin here, leaving a None in its place
    // TIMER.borrow(cs).borrow_mut().unwrap().wait().ok(); //replace(None).unwrap()
    // })
    // });
    cortex_m::interrupt::free(|_| unsafe {
        COUNT += 10;
    });
    // NOTE(review): this reset immediately discards the increment above, so
    // COUNT never observably changes — one of the two looks like leftover
    // experimentation.
    unsafe { COUNT = 0 };
    //tim.wait().ok();
    cortex_m::interrupt::free(|cs| {
        // Clear the pending update flag so the interrupt does not refire
        // immediately on return.
        TIMER.borrow(cs).borrow_mut().as_mut().unwrap().wait().ok(); //replace(None).unwrap()
    });
}
// Executed when the allocator runs out of memory (debugging aid).
#[alloc_error_handler]
fn alloc_error(_layout: core::alloc::Layout) -> ! {
    // Break into an attached debugger, then park the core forever.
    cortex_m::asm::bkpt();
    loop {}
}
|
/// A container for a FFI function pointer.
///
/// `Function<F>` ensures the stored pointer is valid at all times.
/// By default it is set to a special function which panics upon being called.
/// This, unfortunately, results in varargs functions not being storable.
#[derive(Debug, Clone, Copy)]
pub struct Function<F> {
    /// The stored function pointer.
    ptr: F,
}
// Generates the `Function` impls for each arity up to six parameters
// (a..f) — see the `gen_function_impls!` definition for the expansion.
gen_function_impls!(a: A, b: B, c: C, d: D, e: E, f: F);
|
use crate::vec3::Vec3;
use crate::ray::{Ray, hit::{Hittable,HitRecord}, material::Material};
use std::sync::Arc;
/// A sphere primitive: center, radius and the surface material shared via
/// `Arc` so hit records can hold a reference to it cheaply.
pub struct Sphere {
    pub center : Vec3,
    pub radius : f64,
    pub material : Arc<dyn Material>
}
impl Sphere {
    /// Builds a sphere from its center point, radius and surface material.
    pub fn new(center: Vec3, radius: f64, material: Arc<dyn Material>) -> Sphere {
        Sphere {
            center,
            radius,
            material,
        }
    }
}
impl Hittable for Sphere {
    /// Ray/sphere intersection via the quadratic formula. Returns the first
    /// root strictly inside `interval` (nearer root preferred), or `None`
    /// when the ray misses or both roots fall outside the interval.
    fn hit(&self, ray: &Ray, interval: (f64, f64)) -> Option<HitRecord> {
        // Solve |origin + t*dir - center|^2 = r^2 as a*t^2 + 2*half_b*t + c = 0.
        let offset = ray.origin - self.center;
        let a = ray.direction.length_squared();
        let half_b = Vec3::dot(offset, ray.direction);
        let c = offset.length_squared() - self.radius * self.radius;
        let discriminant = half_b * half_b - a * c;
        if discriminant <= 0.0 {
            return None;
        }
        let sqrt_d = discriminant.sqrt();
        // Try the nearer root first, then the farther one.
        for time in [(-half_b - sqrt_d) / a, (-half_b + sqrt_d) / a].iter().copied() {
            if interval.0 < time && time < interval.1 {
                let point = ray.point_at_parameter(time);
                let outward_normal = (point - self.center) / self.radius;
                // Front face: the ray travels against the outward normal.
                let front_face = Vec3::dot(ray.direction, outward_normal) < 0.0;
                let normal = if front_face { outward_normal } else { -outward_normal };
                return Some(HitRecord::new(point, normal, time, front_face, self.material.clone()));
            }
        }
        None
    }
}
|
use std::io::{Read, Result as IOResult, Seek, SeekFrom, Cursor, Error as IOError, ErrorKind};
use crate::read_util::PrimitiveRead;
use crate::lump_data::game_lumps::StaticPropDict;
/// The BSP game-lump directory: the fixed set of `GameLump` headers read
/// from the file, stored as a boxed slice.
pub struct GameLumps {
    game_lumps: Box<[GameLump]>
}
impl GameLumps {
    /// Reads the game-lump directory: an `i32` lump count followed by that
    /// many `GameLump` headers.
    ///
    /// # Errors
    /// Propagates any I/O error from the underlying reader.
    pub fn read(read: &mut dyn Read) -> IOResult<Self> {
        let lump_count = read.read_i32()?;
        let mut game_lumps = Vec::<GameLump>::new();
        for _ in 0..lump_count {
            let game_lump = GameLump::read(read)?;
            game_lumps.push(game_lump);
        }
        Ok(Self {
            game_lumps: game_lumps.into_boxed_slice()
        })
    }

    /// Locates the static-prop dictionary lump, seeks to its file offset and
    /// parses it.
    ///
    /// # Errors
    /// `ErrorKind::Other` when no lump with the static-prop id exists, plus
    /// any I/O error from seeking or reading.
    pub(crate) fn read_static_prop_dict<R: Read + Seek>(&self, read: &mut R) -> IOResult<StaticPropDict> {
        for lump in self.game_lumps.as_ref() {
            if lump.id == StaticPropDict::id() {
                read.seek(SeekFrom::Start(lump.file_offset as u64))?;
                // Fixed undefined behaviour: the previous code used
                // `set_len` on a fresh Vec to hand uninitialised memory to
                // `read_exact`. A zero-initialised buffer is sound and the
                // cost is negligible next to the file I/O.
                let mut data = vec![0u8; lump.file_length as usize];
                read.read_exact(&mut data)?;
                let mut cursor = Cursor::new(data);
                let static_props = StaticPropDict::read(&mut cursor, lump.version)?;
                return Ok(static_props);
            }
        }
        Err(IOError::new(ErrorKind::Other, "Game lump not found"))
    }
}
/// One entry of the game-lump directory.
pub struct GameLump {
    /// Four-CC style lump identifier.
    pub id: u32,
    /// Lump flags as stored in the file.
    pub flags: u16,
    /// Format version of the lump payload.
    pub version: u16,
    /// Absolute byte offset of the payload within the file.
    pub file_offset: i32,
    /// Payload length in bytes.
    pub file_length: i32
}
impl GameLump {
    /// Reads one game-lump directory entry (id, flags, version, offset,
    /// length) in file order.
    pub fn read(read: &mut dyn Read) -> IOResult<Self> {
        let id = read.read_u32()?;
        let flags = read.read_u16()?;
        let version = read.read_u16()?;
        let file_offset = read.read_i32()?;
        let file_length = read.read_i32()?;
        // Removed a leftover `println!("entity id: ...")` debug line that
        // wrote to stdout for every lump (and mislabeled the lump id).
        Ok(Self {
            id,
            flags,
            version,
            file_offset,
            file_length
        })
    }
}
|
// svd2rust-generated accessors for register MPCBB2_VCTR21 — do not hand-edit.
#[doc = "Reader of register MPCBB2_VCTR21"]
pub type R = crate::R<u32, super::MPCBB2_VCTR21>;
#[doc = "Writer for register MPCBB2_VCTR21"]
pub type W = crate::W<u32, super::MPCBB2_VCTR21>;
#[doc = "Register MPCBB2_VCTR21 `reset()`'s with value 0"]
impl crate::ResetValue for super::MPCBB2_VCTR21 {
    type Type = u32;
    // All 32 block bits are cleared after reset.
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
// Generated single-bit field readers/write-proxies for fields B672..B703,
// mapped to bits 0..31 of MPCBB2_VCTR21 (field number minus 672 is the bit
// position). Each proxy masks its bit out of `w.bits` and ORs the new value
// back in.
#[doc = "Reader of field `B672`"]
pub type B672_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B672`"]
pub struct B672_W<'a> {
    w: &'a mut W,
}
impl<'a> B672_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
        self.w
    }
}
#[doc = "Reader of field `B673`"]
pub type B673_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B673`"]
pub struct B673_W<'a> {
    w: &'a mut W,
}
impl<'a> B673_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1);
        self.w
    }
}
#[doc = "Reader of field `B674`"]
pub type B674_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B674`"]
pub struct B674_W<'a> {
    w: &'a mut W,
}
impl<'a> B674_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2);
        self.w
    }
}
#[doc = "Reader of field `B675`"]
pub type B675_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B675`"]
pub struct B675_W<'a> {
    w: &'a mut W,
}
impl<'a> B675_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3);
        self.w
    }
}
#[doc = "Reader of field `B676`"]
pub type B676_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B676`"]
pub struct B676_W<'a> {
    w: &'a mut W,
}
impl<'a> B676_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4);
        self.w
    }
}
#[doc = "Reader of field `B677`"]
pub type B677_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B677`"]
pub struct B677_W<'a> {
    w: &'a mut W,
}
impl<'a> B677_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u32) & 0x01) << 5);
        self.w
    }
}
#[doc = "Reader of field `B678`"]
pub type B678_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B678`"]
pub struct B678_W<'a> {
    w: &'a mut W,
}
impl<'a> B678_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 6)) | (((value as u32) & 0x01) << 6);
        self.w
    }
}
#[doc = "Reader of field `B679`"]
pub type B679_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B679`"]
pub struct B679_W<'a> {
    w: &'a mut W,
}
impl<'a> B679_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 7)) | (((value as u32) & 0x01) << 7);
        self.w
    }
}
#[doc = "Reader of field `B680`"]
pub type B680_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B680`"]
pub struct B680_W<'a> {
    w: &'a mut W,
}
impl<'a> B680_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 8)) | (((value as u32) & 0x01) << 8);
        self.w
    }
}
#[doc = "Reader of field `B681`"]
pub type B681_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B681`"]
pub struct B681_W<'a> {
    w: &'a mut W,
}
impl<'a> B681_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 9)) | (((value as u32) & 0x01) << 9);
        self.w
    }
}
#[doc = "Reader of field `B682`"]
pub type B682_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B682`"]
pub struct B682_W<'a> {
    w: &'a mut W,
}
impl<'a> B682_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 10)) | (((value as u32) & 0x01) << 10);
        self.w
    }
}
#[doc = "Reader of field `B683`"]
pub type B683_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B683`"]
pub struct B683_W<'a> {
    w: &'a mut W,
}
impl<'a> B683_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 11)) | (((value as u32) & 0x01) << 11);
        self.w
    }
}
#[doc = "Reader of field `B684`"]
pub type B684_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B684`"]
pub struct B684_W<'a> {
    w: &'a mut W,
}
impl<'a> B684_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 12)) | (((value as u32) & 0x01) << 12);
        self.w
    }
}
#[doc = "Reader of field `B685`"]
pub type B685_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B685`"]
pub struct B685_W<'a> {
    w: &'a mut W,
}
impl<'a> B685_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 13)) | (((value as u32) & 0x01) << 13);
        self.w
    }
}
#[doc = "Reader of field `B686`"]
pub type B686_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B686`"]
pub struct B686_W<'a> {
    w: &'a mut W,
}
impl<'a> B686_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 14)) | (((value as u32) & 0x01) << 14);
        self.w
    }
}
#[doc = "Reader of field `B687`"]
pub type B687_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B687`"]
pub struct B687_W<'a> {
    w: &'a mut W,
}
impl<'a> B687_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 15)) | (((value as u32) & 0x01) << 15);
        self.w
    }
}
#[doc = "Reader of field `B688`"]
pub type B688_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B688`"]
pub struct B688_W<'a> {
    w: &'a mut W,
}
impl<'a> B688_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 16)) | (((value as u32) & 0x01) << 16);
        self.w
    }
}
#[doc = "Reader of field `B689`"]
pub type B689_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B689`"]
pub struct B689_W<'a> {
    w: &'a mut W,
}
impl<'a> B689_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 17)) | (((value as u32) & 0x01) << 17);
        self.w
    }
}
#[doc = "Reader of field `B690`"]
pub type B690_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B690`"]
pub struct B690_W<'a> {
    w: &'a mut W,
}
impl<'a> B690_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 18)) | (((value as u32) & 0x01) << 18);
        self.w
    }
}
#[doc = "Reader of field `B691`"]
pub type B691_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B691`"]
pub struct B691_W<'a> {
    w: &'a mut W,
}
impl<'a> B691_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 19)) | (((value as u32) & 0x01) << 19);
        self.w
    }
}
#[doc = "Reader of field `B692`"]
pub type B692_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B692`"]
pub struct B692_W<'a> {
    w: &'a mut W,
}
impl<'a> B692_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 20)) | (((value as u32) & 0x01) << 20);
        self.w
    }
}
#[doc = "Reader of field `B693`"]
pub type B693_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B693`"]
pub struct B693_W<'a> {
    w: &'a mut W,
}
impl<'a> B693_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 21)) | (((value as u32) & 0x01) << 21);
        self.w
    }
}
#[doc = "Reader of field `B694`"]
pub type B694_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B694`"]
pub struct B694_W<'a> {
    w: &'a mut W,
}
impl<'a> B694_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 22)) | (((value as u32) & 0x01) << 22);
        self.w
    }
}
#[doc = "Reader of field `B695`"]
pub type B695_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B695`"]
pub struct B695_W<'a> {
    w: &'a mut W,
}
impl<'a> B695_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 23)) | (((value as u32) & 0x01) << 23);
        self.w
    }
}
#[doc = "Reader of field `B696`"]
pub type B696_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B696`"]
pub struct B696_W<'a> {
    w: &'a mut W,
}
impl<'a> B696_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 24)) | (((value as u32) & 0x01) << 24);
        self.w
    }
}
#[doc = "Reader of field `B697`"]
pub type B697_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B697`"]
pub struct B697_W<'a> {
    w: &'a mut W,
}
impl<'a> B697_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 25)) | (((value as u32) & 0x01) << 25);
        self.w
    }
}
#[doc = "Reader of field `B698`"]
pub type B698_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B698`"]
pub struct B698_W<'a> {
    w: &'a mut W,
}
impl<'a> B698_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 26)) | (((value as u32) & 0x01) << 26);
        self.w
    }
}
#[doc = "Reader of field `B699`"]
pub type B699_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B699`"]
pub struct B699_W<'a> {
    w: &'a mut W,
}
impl<'a> B699_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 27)) | (((value as u32) & 0x01) << 27);
        self.w
    }
}
#[doc = "Reader of field `B700`"]
pub type B700_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B700`"]
pub struct B700_W<'a> {
    w: &'a mut W,
}
impl<'a> B700_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 28)) | (((value as u32) & 0x01) << 28);
        self.w
    }
}
#[doc = "Reader of field `B701`"]
pub type B701_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B701`"]
pub struct B701_W<'a> {
    w: &'a mut W,
}
impl<'a> B701_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 29)) | (((value as u32) & 0x01) << 29);
        self.w
    }
}
#[doc = "Reader of field `B702`"]
pub type B702_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B702`"]
pub struct B702_W<'a> {
    w: &'a mut W,
}
impl<'a> B702_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 30)) | (((value as u32) & 0x01) << 30);
        self.w
    }
}
#[doc = "Reader of field `B703`"]
pub type B703_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `B703`"]
pub struct B703_W<'a> {
    w: &'a mut W,
}
impl<'a> B703_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 31)) | (((value as u32) & 0x01) << 31);
        self.w
    }
}
// Generated read accessors: bit k of the register value maps to field B(672+k).
impl R {
    #[doc = "Bit 0 - B672"]
    #[inline(always)]
    pub fn b672(&self) -> B672_R {
        B672_R::new((self.bits & 0x01) != 0)
    }
    #[doc = "Bit 1 - B673"]
    #[inline(always)]
    pub fn b673(&self) -> B673_R {
        B673_R::new(((self.bits >> 1) & 0x01) != 0)
    }
    #[doc = "Bit 2 - B674"]
    #[inline(always)]
    pub fn b674(&self) -> B674_R {
        B674_R::new(((self.bits >> 2) & 0x01) != 0)
    }
    #[doc = "Bit 3 - B675"]
    #[inline(always)]
    pub fn b675(&self) -> B675_R {
        B675_R::new(((self.bits >> 3) & 0x01) != 0)
    }
    #[doc = "Bit 4 - B676"]
    #[inline(always)]
    pub fn b676(&self) -> B676_R {
        B676_R::new(((self.bits >> 4) & 0x01) != 0)
    }
    #[doc = "Bit 5 - B677"]
    #[inline(always)]
    pub fn b677(&self) -> B677_R {
        B677_R::new(((self.bits >> 5) & 0x01) != 0)
    }
    #[doc = "Bit 6 - B678"]
    #[inline(always)]
    pub fn b678(&self) -> B678_R {
        B678_R::new(((self.bits >> 6) & 0x01) != 0)
    }
    #[doc = "Bit 7 - B679"]
    #[inline(always)]
    pub fn b679(&self) -> B679_R {
        B679_R::new(((self.bits >> 7) & 0x01) != 0)
    }
    #[doc = "Bit 8 - B680"]
    #[inline(always)]
    pub fn b680(&self) -> B680_R {
        B680_R::new(((self.bits >> 8) & 0x01) != 0)
    }
    #[doc = "Bit 9 - B681"]
    #[inline(always)]
    pub fn b681(&self) -> B681_R {
        B681_R::new(((self.bits >> 9) & 0x01) != 0)
    }
    #[doc = "Bit 10 - B682"]
    #[inline(always)]
    pub fn b682(&self) -> B682_R {
        B682_R::new(((self.bits >> 10) & 0x01) != 0)
    }
    #[doc = "Bit 11 - B683"]
    #[inline(always)]
    pub fn b683(&self) -> B683_R {
        B683_R::new(((self.bits >> 11) & 0x01) != 0)
    }
    #[doc = "Bit 12 - B684"]
    #[inline(always)]
    pub fn b684(&self) -> B684_R {
        B684_R::new(((self.bits >> 12) & 0x01) != 0)
    }
    #[doc = "Bit 13 - B685"]
    #[inline(always)]
    pub fn b685(&self) -> B685_R {
        B685_R::new(((self.bits >> 13) & 0x01) != 0)
    }
    #[doc = "Bit 14 - B686"]
    #[inline(always)]
    pub fn b686(&self) -> B686_R {
        B686_R::new(((self.bits >> 14) & 0x01) != 0)
    }
    #[doc = "Bit 15 - B687"]
    #[inline(always)]
    pub fn b687(&self) -> B687_R {
        B687_R::new(((self.bits >> 15) & 0x01) != 0)
    }
    #[doc = "Bit 16 - B688"]
    #[inline(always)]
    pub fn b688(&self) -> B688_R {
        B688_R::new(((self.bits >> 16) & 0x01) != 0)
    }
    #[doc = "Bit 17 - B689"]
    #[inline(always)]
    pub fn b689(&self) -> B689_R {
        B689_R::new(((self.bits >> 17) & 0x01) != 0)
    }
    #[doc = "Bit 18 - B690"]
    #[inline(always)]
    pub fn b690(&self) -> B690_R {
        B690_R::new(((self.bits >> 18) & 0x01) != 0)
    }
    #[doc = "Bit 19 - B691"]
    #[inline(always)]
    pub fn b691(&self) -> B691_R {
        B691_R::new(((self.bits >> 19) & 0x01) != 0)
    }
    #[doc = "Bit 20 - B692"]
    #[inline(always)]
    pub fn b692(&self) -> B692_R {
        B692_R::new(((self.bits >> 20) & 0x01) != 0)
    }
    #[doc = "Bit 21 - B693"]
    #[inline(always)]
    pub fn b693(&self) -> B693_R {
        B693_R::new(((self.bits >> 21) & 0x01) != 0)
    }
    #[doc = "Bit 22 - B694"]
    #[inline(always)]
    pub fn b694(&self) -> B694_R {
        B694_R::new(((self.bits >> 22) & 0x01) != 0)
    }
    #[doc = "Bit 23 - B695"]
    #[inline(always)]
    pub fn b695(&self) -> B695_R {
        B695_R::new(((self.bits >> 23) & 0x01) != 0)
    }
    #[doc = "Bit 24 - B696"]
    #[inline(always)]
    pub fn b696(&self) -> B696_R {
        B696_R::new(((self.bits >> 24) & 0x01) != 0)
    }
    #[doc = "Bit 25 - B697"]
    #[inline(always)]
    pub fn b697(&self) -> B697_R {
        B697_R::new(((self.bits >> 25) & 0x01) != 0)
    }
    #[doc = "Bit 26 - B698"]
    #[inline(always)]
    pub fn b698(&self) -> B698_R {
        B698_R::new(((self.bits >> 26) & 0x01) != 0)
    }
    #[doc = "Bit 27 - B699"]
    #[inline(always)]
    pub fn b699(&self) -> B699_R {
        B699_R::new(((self.bits >> 27) & 0x01) != 0)
    }
    #[doc = "Bit 28 - B700"]
    #[inline(always)]
    pub fn b700(&self) -> B700_R {
        B700_R::new(((self.bits >> 28) & 0x01) != 0)
    }
    #[doc = "Bit 29 - B701"]
    #[inline(always)]
    pub fn b701(&self) -> B701_R {
        B701_R::new(((self.bits >> 29) & 0x01) != 0)
    }
    #[doc = "Bit 30 - B702"]
    #[inline(always)]
    pub fn b702(&self) -> B702_R {
        B702_R::new(((self.bits >> 30) & 0x01) != 0)
    }
    #[doc = "Bit 31 - B703"]
    #[inline(always)]
    pub fn b703(&self) -> B703_R {
        B703_R::new(((self.bits >> 31) & 0x01) != 0)
    }
}
// Generated write accessors: each method hands back a one-bit write proxy for
// the corresponding field (bit k for B(672+k)).
impl W {
    #[doc = "Bit 0 - B672"]
    #[inline(always)]
    pub fn b672(&mut self) -> B672_W {
        B672_W { w: self }
    }
    #[doc = "Bit 1 - B673"]
    #[inline(always)]
    pub fn b673(&mut self) -> B673_W {
        B673_W { w: self }
    }
    #[doc = "Bit 2 - B674"]
    #[inline(always)]
    pub fn b674(&mut self) -> B674_W {
        B674_W { w: self }
    }
    #[doc = "Bit 3 - B675"]
    #[inline(always)]
    pub fn b675(&mut self) -> B675_W {
        B675_W { w: self }
    }
    #[doc = "Bit 4 - B676"]
    #[inline(always)]
    pub fn b676(&mut self) -> B676_W {
        B676_W { w: self }
    }
    #[doc = "Bit 5 - B677"]
    #[inline(always)]
    pub fn b677(&mut self) -> B677_W {
        B677_W { w: self }
    }
    #[doc = "Bit 6 - B678"]
    #[inline(always)]
    pub fn b678(&mut self) -> B678_W {
        B678_W { w: self }
    }
    #[doc = "Bit 7 - B679"]
    #[inline(always)]
    pub fn b679(&mut self) -> B679_W {
        B679_W { w: self }
    }
    #[doc = "Bit 8 - B680"]
    #[inline(always)]
    pub fn b680(&mut self) -> B680_W {
        B680_W { w: self }
    }
    #[doc = "Bit 9 - B681"]
    #[inline(always)]
    pub fn b681(&mut self) -> B681_W {
        B681_W { w: self }
    }
    #[doc = "Bit 10 - B682"]
    #[inline(always)]
    pub fn b682(&mut self) -> B682_W {
        B682_W { w: self }
    }
    #[doc = "Bit 11 - B683"]
    #[inline(always)]
    pub fn b683(&mut self) -> B683_W {
        B683_W { w: self }
    }
    #[doc = "Bit 12 - B684"]
    #[inline(always)]
    pub fn b684(&mut self) -> B684_W {
        B684_W { w: self }
    }
    #[doc = "Bit 13 - B685"]
    #[inline(always)]
    pub fn b685(&mut self) -> B685_W {
        B685_W { w: self }
    }
    #[doc = "Bit 14 - B686"]
    #[inline(always)]
    pub fn b686(&mut self) -> B686_W {
        B686_W { w: self }
    }
    #[doc = "Bit 15 - B687"]
    #[inline(always)]
    pub fn b687(&mut self) -> B687_W {
        B687_W { w: self }
    }
    #[doc = "Bit 16 - B688"]
    #[inline(always)]
    pub fn b688(&mut self) -> B688_W {
        B688_W { w: self }
    }
    #[doc = "Bit 17 - B689"]
    #[inline(always)]
    pub fn b689(&mut self) -> B689_W {
        B689_W { w: self }
    }
    #[doc = "Bit 18 - B690"]
    #[inline(always)]
    pub fn b690(&mut self) -> B690_W {
        B690_W { w: self }
    }
    #[doc = "Bit 19 - B691"]
    #[inline(always)]
    pub fn b691(&mut self) -> B691_W {
        B691_W { w: self }
    }
    #[doc = "Bit 20 - B692"]
    #[inline(always)]
    pub fn b692(&mut self) -> B692_W {
        B692_W { w: self }
    }
    #[doc = "Bit 21 - B693"]
    #[inline(always)]
    pub fn b693(&mut self) -> B693_W {
        B693_W { w: self }
    }
    #[doc = "Bit 22 - B694"]
    #[inline(always)]
    pub fn b694(&mut self) -> B694_W {
        B694_W { w: self }
    }
    #[doc = "Bit 23 - B695"]
    #[inline(always)]
    pub fn b695(&mut self) -> B695_W {
        B695_W { w: self }
    }
    #[doc = "Bit 24 - B696"]
    #[inline(always)]
    pub fn b696(&mut self) -> B696_W {
        B696_W { w: self }
    }
    #[doc = "Bit 25 - B697"]
    #[inline(always)]
    pub fn b697(&mut self) -> B697_W {
        B697_W { w: self }
    }
    #[doc = "Bit 26 - B698"]
    #[inline(always)]
    pub fn b698(&mut self) -> B698_W {
        B698_W { w: self }
    }
    #[doc = "Bit 27 - B699"]
    #[inline(always)]
    pub fn b699(&mut self) -> B699_W {
        B699_W { w: self }
    }
    #[doc = "Bit 28 - B700"]
    #[inline(always)]
    pub fn b700(&mut self) -> B700_W {
        B700_W { w: self }
    }
    #[doc = "Bit 29 - B701"]
    #[inline(always)]
    pub fn b701(&mut self) -> B701_W {
        B701_W { w: self }
    }
    #[doc = "Bit 30 - B702"]
    #[inline(always)]
    pub fn b702(&mut self) -> B702_W {
        B702_W { w: self }
    }
    #[doc = "Bit 31 - B703"]
    #[inline(always)]
    pub fn b703(&mut self) -> B703_W {
        B703_W { w: self }
    }
}
|
use super::super::types::DBConnection;
/// Caps every statement on this session at 2000 ms via
/// `statement_timeout`; propagates any error from the driver.
pub fn set_query_timeout(conn: &DBConnection) -> postgres::Result<()> {
    conn.execute("SET SESSION statement_timeout TO 2000", &[])
        .map(|_| ())
}
|
#![allow(non_snake_case, non_camel_case_types, non_upper_case_globals, clashing_extern_declarations, clippy::all)]
// Raw FFI declarations for the DXC entry points exported by dxcompiler
// (windows-sys generated — do not hand-edit).
#[link(name = "windows")]
extern "system" {
    // Creates a DXC object (compiler, library, validator, ...) by CLSID/IID.
    pub fn DxcCreateInstance(rclsid: *const ::windows_sys::core::GUID, riid: *const ::windows_sys::core::GUID, ppv: *mut *mut ::core::ffi::c_void) -> ::windows_sys::core::HRESULT;
    // Same as above but with a caller-supplied IMalloc allocator.
    #[cfg(feature = "Win32_System_Com")]
    pub fn DxcCreateInstance2(pmalloc: super::super::super::System::Com::IMalloc, rclsid: *const ::windows_sys::core::GUID, riid: *const ::windows_sys::core::GUID, ppv: *mut *mut ::core::ffi::c_void) -> ::windows_sys::core::HRESULT;
}
// Class ids for the DXC COM objects, plus codepage and output-kind constants
// (windows-sys generated — values must match dxcapi.h; do not hand-edit).
pub const CLSID_DxcAssembler: ::windows_sys::core::GUID = ::windows_sys::core::GUID {
    data1: 3609779048,
    data2: 63747,
    data3: 20352,
    data4: [148, 205, 220, 207, 118, 236, 113, 81],
};
pub const CLSID_DxcCompiler: ::windows_sys::core::GUID = ::windows_sys::core::GUID {
    data1: 1944202643,
    data2: 59086,
    data3: 18419,
    data4: [181, 191, 240, 102, 79, 57, 193, 176],
};
pub const CLSID_DxcCompilerArgs: ::windows_sys::core::GUID = ::windows_sys::core::GUID {
    data1: 1045868162,
    data2: 8781,
    data3: 18191,
    data4: [161, 161, 254, 48, 22, 238, 159, 157],
};
pub const CLSID_DxcContainerBuilder: ::windows_sys::core::GUID = ::windows_sys::core::GUID {
    data1: 2484290196,
    data2: 16671,
    data3: 17780,
    data4: [180, 208, 135, 65, 226, 82, 64, 210],
};
pub const CLSID_DxcContainerReflection: ::windows_sys::core::GUID = ::windows_sys::core::GUID {
    data1: 3119858825,
    data2: 21944,
    data3: 16396,
    data4: [186, 58, 22, 117, 228, 114, 139, 145],
};
pub const CLSID_DxcDiaDataSource: ::windows_sys::core::GUID = ::windows_sys::core::GUID {
    data1: 3441388403,
    data2: 10928,
    data3: 18509,
    data4: [142, 220, 235, 231, 164, 60, 160, 159],
};
pub const CLSID_DxcLibrary: ::windows_sys::core::GUID = ::windows_sys::core::GUID {
    data1: 1648744111,
    data2: 26336,
    data3: 18685,
    data4: [128, 180, 77, 39, 23, 150, 116, 140],
};
pub const CLSID_DxcLinker: ::windows_sys::core::GUID = ::windows_sys::core::GUID { data1: 4016734343, data2: 45290, data3: 19798, data4: [158, 69, 208, 126, 26, 139, 120, 6] };
pub const CLSID_DxcOptimizer: ::windows_sys::core::GUID = ::windows_sys::core::GUID {
    data1: 2922174367,
    data2: 52258,
    data3: 17727,
    data4: [155, 107, 177, 36, 231, 165, 32, 76],
};
pub const CLSID_DxcPdbUtils: ::windows_sys::core::GUID = ::windows_sys::core::GUID {
    data1: 1415716347,
    data2: 62158,
    data3: 17790,
    data4: [174, 140, 236, 53, 95, 174, 236, 124],
};
pub const CLSID_DxcValidator: ::windows_sys::core::GUID = ::windows_sys::core::GUID {
    data1: 2359550485,
    data2: 63272,
    data3: 19699,
    data4: [140, 221, 136, 175, 145, 117, 135, 161],
};
// Source-text codepage values (ANSI, UTF-16, UTF-8).
pub type DXC_CP = u32;
pub const DXC_CP_ACP: DXC_CP = 0u32;
pub const DXC_CP_UTF16: DXC_CP = 1200u32;
pub const DXC_CP_UTF8: DXC_CP = 65001u32;
pub const DXC_HASHFLAG_INCLUDES_SOURCE: u32 = 1u32;
// Kinds of output a DXC compilation result can expose.
pub type DXC_OUT_KIND = i32;
pub const DXC_OUT_NONE: DXC_OUT_KIND = 0i32;
pub const DXC_OUT_OBJECT: DXC_OUT_KIND = 1i32;
pub const DXC_OUT_ERRORS: DXC_OUT_KIND = 2i32;
pub const DXC_OUT_PDB: DXC_OUT_KIND = 3i32;
pub const DXC_OUT_SHADER_HASH: DXC_OUT_KIND = 4i32;
pub const DXC_OUT_DISASSEMBLY: DXC_OUT_KIND = 5i32;
pub const DXC_OUT_HLSL: DXC_OUT_KIND = 6i32;
pub const DXC_OUT_TEXT: DXC_OUT_KIND = 7i32;
pub const DXC_OUT_REFLECTION: DXC_OUT_KIND = 8i32;
pub const DXC_OUT_ROOT_SIGNATURE: DXC_OUT_KIND = 9i32;
pub const DXC_OUT_EXTRA_OUTPUTS: DXC_OUT_KIND = 10i32;
pub const DXC_OUT_FORCE_DWORD: DXC_OUT_KIND = -1i32;
// Plain-old-data FFI structs and opaque interface aliases for DXC
// (windows-sys generated — layouts must match dxcapi.h; do not hand-edit).
#[repr(C)]
#[cfg(feature = "Win32_Foundation")]
pub struct DxcArgPair {
    pub pName: super::super::super::Foundation::PWSTR,
    pub pValue: super::super::super::Foundation::PWSTR,
}
#[cfg(feature = "Win32_Foundation")]
impl ::core::marker::Copy for DxcArgPair {}
#[cfg(feature = "Win32_Foundation")]
impl ::core::clone::Clone for DxcArgPair {
    fn clone(&self) -> Self {
        *self
    }
}
// A typed buffer of bytes with an associated text encoding (DXC_CP value).
#[repr(C)]
pub struct DxcBuffer {
    pub Ptr: *mut ::core::ffi::c_void,
    pub Size: usize,
    pub Encoding: u32,
}
impl ::core::marker::Copy for DxcBuffer {}
impl ::core::clone::Clone for DxcBuffer {
    fn clone(&self) -> Self {
        *self
    }
}
// Function-pointer aliases matching the extern entry points above.
#[cfg(feature = "Win32_System_Com")]
pub type DxcCreateInstance2Proc = ::core::option::Option<unsafe extern "system" fn(pmalloc: super::super::super::System::Com::IMalloc, rclsid: *const ::windows_sys::core::GUID, riid: *const ::windows_sys::core::GUID, ppv: *mut *mut ::core::ffi::c_void) -> ::windows_sys::core::HRESULT>;
pub type DxcCreateInstanceProc = ::core::option::Option<unsafe extern "system" fn(rclsid: *const ::windows_sys::core::GUID, riid: *const ::windows_sys::core::GUID, ppv: *mut *mut ::core::ffi::c_void) -> ::windows_sys::core::HRESULT>;
#[repr(C)]
#[cfg(feature = "Win32_Foundation")]
pub struct DxcDefine {
    pub Name: super::super::super::Foundation::PWSTR,
    pub Value: super::super::super::Foundation::PWSTR,
}
#[cfg(feature = "Win32_Foundation")]
impl ::core::marker::Copy for DxcDefine {}
#[cfg(feature = "Win32_Foundation")]
impl ::core::clone::Clone for DxcDefine {
    fn clone(&self) -> Self {
        *self
    }
}
// 128-bit shader hash plus flags (see DXC_HASHFLAG_* above).
#[repr(C)]
pub struct DxcShaderHash {
    pub Flags: u32,
    pub HashDigest: [u8; 16],
}
impl ::core::marker::Copy for DxcShaderHash {}
impl ::core::clone::Clone for DxcShaderHash {
    fn clone(&self) -> Self {
        *self
    }
}
pub const DxcValidatorFlags_Default: u32 = 0u32;
pub const DxcValidatorFlags_InPlaceEdit: u32 = 1u32;
pub const DxcValidatorFlags_ModuleOnly: u32 = 4u32;
pub const DxcValidatorFlags_RootSignatureOnly: u32 = 2u32;
pub const DxcValidatorFlags_ValidMask: u32 = 7u32;
pub const DxcVersionInfoFlags_Debug: u32 = 1u32;
pub const DxcVersionInfoFlags_Internal: u32 = 2u32;
pub const DxcVersionInfoFlags_None: u32 = 0u32;
// COM interfaces are represented as opaque pointers in windows-sys.
pub type IDxcAssembler = *mut ::core::ffi::c_void;
pub type IDxcBlob = *mut ::core::ffi::c_void;
pub type IDxcBlobEncoding = *mut ::core::ffi::c_void;
pub type IDxcBlobUtf16 = *mut ::core::ffi::c_void;
pub type IDxcBlobUtf8 = *mut ::core::ffi::c_void;
pub type IDxcCompiler = *mut ::core::ffi::c_void;
pub type IDxcCompiler2 = *mut ::core::ffi::c_void;
pub type IDxcCompiler3 = *mut ::core::ffi::c_void;
pub type IDxcCompilerArgs = *mut ::core::ffi::c_void;
pub type IDxcContainerBuilder = *mut ::core::ffi::c_void;
pub type IDxcContainerReflection = *mut ::core::ffi::c_void;
pub type IDxcExtraOutputs = *mut ::core::ffi::c_void;
pub type IDxcIncludeHandler = *mut ::core::ffi::c_void;
pub type IDxcLibrary = *mut ::core::ffi::c_void;
pub type IDxcLinker = *mut ::core::ffi::c_void;
pub type IDxcOperationResult = *mut ::core::ffi::c_void;
pub type IDxcOptimizer = *mut ::core::ffi::c_void;
pub type IDxcOptimizerPass = *mut ::core::ffi::c_void;
pub type IDxcPdbUtils = *mut ::core::ffi::c_void;
pub type IDxcResult = *mut ::core::ffi::c_void;
pub type IDxcUtils = *mut ::core::ffi::c_void;
pub type IDxcValidator = *mut ::core::ffi::c_void;
pub type IDxcValidator2 = *mut ::core::ffi::c_void;
pub type IDxcVersionInfo = *mut ::core::ffi::c_void;
pub type IDxcVersionInfo2 = *mut ::core::ffi::c_void;
pub type IDxcVersionInfo3 = *mut ::core::ffi::c_void;
|
use criterion::{
criterion_group, criterion_main, measurement::WallTime, BenchmarkGroup, Criterion,
};
use std::time::Duration;
use ray_tracing::RunConfig;
/// Build the shared benchmark configuration: 3:2 aspect ratio, quiet output,
/// and BVH acceleration disabled so runs measure the brute-force path.
fn get_config() -> RunConfig<'static> {
    let mut cfg = RunConfig::default();
    cfg.quiet = true;
    cfg.use_bvh = false;
    cfg.img_config.aspect_ratio = 3.0 / 2.0;
    cfg
}
/// Apply the shared Criterion settings to a group: 5 s warm-up, 30 s
/// measurement window, and only 10 samples (each render is expensive).
fn set_up_group(group: &mut BenchmarkGroup<WallTime>) {
    group.warm_up_time(Duration::from_secs(5));
    group.measurement_time(Duration::from_secs(30));
    group.sample_size(10);
}
/// Benchmark the renderer at a 100-pixel image width.
pub fn small(c: &mut Criterion) {
    let mut cfg = get_config();
    cfg.img_config.width = 100;
    let mut group = c.benchmark_group("Small");
    set_up_group(&mut group);
    group.bench_function("Small", |b| b.iter(|| ray_tracing::run(&cfg)));
    group.finish();
}
/// Benchmark the renderer at a 200-pixel image width.
pub fn medium(c: &mut Criterion) {
    let mut group = c.benchmark_group("Medium");
    set_up_group(&mut group);
    let mut config = get_config();
    config.img_config.width = 200;
    // Fix: the benchmark id was misspelled "Meduim", inconsistent with the
    // group name and with the "Small"/"Large" siblings. Note that renaming the
    // id orphans any previously saved "Meduim" Criterion baselines.
    group.bench_function("Medium", |b| b.iter(|| ray_tracing::run(&config)));
    group.finish();
}
/// Benchmark the renderer at a 500-pixel image width.
pub fn large(c: &mut Criterion) {
    let mut cfg = get_config();
    cfg.img_config.width = 500;
    let mut group = c.benchmark_group("Large");
    set_up_group(&mut group);
    group.bench_function("Large", |b| b.iter(|| ray_tracing::run(&cfg)));
    group.finish();
}
// Register the three scene-size benchmarks and generate the harness `main`.
criterion_group!(benches, small, medium, large,);
criterion_main!(benches);
|
use winit::{event::{Event, WindowEvent}, event_loop::{EventLoop, ControlFlow}, window::{WindowBuilder}};
use pixels::{SurfaceTexture, Pixels};
use std::{sync::{Arc, atomic::{AtomicBool, Ordering::Relaxed}}, thread, time::{Instant}};
use antmachine::{ants::World};
//use rand::distributions::{Distribution, Uniform};
use parking_lot::Mutex;
// Logical framebuffer size in pixels (the surface scales this to the window).
static WIDTH: u32 = 256;
static HEIGHT: u32 = 256;
// Number of ants in the simulated world.
static ANTS: usize = 256;
/// Opens a maximized window and runs three concurrent activities:
/// a simulation thread evolving the ant world, a render thread drawing the
/// world into the pixel buffer, and the winit event loop handling
/// close/resize events on the main thread.
fn main() {
    let event_loop = EventLoop::new();
    let win = WindowBuilder::new()
        .with_title("Ants!")
        .with_maximized(true)
        .build(&event_loop).unwrap();
    let size = win.inner_size();
    let width = size.width;
    let height = size.height;
    let surface_texture = SurfaceTexture::new(width, height, &win);
    // Pixel buffer shared between the render thread and the resize handler.
    let pixels = Arc::new(Mutex::new(Pixels::new(WIDTH, HEIGHT, surface_texture).unwrap()));
    let event_pix = pixels.clone();
    //let start = Instant::now();
    // World state shared between the simulation and render threads.
    let world = Arc::new(Mutex::new(World::new(ANTS)));
    let world_clone = world.clone();
    // Flag raised by the event loop during a resize so the render thread
    // skips frames while the surface/buffer are being swapped out.
    let is_resized = Arc::new(AtomicBool::new(false));
    let is_resized_event = is_resized.clone();
    // Simulation thread: evolves the world as fast as possible and logs stats.
    thread::spawn(move || {
        let mut evolution = 0;
        //let ants = Uniform::from(0..ANTS);
        //let mut rng = rand::thread_rng();
        loop {
            let evo_time = Instant::now();
            {
                // Scope the lock so it is released before the next iteration.
                let mut world = world.lock();
                world.evolve_threaded(16);
                evolution += 1;
                println!("{} ants\t{} pheromones\t{} evolution\t{} micros to compute",
                    world.ants.len(), world.pheromones.len(),
                    evolution, evo_time.elapsed().as_micros())
            }
        }
    });
    // Render thread: clears the frame, plots pheromones and ants, presents.
    thread::spawn(move || {
        loop {
            // NOTE(review): this busy-spins while a resize is in progress;
            // a short sleep or a condvar would avoid burning a core.
            if is_resized.load(std::sync::atomic::Ordering::Relaxed) {
                continue;
            }
            let mut pixels = pixels.lock();
            // View the flat RGBA buffer as rows of 4-byte pixels.
            let mut frame: Vec<Vec<&mut [u8]>> = pixels.get_frame()
                .chunks_exact_mut(WIDTH as usize * 4)
                .map(|x| x.chunks_exact_mut(4).collect())
                .collect();
            // Clear to opaque black.
            for row in frame.iter_mut() {
                for px in row {
                    px.copy_from_slice(&[0x00, 0x00, 0x00, 0xFF]);
                }
            }
            {
                let world = world_clone.lock();
                // NOTE(review): indexing assumes positions are normalized to
                // [0, 1); values at or beyond 1.0 (or x == 0 after the mirror
                // below) would index out of bounds — TODO confirm invariant.
                for ph in &world.pheromones {
                    frame[(ph.pos.y * HEIGHT as f64) as usize]
                        [(WIDTH as f64 - (ph.pos.x * WIDTH as f64)) as usize]
                        .copy_from_slice(&[(ph.pow * 128.) as u8, (ph.pow * 128.) as u8, (ph.pow * 128.) as u8, 0xFF])
                }
                for ant in &world.ants {
                    frame[(ant.pos.y * HEIGHT as f64) as usize]
                        [(WIDTH as f64 - (ant.pos.x * WIDTH as f64)) as usize]
                        .copy_from_slice(&[0xFF, 0xFF, 0xFF, 0xFF])
                }
            }
            pixels.render().unwrap();
        }
    });
    // Main-thread event loop: handle close and resize; any other event clears
    // the resize flag so the render thread resumes.
    event_loop.run(move |e, _, cf| {
        *cf = ControlFlow::Wait;
        match e {
            Event::WindowEvent { event: WindowEvent::CloseRequested, .. } => *cf = ControlFlow::Exit,
            Event::WindowEvent { event: WindowEvent::Resized(size), ..} => {
                is_resized_event.store(true, Relaxed);
                let width = size.width;
                let height = size.height;
                let mut pixels = event_pix.lock();
                pixels.resize_surface(width, height);
                pixels.resize_buffer(WIDTH, HEIGHT)
            }
            _ => {
                if is_resized_event.load(Relaxed) {
                    is_resized_event.store(false, Relaxed)
                };
            },
        }
    })
}
|
/// Example enum showing the four variant shapes: unit, struct-like,
/// single-value tuple, and multi-value tuple.
#[derive(Debug)]
enum Message {
    Quit,
    Move { x: i32, y: i32 },
    Write(String),
    ChangeColor(i32, i32, i32),
}
// ex. of defining a method on enum
impl Message {
    /// Print a line describing the variant and its payload.
    fn call(&self) {
        use Message::*;
        match self {
            Quit => println!("i am quit"),
            Move { x, y } => println!("i am move {},{}", x, y),
            Write(s) => println!("i am Write: {}", s),
            ChangeColor(r, g, b) => {
                println!("I am ChangeColor r:{} g:{} b:{}", r, g, b)
            }
        }
    }
}
fn main() {
let m = Message::Write(String::from("THE WRITE MESSAGE"));
let m1 = Message::Move {x:99, y:88};
m.call();
m1.call();
let mut count = 0;
if let Message::Move {x,y} = m1 {
println!("move message to {} {}", x,y);
} else {
count += 1;
}
println!("count={}", count);
} |
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This is a regression test against an ICE that used to occur
// on malformed attributes for a custom MultiModifier.
// aux-build:macro_crate_test.rs
// ignore-stage1
#![feature(plugin)]
#![plugin(macro_crate_test)]
// Each attribute below is deliberately malformed: the `//~ ERROR` annotations
// assert the diagnostic the compiler must emit. Do not "fix" these.
#[noop_attribute"x"] //~ ERROR expected one of
fn night() { }
#[noop_attribute("hi"), rank = 2] //~ ERROR unexpected token
fn knight() { }
#[noop_attribute("/user", data= = "<user")] //~ ERROR literal or identifier
fn nite() { }
fn main() {}
|
use lazy_static::lazy_static;
use std::collections::HashSet;
/// Entry point for day 8: reads the puzzle input once (cached in a lazily
/// initialised static) and runs both parts on it.
pub fn run() {
    lazy_static! {
        // Read on first access; the trailing newline is stripped so `lines()`
        // does not yield an empty final entry.
        static ref INPUT: String =
            std::fs::read_to_string("data/input-day-8.txt")
                .unwrap()
                .strip_suffix("\n")
                .unwrap()
                .to_string();
    }
    run_part_1(INPUT.to_string());
    run_part_2(INPUT.to_string());
}
/// Part 1: execute the instruction list until an instruction index is about
/// to repeat (infinite loop), then print the accumulator value.
fn run_part_1(input: String) {
    let instructions: Vec<&str> = input.lines().collect();
    // Indices already visited; a failed insert means the program is looping.
    let mut history: HashSet<usize> = HashSet::new();
    let mut stack = Vec::<&str>::new();
    // NOTE(review): the accumulator is f32 even though every operand is an
    // integer; exact for small inputs, but an integer type would be the
    // natural choice — confirm before changing the printed format.
    let mut acc: f32 = 0f32;
    let mut next_inst: usize = 0;
    stack.push(instructions[0]);
    while !stack.is_empty() {
        // Instructions look like "acc +1" / "jmp -3" / "nop +0".
        let instruction: Vec<&str> = stack.pop().unwrap().split(" ").collect();
        let result = run_instruction(next_inst, instruction[0], instruction[1]);
        next_inst = result.0;
        // Only apply the delta and continue while the target is unvisited;
        // otherwise the stack drains and the loop terminates.
        if history.insert(next_inst) {
            acc += result.1;
            if instructions.len() > next_inst {
                stack.push(instructions[next_inst]);
            }
        }
    }
    println!("{}", acc)
}
/// Part 2: same execution loop, but on each run exactly one `jmp`/`nop` is
/// flipped. If the run still loops, state is reset and the next candidate
/// flip is tried; the accumulator of the first terminating run is printed.
fn run_part_2(input: String) {
    let instructions: Vec<&str> = input.lines().collect();
    let mut history: HashSet<usize> = HashSet::new();
    let mut stack = Vec::<&str>::new();
    let mut acc: f32 = 0f32;
    let mut next_inst: usize = 0;
    stack.push(instructions[0]);
    // keep track of changes, and whether the
    // current run already has a changed 'jmp'
    // or 'nop'
    let mut changes: HashSet<usize> = HashSet::new();
    let mut unchanged = true;
    while !stack.is_empty() {
        let mut instruction: Vec<&str> = stack.pop().unwrap().split(" ").collect();
        if instruction[0] == "nop" || instruction[0] == "jmp" {
            // Flip at most one instruction per run, and never retry an index
            // that was already flipped in an earlier (failed) run.
            if unchanged && changes.insert(next_inst) {
                instruction[0] = if instruction[0] == "nop" {
                    "jmp"
                } else {
                    "nop"
                };
                unchanged = false;
            }
        }
        let result = run_instruction(next_inst, instruction[0], instruction[1]);
        next_inst = result.0;
        if history.insert(next_inst) {
            acc += result.1;
            if instructions.len() > next_inst {
                stack.push(instructions[next_inst]);
            }
        } else if unchanged == false {
            // if we did change a value on this run
            // then we need to start over, since
            // we are in an infinite loop
            next_inst = 0;
            acc = 0f32;
            stack.clear();
            stack.push(instructions[0]);
            history.clear();
            unchanged = true;
        }
    }
    println!("{}", acc)
}
/// Execute a single instruction.
///
/// * `line` – index of the instruction being executed.
/// * `instruction` – opcode: `"acc"`, `"jmp"` or `"nop"`.
/// * `value` – signed operand text such as `"+4"` or `"-3"`.
///
/// Returns `(next_line, accumulator_delta)`. Unknown opcodes fall through to
/// `(0, 0.0)`, preserving the original best-effort behaviour.
///
/// Panics if the operand does not parse as a signed number, or if a `jmp`
/// targets a line before the start of the program.
fn run_instruction(line: usize, instruction: &str, value: &str) -> (usize, f32) {
    match instruction {
        // "acc" adjusts the accumulator and advances one line.
        "acc" => (line + 1, value.parse().unwrap()),
        // "jmp" moves relative to the current line. Integer parsing accepts a
        // leading '+' or '-', so the previous manual split_at/from_str_radix
        // sign handling is unnecessary (and this also accepts an unsigned
        // operand like "4").
        "jmp" => {
            let offset: i64 = value.parse().unwrap();
            let target = line as i64 + offset;
            // Keep the original's fail-fast behaviour for jumps before line 0
            // instead of silently wrapping.
            assert!(target >= 0, "jmp target before start of program");
            (target as usize, 0f32)
        }
        // "nop" does nothing and advances one line.
        "nop" => (line + 1, 0f32),
        _ => (0, 0f32),
    }
}
|
use super::ema::rma_func;
use super::falling::declare_s_var;
use super::VarResult;
use crate::ast::stat_expr_types::VarIndex;
use crate::ast::syntax_type::{FunctionType, FunctionTypes, SimpleSyntaxType, SyntaxType};
use crate::helper::{
float_abs, float_max, move_element, pine_ref_to_bool, pine_ref_to_f64, pine_ref_to_f64_series,
pine_ref_to_i64, require_param, series_index,
};
use crate::runtime::context::{downcast_ctx, Ctx};
use crate::runtime::InputSrc;
use crate::types::{
downcast_pf_ref, int2float, Arithmetic, Callable, CallableFactory, Float, Int, PineRef,
RefData, RuntimeErr, Series, SeriesCall,
};
// Name under which this built-in variable is registered in the runtime.
pub const VAR_NAME: &'static str = "rising";
/// Returns `true` when none of the previous `length` values of `source`
/// strictly exceeds the current value (index 0).
///
/// NOTE(review): because the comparison is a strict `>`, an earlier value
/// merely *equal* to the current one still counts as rising — confirm this
/// matches the intended "rising" semantics. `index_value(i)` is unwrapped,
/// so the series is assumed to hold at least `length + 1` entries — TODO
/// confirm against `declare_s_var`'s guarantees.
fn check_greater<'a>(
    source: RefData<Series<Float>>,
    length: i64,
) -> Result<PineRef<'a>, RuntimeErr> {
    let cur_val = source.index_value(0).unwrap();
    for i in 1..=length as usize {
        if source.index_value(i).unwrap() > cur_val {
            return Ok(PineRef::new_rc(Series::from(false)));
        }
    }
    return Ok(PineRef::new_rc(Series::from(true)));
}
/// Registers the `rising` variable using the shared single-series declaration
/// helper from the `falling` module.
pub fn declare_var<'a>() -> VarResult<'a> {
    declare_s_var(VAR_NAME, check_greater)
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::ast::syntax_type::SyntaxType;
    use crate::runtime::VarOperate;
    use crate::runtime::{AnySeries, NoneCallback};
    use crate::types::Series;
    use crate::{LibInfo, PineParser, PineRunner};
    // Runs `rising(close, 2)` over [10, 20, 5]: the first two bars have no
    // higher predecessor within the window (true), the last bar is below 20
    // (false).
    #[test]
    fn accdist_test() {
        let lib_info = LibInfo::new(
            vec![declare_var()],
            vec![("close", SyntaxType::float_series())],
        );
        let src = "m = rising(close, 2)";
        let blk = PineParser::new(src, &lib_info).parse_blk().unwrap();
        let mut runner = PineRunner::new(&lib_info, &blk, &NoneCallback());
        runner
            .run(
                &vec![(
                    "close",
                    AnySeries::from_float_vec(vec![Some(10f64), Some(20f64), Some(5f64)]),
                )],
                None,
            )
            .unwrap();
        assert_eq!(
            runner.get_context().get_var(VarIndex::new(0, 0)),
            &Some(PineRef::new(Series::from_vec(vec![true, true, false])))
        );
    }
}
|
// Widget sub-modules exposed by this crate.
pub mod labeledbar;
pub mod table;
extern crate chrono;
extern crate rand;
mod mersenne_twister;
use self::mersenne_twister::MersenneTwister64;
use chrono::{Duration, Utc};
use rand::Rng;
/// Demonstrates cracking a time-seeded 64-bit Mersenne Twister: seed a
/// generator with a near-future timestamp, then recover that seed by
/// brute-forcing candidate timestamps and comparing the first output.
fn main() {
    let mut rng = rand::thread_rng();
    // Random offsets simulate unknown delays between seeding and cracking.
    let t1 = rng.gen_range(40, 1000);
    let t2 = rng.gen_range(40, 1000);
    let seed = (Utc::now() + Duration::seconds(t1)).timestamp() as u64;
    let mut mt = MersenneTwister64::new(seed);
    println!("Seeding MT with {}", seed);
    let out = mt.next_u64();
    println!("Got {}", out);
    // The cracker starts from a later timestamp and searches backwards.
    let start_crack = (Utc::now() + Duration::seconds(t1 + t2)).timestamp() as u64;
    println!("Starting search at {}", start_crack);
    let answer = crack_mt_timestamp(out, start_crack);
    if let Some(x) = answer {
        // Verify the cloned generator tracks the original. The first draw of
        // the clone is discarded because `mt`'s first draw was consumed above.
        let mut clone_mt = MersenneTwister64::new(x);
        clone_mt.next_u64();
        for _ in 0..1000 {
            assert_eq!(mt.next_u64(), clone_mt.next_u64());
        }
    } else {
        println!("FAIL");
    }
}
/// Brute-force the MT seed: walk backwards from `start` (up to 2000 seconds)
/// and return the first candidate timestamp whose generator reproduces
/// `output` as its first draw.
fn crack_mt_timestamp(output: u64, start: u64) -> Option<u64> {
    let max_lookback = 1000 * 2;
    let found = (0..max_lookback)
        .map(|offset| start - offset)
        .find(|&candidate| {
            let mut candidate_mt = MersenneTwister64::new(candidate);
            candidate_mt.next_u64() == output
        });
    if let Some(seed) = found {
        println!("Found seed of {}", seed);
    }
    found
}
|
use futures::Stream;
use geo_types::Geometry;
use h3ron::collections::{H3CellSet, RandomState};
use h3ron::iter::change_resolution;
use h3ron::{H3Cell, ToH3Cells};
use h3ron_polars::frame::H3DataFrame;
use postage::prelude::{Sink, Stream as _};
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::spawn;
use tokio::task::spawn_blocking;
use tracing::{debug, debug_span, info, Instrument};
use ukis_clickhouse_arrow_grpc::Client;
use crate::clickhouse::compacted_tables::{
CompactedTablesStore, QueryOptions, TableSet, TableSetQuery,
};
use crate::Error;
/// Select a coarser H3 resolution to traverse the tableset with, so that no
/// batch needs to fetch more than `max_h3indexes_fetch_count` indexes.
///
/// The returned resolution is one of the tableset's base resolutions below
/// `target_h3_resolution`, or `target_h3_resolution` itself when no base
/// resolution satisfies the limit.
fn select_traversal_resolution(
    tableset: &TableSet,
    target_h3_resolution: u8,
    max_h3indexes_fetch_count: usize,
) -> u8 {
    let mut base_resolutions: Vec<u8> = tableset
        .base_resolutions()
        .iter()
        .filter(|r| **r < target_h3_resolution)
        .copied()
        .collect();
    base_resolutions.sort_unstable();
    // Coarsest first: each resolution step multiplies the number of child
    // cells by 7, so pick the lowest resolution whose child count at the
    // target resolution stays within the fetch limit.
    let traversal_resolution = base_resolutions
        .into_iter()
        .find(|r| {
            7_u64.pow((target_h3_resolution - *r) as u32) <= (max_h3indexes_fetch_count as u64)
        })
        .unwrap_or(target_h3_resolution);
    info!(
        "traversal: using H3 res {} as traversal_resolution",
        traversal_resolution
    );
    traversal_resolution
}
/// Options controlling a tableset traversal.
pub struct TraversalOptions {
    /// the query to run
    pub query: TableSetQuery,
    /// the h3 resolutions which shall be fetched
    pub h3_resolution: u8,
    /// The maximum number of cells to fetch in one DB query.
    ///
    /// Please note that this setting controls only the number of cells
    /// requested from the DB. Should - for example - each cell have data
    /// for multiple time steps in the database, more rows will be returned.
    ///
    /// This setting is crucial to control the size of the messages transferred from
    /// Clickhouse. So, decrease when Clickhouse runs into GRPC message size limits
    /// (protobuf supports max. 2GB).
    pub max_h3indexes_fetch_count: usize,
    /// Number of parallel DB connections to use in the background.
    /// Depending with the number of connections used the amount of memory used increases as well as
    /// the load put onto the DB-Server. The benefit is getting data faster as it is pre-loaded in the
    /// background.
    pub num_connections: usize,
    /// optional prefilter query.
    ///
    /// This query will be applied to the tables in the reduced `traversal_h3_resolution` and only cells
    /// found by this query will be loaded from the tables in the requested full resolution
    pub filter_query: Option<TableSetQuery>,
    /// uncompact the cells loaded from the db. This should be true in most cases.
    pub do_uncompact: bool,
    /// include the cells within the buffer around the traversal-cell into the returned data.
    ///
    /// Unit for the buffer is number of cells in resolution `h3_resolution`. No buffer is used per default.
    pub include_buffer: u32,
}
impl Default for TraversalOptions {
    // Defaults: auto-generated query, conservative fetch size, three DB
    // connections, uncompaction on, no prefilter and no buffer.
    fn default() -> Self {
        Self {
            query: TableSetQuery::AutoGenerated,
            h3_resolution: 0,
            max_h3indexes_fetch_count: 500,
            num_connections: 3,
            filter_query: None,
            do_uncompact: true,
            include_buffer: 0,
        }
    }
}
impl TraversalOptions {
    /// Convenience constructor: defaults with just the resolution set.
    pub fn with_h3_resolution(h3_resolution: u8) -> Self {
        Self {
            h3_resolution,
            ..Default::default()
        }
    }
    /// Convenience constructor: defaults with query and resolution set.
    pub fn with_query_and_h3_resolution(query: TableSetQuery, h3_resolution: u8) -> Self {
        Self {
            query,
            h3_resolution,
            ..Default::default()
        }
    }
}
/// The area to traverse — either a vector geometry or an explicit cell list.
pub enum TraversalArea {
    Geometry(Geometry<f64>),
    H3Cells(Vec<H3Cell>),
}
impl TraversalArea {
    /// Convert the area to a deduplicated list of cells at
    /// `traversal_resolution`.
    ///
    /// The cells are returned sorted for a deterministic traversal order
    pub fn to_cells(&self, traversal_resolution: u8) -> Result<Vec<H3Cell>, Error> {
        let mut cells = match self {
            TraversalArea::Geometry(geometry) => {
                let mut cells: Vec<_> =
                    geometry.to_h3_cells(traversal_resolution)?.iter().collect();
                // always add the outer vertices of polygons to ensure having always cells
                // even when the polygon is too small to have any cells inside
                match geometry {
                    Geometry::Polygon(poly) => {
                        cells.extend(poly.exterior().to_h3_cells(traversal_resolution)?.iter())
                    }
                    Geometry::MultiPolygon(mpoly) => {
                        for poly in mpoly.0.iter() {
                            cells.extend(poly.exterior().to_h3_cells(traversal_resolution)?.iter());
                        }
                    }
                    _ => (),
                };
                cells
            }
            TraversalArea::H3Cells(cells) => {
                // Re-scale explicit cells to the requested resolution.
                change_resolution(cells.as_slice(), traversal_resolution)
                    .collect::<Result<Vec<_>, _>>()?
            }
        };
        cells.sort_unstable();
        cells.dedup();
        cells.shrink_to_fit();
        Ok(cells)
    }
}
impl From<Geometry<f64>> for TraversalArea {
    fn from(geom: Geometry<f64>) -> Self {
        Self::Geometry(geom)
    }
}
impl From<Vec<H3Cell>> for TraversalArea {
    fn from(cells: Vec<H3Cell>) -> Self {
        Self::H3Cells(cells)
    }
}
/// Handle to a running traversal; yields one `TraversedCell` per traversal
/// cell that contained data.
pub struct Traverser {
    pub num_traversal_cells: usize,
    pub traversal_h3_resolution: u8,
    // Channel fed by the background workers spawned in `traverse_inner`.
    dataframe_recv: tokio::sync::mpsc::Receiver<Result<TraversedCell, Error>>,
    num_cells_already_traversed: usize,
}
impl Stream for Traverser {
    type Item = Result<TraversedCell, Error>;
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let self_mut = self.get_mut();
        let polled = self_mut.dataframe_recv.poll_recv(cx);
        // NOTE(review): this also increments on the final `Ready(None)`;
        // harmless for the size_hint below, but confirm if the count is ever
        // used for progress reporting.
        if polled.is_ready() {
            self_mut.num_cells_already_traversed += 1;
        }
        polled
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        // yielding less elements than hinted is allowed, though not best practice.
        // We may yield less here when traversal cells to not contain data
        let num_cells_outstanding = self
            .num_traversal_cells
            .saturating_sub(self.num_cells_already_traversed);
        (num_cells_outstanding, Some(num_cells_outstanding))
    }
}
/// Traverse a tableset over `area`: picks a traversal resolution, converts
/// the area to traversal cells and starts the background loading pipeline.
pub async fn traverse(
    client: &mut Client,
    database_name: String,
    tableset_name: String,
    area: &TraversalArea,
    options: TraversalOptions,
) -> Result<Traverser, Error> {
    let tableset = client.get_tableset(&database_name, tableset_name).await?;
    let traversal_h3_resolution = select_traversal_resolution(
        &tableset,
        options.h3_resolution,
        options.max_h3indexes_fetch_count,
    );
    let traversal_cells = area.to_cells(traversal_h3_resolution)?;
    traverse_inner(
        client,
        database_name,
        tableset,
        traversal_cells,
        options,
        traversal_h3_resolution,
    )
    .await
}
/// Spawn the background pipeline: one dispatcher task feeds traversal cells
/// (optionally prefiltered) to `num_connections` worker tasks, which load the
/// data and push it into the channel consumed by the returned `Traverser`.
async fn traverse_inner(
    client: &mut Client,
    database_name: String,
    tableset: TableSet,
    traversal_cells: Vec<H3Cell>,
    options: TraversalOptions,
    traversal_h3_resolution: u8,
) -> Result<Traverser, Error> {
    let do_uncompact = options.do_uncompact;
    let num_traversal_cells = traversal_cells.len();
    let h3_resolution = options.h3_resolution;
    let include_buffer = options.include_buffer;
    let mut context = WorkerContext {
        client: client.clone(),
        database_name,
        tableset,
    };
    // Bounded channel: capacity limits how far the workers can pre-load.
    let (dataframe_send, dataframe_recv) = tokio::sync::mpsc::channel(options.num_connections);
    let _background_fetch = spawn(async move {
        let (mut trav_cells_send, _trav_cells_recv) =
            postage::dispatch::channel(2 * options.num_connections);
        // spawn the workers performing the db-work
        for _ in 0..(options.num_connections) {
            let mut worker_context = context.clone();
            let mut worker_trav_cells_recv = trav_cells_send.subscribe();
            let worker_dataframe_send = dataframe_send.clone();
            let worker_query = options.query.clone();
            spawn(async move {
                while let Some(cell) = worker_trav_cells_recv.recv().await {
                    let message = match load_traversed_cell(
                        &mut worker_context,
                        worker_query.clone(),
                        cell,
                        h3_resolution,
                        do_uncompact,
                        include_buffer,
                    )
                    .await
                    {
                        Ok(Some(traversed_cell)) => Ok(traversed_cell),
                        Ok(None) => {
                            // no data found, continue to the next cell
                            debug!("traversal cell yielded no data - skipping");
                            continue;
                        }
                        Err(e) => Err(e),
                    };
                    // A closed receiver means the consumer dropped the
                    // Traverser — shut this worker down.
                    if worker_dataframe_send.send(message).await.is_err() {
                        debug!("worker channel has been closed upstream. shutting down worker");
                        break;
                    } else {
                        debug!("traversal cell loaded and send");
                    }
                }
            });
        }
        // distribute the cells to the workers
        let _ = spawn(async move {
            if let Some(filter_query) = &options.filter_query {
                // Prefilter in chunks so each filter query stays small.
                for cell_chunk in traversal_cells.chunks(50) {
                    dispatch_traversal_cells(
                        &mut trav_cells_send,
                        prefilter_traversal_cells(
                            &mut context,
                            filter_query.clone(),
                            cell_chunk,
                            traversal_h3_resolution,
                        )
                        .await,
                    )
                    .await;
                }
            } else {
                dispatch_traversal_cells(&mut trav_cells_send, Ok(traversal_cells)).await;
            }
        })
        .await;
    });
    // end of this scope closes the local copy of the dataframe_send channel to allow the
    // pipeline to collapse when the traversal is finished
    Ok(Traverser {
        num_traversal_cells,
        traversal_h3_resolution,
        dataframe_recv,
        num_cells_already_traversed: 0,
    })
}
/// Per-worker clone of everything a DB worker task needs.
#[derive(Clone)]
struct WorkerContext {
    client: Client,
    database_name: String,
    tableset: TableSet,
}
/// Push traversal cells (or a single error) into the worker dispatch channel,
/// stopping early when all receivers are gone.
async fn dispatch_traversal_cells(
    sender: &mut postage::dispatch::Sender<Result<H3Cell, Error>>,
    traversal_cells: Result<Vec<H3Cell>, Error>,
) {
    match traversal_cells {
        Ok(cells) => {
            for cell in cells {
                if sender.send(Ok(cell)).await.is_err() {
                    debug!("sink rejected message");
                    break;
                }
            }
        }
        Err(e) => {
            // Forward the error itself so a worker can surface it downstream.
            if sender.send(Err(e)).await.is_err() {
                debug!("sink rejected message");
            }
        }
    }
}
/// Run the prefilter query over `cells` and return the deduplicated subset of
/// traversal cells which actually contain matching data.
async fn prefilter_traversal_cells(
    worker_context: &mut WorkerContext,
    filter_query: TableSetQuery,
    cells: &[H3Cell],
    traversal_h3_resolution: u8,
) -> Result<Vec<H3Cell>, Error> {
    if cells.is_empty() {
        return Ok(vec![]);
    }
    let filter_h3df = worker_context
        .client
        .query_tableset_cells(
            &worker_context.database_name,
            worker_context.tableset.clone(),
            QueryOptions::new(filter_query, cells.to_vec(), traversal_h3_resolution),
        )
        .await?;
    // use only the indexes from the filter query to be able to fetch a smaller subset
    spawn_blocking(move || {
        filter_h3df
            .h3indexchunked()
            .map_err(Error::from)
            .and_then(|ic| {
                ic.to_collection::<Vec<_>>()
                    .map_err(Error::from)
                    .map(|mut cells| {
                        // remove duplicates
                        cells.sort_unstable();
                        cells.dedup();
                        cells
                    })
            })
    })
    .await?
}
/// One yielded traversal result.
pub struct TraversedCell {
    /// the traversal cell whose child cells where loaded
    pub cell: H3Cell,
    /// dataframe containing the data of the child cells
    pub contained_data: H3DataFrame<H3Cell>,
}
/// Expand `cell` to its children at `buffer_h3_resolution` plus a disk of
/// `buffer_width` cells around each child.
fn buffer_cell(
    cell: H3Cell,
    buffer_h3_resolution: u8,
    buffer_width: u32,
) -> Result<Vec<H3Cell>, Error> {
    // TODO: brute force implementation - needs to be improved when used often
    let children = cell.get_children(buffer_h3_resolution)?;
    let mut cellset =
        H3CellSet::with_capacity_and_hasher(children.capacity(), RandomState::default());
    for child_cell in children.iter() {
        cellset.insert(child_cell);
        cellset.extend(child_cell.grid_disk(buffer_width)?.iter());
    }
    Ok(cellset.drain().collect())
}
/// Load the data for one traversal cell (optionally buffered) from the DB.
///
/// Returns `Ok(None)` when the query produced an empty dataframe, and
/// forwards an `Err` received from the dispatcher unchanged.
async fn load_traversed_cell(
    worker_context: &mut WorkerContext,
    query: TableSetQuery,
    cell: Result<H3Cell, Error>,
    h3_resolution: u8,
    do_uncompact: bool,
    include_buffer: u32,
) -> Result<Option<TraversedCell>, Error> {
    match cell {
        Ok(cell) => {
            // Buffer expansion is CPU-bound, so it runs on the blocking pool.
            let cells_to_load = if include_buffer == 0 {
                vec![cell]
            } else {
                spawn_blocking(move || buffer_cell(cell, h3_resolution, include_buffer)).await??
            };
            let mut query_options = QueryOptions::new(query, cells_to_load, h3_resolution);
            query_options.do_uncompact = do_uncompact;
            let contained_data = worker_context
                .client
                .query_tableset_cells(
                    &worker_context.database_name,
                    worker_context.tableset.clone(),
                    query_options,
                )
                .instrument(debug_span!(
                    "Loading traversal cell",
                    cell = cell.to_string().as_str()
                ))
                .await?;
            if contained_data.dataframe().shape().0 == 0 {
                // no data found, continue to the next cell
                debug!("Discarding received empty dataframe");
                return Ok(None);
            }
            Ok(Some(TraversedCell {
                cell,
                contained_data,
            }))
        }
        Err(e) => Err(e),
    }
}
|
// svd2rust-generated register accessors for CHMAP0 (uDMA channel source map).
// Eight 4-bit fields (CH0SEL..CH7SEL) select the trigger source per channel.
#[doc = "Reader of register CHMAP0"]
pub type R = crate::R<u32, super::CHMAP0>;
#[doc = "Writer for register CHMAP0"]
pub type W = crate::W<u32, super::CHMAP0>;
#[doc = "Register CHMAP0 `reset()`'s with value 0"]
impl crate::ResetValue for super::CHMAP0 {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
// Field reader/writer pairs; each writer masks its 4-bit slot and ORs the
// new value in at the field's bit offset.
#[doc = "Reader of field `CH0SEL`"]
pub type CH0SEL_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `CH0SEL`"]
pub struct CH0SEL_W<'a> {
    w: &'a mut W,
}
impl<'a> CH0SEL_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !0x0f) | ((value as u32) & 0x0f);
        self.w
    }
}
#[doc = "Reader of field `CH1SEL`"]
pub type CH1SEL_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `CH1SEL`"]
pub struct CH1SEL_W<'a> {
    w: &'a mut W,
}
impl<'a> CH1SEL_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x0f << 4)) | (((value as u32) & 0x0f) << 4);
        self.w
    }
}
#[doc = "Reader of field `CH2SEL`"]
pub type CH2SEL_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `CH2SEL`"]
pub struct CH2SEL_W<'a> {
    w: &'a mut W,
}
impl<'a> CH2SEL_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x0f << 8)) | (((value as u32) & 0x0f) << 8);
        self.w
    }
}
#[doc = "Reader of field `CH3SEL`"]
pub type CH3SEL_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `CH3SEL`"]
pub struct CH3SEL_W<'a> {
    w: &'a mut W,
}
impl<'a> CH3SEL_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x0f << 12)) | (((value as u32) & 0x0f) << 12);
        self.w
    }
}
#[doc = "Reader of field `CH4SEL`"]
pub type CH4SEL_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `CH4SEL`"]
pub struct CH4SEL_W<'a> {
    w: &'a mut W,
}
impl<'a> CH4SEL_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x0f << 16)) | (((value as u32) & 0x0f) << 16);
        self.w
    }
}
#[doc = "Reader of field `CH5SEL`"]
pub type CH5SEL_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `CH5SEL`"]
pub struct CH5SEL_W<'a> {
    w: &'a mut W,
}
impl<'a> CH5SEL_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x0f << 20)) | (((value as u32) & 0x0f) << 20);
        self.w
    }
}
#[doc = "Reader of field `CH6SEL`"]
pub type CH6SEL_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `CH6SEL`"]
pub struct CH6SEL_W<'a> {
    w: &'a mut W,
}
impl<'a> CH6SEL_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x0f << 24)) | (((value as u32) & 0x0f) << 24);
        self.w
    }
}
#[doc = "Reader of field `CH7SEL`"]
pub type CH7SEL_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `CH7SEL`"]
pub struct CH7SEL_W<'a> {
    w: &'a mut W,
}
impl<'a> CH7SEL_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x0f << 28)) | (((value as u32) & 0x0f) << 28);
        self.w
    }
}
// Typed field readers extracting each 4-bit slot from the register value.
impl R {
    #[doc = "Bits 0:3 - uDMA Channel 0 Source Select"]
    #[inline(always)]
    pub fn ch0sel(&self) -> CH0SEL_R {
        CH0SEL_R::new((self.bits & 0x0f) as u8)
    }
    #[doc = "Bits 4:7 - uDMA Channel 1 Source Select"]
    #[inline(always)]
    pub fn ch1sel(&self) -> CH1SEL_R {
        CH1SEL_R::new(((self.bits >> 4) & 0x0f) as u8)
    }
    #[doc = "Bits 8:11 - uDMA Channel 2 Source Select"]
    #[inline(always)]
    pub fn ch2sel(&self) -> CH2SEL_R {
        CH2SEL_R::new(((self.bits >> 8) & 0x0f) as u8)
    }
    #[doc = "Bits 12:15 - uDMA Channel 3 Source Select"]
    #[inline(always)]
    pub fn ch3sel(&self) -> CH3SEL_R {
        CH3SEL_R::new(((self.bits >> 12) & 0x0f) as u8)
    }
    #[doc = "Bits 16:19 - uDMA Channel 4 Source Select"]
    #[inline(always)]
    pub fn ch4sel(&self) -> CH4SEL_R {
        CH4SEL_R::new(((self.bits >> 16) & 0x0f) as u8)
    }
    #[doc = "Bits 20:23 - uDMA Channel 5 Source Select"]
    #[inline(always)]
    pub fn ch5sel(&self) -> CH5SEL_R {
        CH5SEL_R::new(((self.bits >> 20) & 0x0f) as u8)
    }
    #[doc = "Bits 24:27 - uDMA Channel 6 Source Select"]
    #[inline(always)]
    pub fn ch6sel(&self) -> CH6SEL_R {
        CH6SEL_R::new(((self.bits >> 24) & 0x0f) as u8)
    }
    #[doc = "Bits 28:31 - uDMA Channel 7 Source Select"]
    #[inline(always)]
    pub fn ch7sel(&self) -> CH7SEL_R {
        CH7SEL_R::new(((self.bits >> 28) & 0x0f) as u8)
    }
}
// Typed field writers returning the write proxies defined above.
impl W {
    #[doc = "Bits 0:3 - uDMA Channel 0 Source Select"]
    #[inline(always)]
    pub fn ch0sel(&mut self) -> CH0SEL_W {
        CH0SEL_W { w: self }
    }
    #[doc = "Bits 4:7 - uDMA Channel 1 Source Select"]
    #[inline(always)]
    pub fn ch1sel(&mut self) -> CH1SEL_W {
        CH1SEL_W { w: self }
    }
    #[doc = "Bits 8:11 - uDMA Channel 2 Source Select"]
    #[inline(always)]
    pub fn ch2sel(&mut self) -> CH2SEL_W {
        CH2SEL_W { w: self }
    }
    #[doc = "Bits 12:15 - uDMA Channel 3 Source Select"]
    #[inline(always)]
    pub fn ch3sel(&mut self) -> CH3SEL_W {
        CH3SEL_W { w: self }
    }
    #[doc = "Bits 16:19 - uDMA Channel 4 Source Select"]
    #[inline(always)]
    pub fn ch4sel(&mut self) -> CH4SEL_W {
        CH4SEL_W { w: self }
    }
    #[doc = "Bits 20:23 - uDMA Channel 5 Source Select"]
    #[inline(always)]
    pub fn ch5sel(&mut self) -> CH5SEL_W {
        CH5SEL_W { w: self }
    }
    #[doc = "Bits 24:27 - uDMA Channel 6 Source Select"]
    #[inline(always)]
    pub fn ch6sel(&mut self) -> CH6SEL_W {
        CH6SEL_W { w: self }
    }
    #[doc = "Bits 28:31 - uDMA Channel 7 Source Select"]
    #[inline(always)]
    pub fn ch7sel(&mut self) -> CH7SEL_W {
        CH7SEL_W { w: self }
    }
}
|
extern crate jlib;
use jlib::api::account_relations::api::request;
use jlib::api::account_relations::data::{RequestAccountRelationsResponse, RelationsSideKick};
use jlib::api::config::Config;
// Websocket endpoint of the public test server used by the examples.
static TEST_SERVER: &'static str = "ws://101.200.176.249:5040";
/// Requests the relations of a fixed test account (filtered to "trust") and
/// prints either the typed response or the error sidekick from the callback.
fn main() {
    let config = Config::new(TEST_SERVER, true);
    let account = "j9syYwWgtmjchcbqhVB18pmFqXUYahZvvg".to_string();
    // Relation-type filter; `Some("trust")` restricts the query to trust lines.
    let rtype = Some("trust".to_string());
    request(config, account, rtype, |x| match x {
        Ok(response) => {
            // Explicit annotation pins the generic response type for inference.
            let res: RequestAccountRelationsResponse = response;
            println!("account relations: {:?}", &res);
        },
        Err(e) => {
            let err: RelationsSideKick= e;
            println!("err: {:?}", err);
        }
    });
}
|
// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/// This module tests pkg_resolver's RewriteManager when
/// dynamic rewrite rules have been disabled.
use {
super::*,
crate::mock_filesystem::spawn_directory_handler,
fidl_fuchsia_pkg_rewrite::EngineProxy as RewriteEngineProxy,
fuchsia_async as fasync,
fuchsia_url_rewrite::{Rule, RuleConfig},
fuchsia_zircon::Status,
std::{convert::TryInto, fs::File},
};
impl Mounts {
fn add_dynamic_rewrite_rules(&self, rule_config: &RuleConfig) {
if let DirOrProxy::Dir(ref d) = self.pkg_resolver_data {
let f = File::create(d.path().join("rewrites.json")).unwrap();
serde_json::to_writer(BufWriter::new(f), rule_config).unwrap();
} else {
panic!("not supported");
}
}
}
/// Wrap a single rule in a version-1 rule config.
fn make_rule_config(rule: &Rule) -> RuleConfig {
    let rules = vec![rule.clone()];
    RuleConfig::Version1(rules)
}
/// Identity rewrite rule for example.com used throughout these tests.
fn make_rule() -> Rule {
    let rule = Rule::new("example.com", "example.com", "/", "/");
    rule.unwrap()
}
/// Drain the rewrite engine's rule iterator and return every rule it yields.
/// The iterator signals completion with an empty batch.
async fn get_rules(rewrite_engine: &RewriteEngineProxy) -> Vec<Rule> {
    let (rule_iterator, rule_iterator_server) =
        fidl::endpoints::create_proxy().expect("create rule iterator proxy");
    rewrite_engine.list(rule_iterator_server).expect("list rules");
    let mut collected = Vec::new();
    loop {
        let batch = rule_iterator.next().await.expect("advance rule iterator");
        if batch.is_empty() {
            break;
        }
        for raw in batch {
            collected.push(raw.try_into().unwrap());
        }
    }
    collected
}
#[fasync::run_singlethreaded(test)]
// With dynamic configuration enabled, rules persisted in rewrites.json must be
// loaded on startup and be visible through the rewrite engine.
async fn load_dynamic_rules() {
    let mounts = Mounts::new();
    let rule = make_rule();
    mounts.add_dynamic_rewrite_rules(&make_rule_config(&rule));
    mounts.add_config(&Config { disable_dynamic_configuration: false });
    let env = TestEnv::new_with_mounts(mounts);
    assert_eq!(get_rules(&env.proxies.rewrite_engine).await, vec![rule]);
    env.stop().await;
}
#[fasync::run_singlethreaded(test)]
// Persisted rules must be ignored when dynamic configuration is disabled.
async fn no_load_dynamic_rules_if_disabled() {
    let mounts = Mounts::new();
    mounts.add_dynamic_rewrite_rules(&make_rule_config(&make_rule()));
    mounts.add_config(&Config { disable_dynamic_configuration: true });
    let env = TestEnv::new_with_mounts(mounts);
    assert_eq!(get_rules(&env.proxies.rewrite_engine).await, vec![]);
    env.stop().await;
}
#[fasync::run_singlethreaded(test)]
// An edit transaction (add + commit) succeeds by default and the committed
// rule becomes visible through the engine.
async fn commit_transaction_succeeds() {
    let env = TestEnv::new();
    let (edit_transaction, edit_transaction_server) = fidl::endpoints::create_proxy().unwrap();
    env.proxies.rewrite_engine.start_edit_transaction(edit_transaction_server).unwrap();
    let rule = make_rule();
    Status::ok(edit_transaction.add(&mut rule.clone().into()).await.unwrap()).unwrap();
    assert_eq!(Status::from_raw(edit_transaction.commit().await.unwrap()), Status::OK);
    assert_eq!(get_rules(&env.proxies.rewrite_engine).await, vec![rule]);
    env.stop().await;
}
#[fasync::run_singlethreaded(test)]
// With dynamic configuration disabled, adding to a transaction still works
// but commit is rejected with ACCESS_DENIED and no rule is stored.
async fn commit_transaction_fails_if_disabled() {
    let mounts = Mounts::new();
    mounts.add_config(&Config { disable_dynamic_configuration: true });
    let env = TestEnv::new_with_mounts(mounts);
    let (edit_transaction, edit_transaction_server) = fidl::endpoints::create_proxy().unwrap();
    env.proxies.rewrite_engine.start_edit_transaction(edit_transaction_server).unwrap();
    Status::ok(edit_transaction.add(&mut make_rule().into()).await.unwrap()).unwrap();
    assert_eq!(Status::from_raw(edit_transaction.commit().await.unwrap()), Status::ACCESS_DENIED);
    assert_eq!(get_rules(&env.proxies.rewrite_engine).await, vec![]);
    env.stop().await;
}
#[fasync::run_singlethreaded(test)]
// pkg_resolver should try to open rewrites.json exactly once on startup when
// dynamic configuration is enabled (the default).
async fn attempt_to_open_persisted_dynamic_rules() {
    let (proxy, open_counts) = spawn_directory_handler();
    let mounts = Mounts {
        pkg_resolver_data: DirOrProxy::Proxy(proxy),
        pkg_resolver_config_data: DirOrProxy::Dir(tempfile::tempdir().expect("/tmp to exist")),
    };
    let env = TestEnv::new_with_mounts(mounts);
    // Waits for pkg_resolver to be initialized
    get_rules(&env.proxies.rewrite_engine).await;
    assert_eq!(open_counts.lock().get("rewrites.json"), Some(&1));
    env.stop().await;
}
#[fasync::run_singlethreaded(test)]
// With dynamic configuration disabled, pkg_resolver must never even try to
// open rewrites.json.
async fn no_attempt_to_open_persisted_dynamic_rules_if_disabled() {
    let (proxy, open_counts) = spawn_directory_handler();
    let mounts = Mounts {
        pkg_resolver_data: DirOrProxy::Proxy(proxy),
        pkg_resolver_config_data: DirOrProxy::Dir(tempfile::tempdir().expect("/tmp to exist")),
    };
    mounts.add_config(&Config { disable_dynamic_configuration: true });
    let env = TestEnv::new_with_mounts(mounts);
    // Waits for pkg_resolver to be initialized
    get_rules(&env.proxies.rewrite_engine).await;
    assert_eq!(open_counts.lock().get("rewrites.json"), None);
    env.stop().await;
}
|
//! Bindings to the [capstone library][upstream] disassembly framework.
//!
//! This crate is a wrapper around the
//! [Capstone disassembly library](http://www.capstone-engine.org/),
//! a "lightweight multi-platform, multi-architecture disassembly framework."
//!
//! The [`Capstone`](struct.Capstone.html) struct is the main interface to the library.
//!
//! ```rust
//! extern crate capstone;
//! use capstone::prelude::*;
//!
//! const CODE: &'static [u8] = b"\x55\x48\x8b\x05\xb8\x13\x00\x00";
//! fn main() {
//!     match Capstone::new().x86().mode(arch::x86::ArchMode::Mode64).build() {
//! Ok(cs) => {
//! match cs.disasm_all(CODE, 0x1000) {
//! Ok(insns) => {
//! println!("Got {} instructions", insns.len());
//!
//! for i in insns.iter() {
//! println!("{}", i);
//! }
//! },
//! Err(err) => {
//! println!("Error: {}", err)
//! }
//! }
//! },
//! Err(err) => {
//! println!("Error: {}", err)
//! }
//! }
//! }
//! ```
//!
//! Produces:
//!
//! ```no_test
//! Got 2 instructions
//! 0x1000: push rbp
//! 0x1001: mov rax, qword ptr [rip + 0x13b8]
//! ```
//!
//! [upstream]: http://capstone-engine.org/
//!
extern crate capstone_sys;
pub mod arch;
mod capstone;
mod constants;
mod error;
mod instruction;
pub use capstone::*;
pub use constants::*;
pub use instruction::*;
pub use error::*;
/// Contains items that you probably want to always import
///
/// For example:
///
/// ```
/// use capstone::prelude::*;
/// ```
pub mod prelude {
    // The main engine type and its result alias.
    pub use {Capstone, CsResult};
    // Per-architecture builder traits needed to configure a `Capstone`.
    pub use arch::{self, BuildsCapstone, BuildsCapstoneEndian, BuildsCapstoneExtraMode,
                   BuildsCapstoneSyntax};
}
#[cfg(test)]
mod test {
use std::collections::HashSet;
use capstone_sys::cs_group_type;
use super::*;
use super::arch::*;
const X86_CODE: &'static [u8] = b"\x55\x48\x8b\x05\xb8\x13\x00\x00";
const ARM_CODE: &'static [u8] = b"\x55\x48\x8b\x05\xb8\x13\x00\x00";
// Aliases for group types
const JUMP: cs_group_type = cs_group_type::CS_GRP_JUMP;
const CALL: cs_group_type = cs_group_type::CS_GRP_CALL;
const RET: cs_group_type = cs_group_type::CS_GRP_RET;
const INT: cs_group_type = cs_group_type::CS_GRP_INT;
const IRET: cs_group_type = cs_group_type::CS_GRP_IRET;
#[test]
// The 8-byte x86-64 sample decodes to exactly two instructions:
// push rbp (1 byte) and a 7-byte rip-relative mov.
fn test_x86_simple() {
    match Capstone::new().x86().mode(x86::ArchMode::Mode64).build() {
        Ok(cs) => {
            match cs.disasm_all(X86_CODE, 0x1000) {
                Ok(insns) => {
                    assert_eq!(insns.len(), 2);
                    let is: Vec<_> = insns.iter().collect();
                    assert_eq!(is[0].mnemonic().unwrap(), "push");
                    assert_eq!(is[1].mnemonic().unwrap(), "mov");
                    // Addresses advance by instruction length from base 0x1000.
                    assert_eq!(is[0].address(), 0x1000);
                    assert_eq!(is[1].address(), 0x1001);
                    assert_eq!(is[0].bytes(), b"\x55");
                    assert_eq!(is[1].bytes(), b"\x48\x8b\x05\xb8\x13\x00\x00");
                }
                Err(err) => assert!(false, "Couldn't disasm instructions: {}", err),
            }
        }
        Err(e) => {
            assert!(false, "Couldn't create a cs engine: {}", e);
        }
    }
}
#[test]
// ARM_CODE holds the same bytes as X86_CODE; in ARM mode they decode to two
// 4-byte conditional stores (streq / strheq) instead.
fn test_arm_simple() {
    match Capstone::new().arm().mode(arm::ArchMode::Arm).build() {
        Ok(cs) => {
            match cs.disasm_all(ARM_CODE, 0x1000) {
                Ok(insns) => {
                    assert_eq!(insns.len(), 2);
                    let is: Vec<_> = insns.iter().collect();
                    assert_eq!(is[0].mnemonic().unwrap(), "streq");
                    assert_eq!(is[1].mnemonic().unwrap(), "strheq");
                    // Fixed 4-byte ARM instruction width.
                    assert_eq!(is[0].address(), 0x1000);
                    assert_eq!(is[1].address(), 0x1004);
                }
                Err(err) => assert!(false, "Couldn't disasm instructions: {}", err),
            }
        }
        Err(e) => {
            assert!(false, "Couldn't create a cs engine: {}", e);
        }
    }
}
#[test]
// The sample bytes are not valid ARM64 encodings, so disassembly yields an
// empty instruction list (not an error).
fn test_arm64_none() {
    let engine = Capstone::new().arm64().mode(arm64::ArchMode::Arm).build().unwrap();
    let decoded = engine.disasm_all(ARM_CODE, 0x1000).unwrap();
    assert!(decoded.is_empty());
}
#[test]
// Name-lookup APIs: small ids map to known x86 names, out-of-range ids
// (6000) must return None.
fn test_x86_names() {
    match Capstone::new().x86().mode(x86::ArchMode::Mode32).build() {
        Ok(cs) => {
            let reg_id = 1;
            match cs.reg_name(reg_id) {
                Some(reg_name) => assert_eq!(reg_name, "ah"),
                None => assert!(false, "Couldn't get register name"),
            }
            let insn_id = 1;
            match cs.insn_name(insn_id) {
                Some(insn_name) => assert_eq!(insn_name, "aaa"),
                None => assert!(false, "Couldn't get instruction name"),
            }
            assert_eq!(cs.group_name(1), Some(String::from("jump")));
            // Invalid ids below: every lookup must fail gracefully.
            let reg_id = 6000;
            match cs.reg_name(reg_id) {
                Some(_) => assert!(false, "invalid register worked"),
                None => {}
            }
            let insn_id = 6000;
            match cs.insn_name(insn_id) {
                Some(_) => assert!(false, "invalid instruction worked"),
                None => {}
            }
            assert_eq!(cs.group_name(6000), None);
        }
        Err(e) => {
            assert!(false, "Couldn't create a cs engine: {}", e);
        }
    }
}
#[test]
// Group queries require detail mode; with detail explicitly off they must
// fail with CapstoneError::DetailOff for every instruction.
fn test_detail_false_fail() {
    let mut cs = Capstone::new()
        .x86()
        .mode(x86::ArchMode::Mode64)
        .build()
        .unwrap();
    cs.set_detail(false).unwrap();
    let insns: Vec<_> = cs.disasm_all(X86_CODE, 0x1000).unwrap().iter().collect();
    assert_eq!(
        cs.insn_belongs_to_group(&insns[0], 0),
        Err(Error::Capstone(CapstoneError::DetailOff))
    );
    assert_eq!(
        cs.insn_belongs_to_group(&insns[1], 0),
        Err(Error::Capstone(CapstoneError::DetailOff))
    );
}
#[test]
// Detail mode can be enabled either after build (set_detail) or at build
// time (.detail(true)); both engines must answer group queries. Neither
// sample instruction belongs to any of the control-flow groups.
fn test_detail_true() {
    let mut cs1 = Capstone::new()
        .x86()
        .mode(x86::ArchMode::Mode64)
        .build()
        .unwrap();
    cs1.set_detail(true).unwrap();
    let cs2 = Capstone::new()
        .x86()
        .mode(x86::ArchMode::Mode64)
        .detail(true)
        .build()
        .unwrap();
    for cs in [cs1, cs2].iter_mut() {
        let insns: Vec<_> = cs.disasm_all(X86_CODE, 0x1000).unwrap().iter().collect();
        let insn_group_ids = [
            cs_group_type::CS_GRP_JUMP,
            cs_group_type::CS_GRP_CALL,
            cs_group_type::CS_GRP_RET,
            cs_group_type::CS_GRP_INT,
            cs_group_type::CS_GRP_IRET,
        ];
        // Both disassembled instructions (indices 0 and 1).
        for insn_idx in 0..1 + 1 {
            for insn_group_id in &insn_group_ids {
                assert_eq!(
                    cs.insn_belongs_to_group(&insns[insn_idx], *insn_group_id as u64),
                    Ok(false)
                );
            }
        }
    }
}
/// Assert `insn` has the expected mnemonic and raw bytes.
fn test_instruction_helper(
    cs: &Capstone,
    insn: &Insn,
    mnemonic_name: &str,
    bytes: &[u8],
    has_default_syntax: bool,
) {
    // Check mnemonic
    if has_default_syntax {
        // insn_name() does not respect current syntax
        // does not always match the internal mnemonic
        cs.insn_name(insn.id() as u64).expect(
            "Failed to get instruction name",
        );
    }
    assert_eq!(
        mnemonic_name,
        insn.mnemonic().expect("Failed to get mnemonic"),
        "Did not match contained insn.mnemonic"
    );
    // Assert instruction bytes match
    assert_eq!(bytes, insn.bytes());
}
/// Assert instruction belongs or does not belong to groups, testing both insn_belongs_to_group
/// and insn_group_ids
fn test_instruction_group_helper(
    cs: &Capstone,
    insn: &Insn,
    mnemonic_name: &str,
    bytes: &[u8],
    expected_groups: &[cs_group_type],
    has_default_syntax: bool,
) {
    test_instruction_helper(&cs, insn, mnemonic_name, bytes, has_default_syntax);
    // Assert expected instruction groups is a subset of computed groups through ids.
    // (The previous version computed and checked this identical u8 set twice;
    // a single check is equivalent.)
    let instruction_group_ids: HashSet<u8> = cs.insn_group_ids(&insn)
        .expect("failed to get instruction groups")
        .iter()
        .cloned()
        .collect();
    let expected_groups_ids: HashSet<u8> = expected_groups.iter().map(|&x| x as u8).collect();
    assert!(
        expected_groups_ids.is_subset(&instruction_group_ids),
        "Expected groups {:?} does NOT match computed insn groups {:?}",
        expected_groups_ids,
        instruction_group_ids
    );
    // Create sets of expected groups and unexpected groups
    let instruction_types: HashSet<cs_group_type> = [
        cs_group_type::CS_GRP_JUMP,
        cs_group_type::CS_GRP_CALL,
        cs_group_type::CS_GRP_RET,
        cs_group_type::CS_GRP_INT,
        cs_group_type::CS_GRP_IRET,
    ].iter()
        .cloned()
        .collect();
    let expected_groups_set: HashSet<cs_group_type> =
        expected_groups.iter().cloned().collect();
    let not_belong_groups = instruction_types.difference(&expected_groups_set);
    // Assert instruction belongs to belong_groups
    for &belong_group in expected_groups {
        assert_eq!(
            Ok(true),
            cs.insn_belongs_to_group(&insn, belong_group as u64),
            "{:?} does NOT BELONG to group {:?}, but the instruction SHOULD",
            insn,
            belong_group
        );
    }
    // Assert instruction does not belong to not_belong_groups.
    // Fix: the loop pattern had been corrupted to `¬_belong_group` by an
    // HTML-entity conversion of `&not...`; restored to `&not_belong_group`.
    for &not_belong_group in not_belong_groups {
        assert_eq!(
            Ok(false),
            cs.insn_belongs_to_group(&insn, not_belong_group as u64),
            "{:?} BELONGS to group {:?}, but the instruction SHOULD NOT",
            insn,
            not_belong_group
        );
    }
    // @todo: check read_register_ids
    // @todo: check write_register_ids
}
/// Disassemble the concatenation of `expected_insns`' bytes and assert each
/// decoded instruction's mnemonic, bytes, and group membership.
fn instructions_match_group(
    cs: &mut Capstone,
    expected_insns: &[(&str, &[u8], &[cs_group_type])],
    has_default_syntax: bool,
) {
    // Details required to get groups information
    cs.set_detail(true).unwrap();
    if expected_insns.is_empty() {
        // Input was empty, which will cause disasm_all() to fail.
        // (Check moved before buffer construction so no work is wasted.)
        return;
    }
    let insns_buf: Vec<u8> = expected_insns
        .iter()
        .flat_map(|&(_, bytes, _)| bytes)
        .cloned()
        .collect();
    let insns: Vec<_> = cs.disasm_all(&insns_buf, 0x1000)
        .expect("Failed to disassemble")
        .iter()
        .collect();
    // Check number of instructions
    assert_eq!(insns.len(), expected_insns.len());
    for (insn, &(expected_mnemonic, expected_bytes, expected_groups)) in
        insns.iter().zip(expected_insns)
    {
        test_instruction_group_helper(
            &cs,
            insn,
            expected_mnemonic,
            expected_bytes,
            expected_groups,
            has_default_syntax,
        )
    }
}
/// Disassemble the concatenation of `expected_insns`' bytes and assert each
/// decoded instruction's mnemonic and bytes (no group checks).
fn instructions_match(
    cs: &mut Capstone,
    expected_insns: &[(&str, &[u8])],
    has_default_syntax: bool,
) {
    // NOTE(review): detail mode is kept on for parity with
    // instructions_match_group; mnemonic/byte checks alone do not need it.
    cs.set_detail(true).unwrap();
    if expected_insns.is_empty() {
        // Input was empty, which will cause disasm_all() to fail.
        // (Check moved before buffer construction so no work is wasted.)
        return;
    }
    let insns_buf: Vec<u8> = expected_insns
        .iter()
        .flat_map(|&(_, bytes)| bytes)
        .cloned()
        .collect();
    let insns: Vec<_> = cs.disasm_all(&insns_buf, 0x1000)
        .expect("Failed to disassemble")
        .iter()
        .collect();
    // Check number of instructions
    assert_eq!(insns.len(), expected_insns.len());
    for (insn, &(expected_mnemonic, expected_bytes)) in insns.iter().zip(expected_insns) {
        test_instruction_helper(
            &cs,
            insn,
            expected_mnemonic,
            expected_bytes,
            has_default_syntax,
        )
    }
}
#[test]
// End-to-end group classification on x86-64: control-flow instructions map
// to their group, everything else to no group.
fn test_instruction_group_ids() {
    let expected_insns: &[(&str, &[u8], &[cs_group_type])] =
        &[
            ("nop", b"\x90", &[]),
            ("je", b"\x74\x05", &[JUMP]),
            ("call", b"\xe8\x28\x07\x00\x00", &[CALL]),
            ("ret", b"\xc3", &[RET]),
            ("syscall", b"\x0f\x05", &[INT]),
            ("iretd", b"\xcf", &[IRET]),
            ("sub", b"\x48\x83\xec\x08", &[]),
            ("test", b"\x48\x85\xc0", &[]),
            ("mov", b"\x48\x8b\x05\x95\x4a\x4d\x00", &[]),
            ("mov", b"\xb9\x04\x02\x00\x00", &[]),
        ];
    let mut cs = Capstone::new()
        .x86()
        .mode(x86::ArchMode::Mode64)
        .build()
        .unwrap();
    instructions_match_group(&mut cs, expected_insns, true);
}
/// Each (mnemonic, bytes) entry must disassemble to exactly one instruction
/// with the given mnemonic.
fn test_insns_match(cs: &mut Capstone, insns: &[(&str, &[u8])]) {
    for &(mnemonic, bytes) in insns {
        let decoded = cs.disasm_all(bytes, 0x1000).unwrap();
        assert_eq!(decoded.len(), 1);
        let first = decoded.iter().next().unwrap();
        assert_eq!(first.mnemonic(), Some(mnemonic));
    }
}
/// Build a raw engine with `extra_mode` enabled and check that both the
/// always-valid instructions and the extra-mode-only instructions decode.
fn test_extra_mode_helper(
    arch: Arch,
    mode: Mode,
    extra_mode: &[ExtraMode],
    valid_both_insns: &[(&str, &[u8])],
    valid_extra_mode: &[(&str, &[u8])],
) {
    let extra_mode = extra_mode.iter().map(|x| *x);
    let mut cs = Capstone::new_raw(arch, mode, extra_mode, None).unwrap();
    test_insns_match(&mut cs, valid_both_insns);
    for &(_, _) in valid_extra_mode.iter() {
        // Capstone will disassemble instructions not allowed by the current mode
        // assert!(
        //     cs.disasm_all(bytes, 0x1000).is_err(),
        //     "Disassembly succeeded when on instruction when it should not have for {:?}",
        //     bytes);
    }
    test_insns_match(&mut cs, valid_both_insns);
    test_insns_match(&mut cs, valid_extra_mode);
}
#[test]
// ARMv8 extra mode: a plain store decodes everywhere; vcvtt.f64.f16 needs V8.
fn test_extra_mode() {
    test_extra_mode_helper(
        Arch::ARM,
        Mode::Arm,
        &[ExtraMode::V8],
        &[("str", b"\x04\xe0\x2d\xe5")],
        &[("vcvtt.f64.f16", b"\xe0\x3b\xb2\xee")],
    );
}
/// Disassemble `insns` twice — once with the caller's builder-configured `cs`
/// and once with an engine built from the raw arch/mode/extra-mode/endian
/// values — and assert both produce the expected mnemonics and bytes.
fn test_arch_mode_endian_insns(
    cs: &mut Capstone,
    arch: Arch,
    mode: Mode,
    endian: Option<Endian>,
    extra_mode: &[ExtraMode],
    insns: &[(&str, &[u8])],
) {
    // `(&str, &[u8])` is Copy, so the identity map the old code used is
    // just `to_vec()`.
    let expected_insns: Vec<(&str, &[u8])> = insns.to_vec();
    let extra_mode = extra_mode.iter().cloned();
    let mut cs_raw = Capstone::new_raw(arch, mode, extra_mode, endian).unwrap();
    instructions_match(&mut cs_raw, expected_insns.as_slice(), true);
    instructions_match(cs, expected_insns.as_slice(), true);
}
#[test]
// Each entry is (intel mnemonic, AT&T mnemonic, bytes, groups). The same
// engine is checked under the default, Intel, and AT&T syntaxes; AT&T is
// flagged as non-default so insn_name() is skipped for it.
fn test_syntax() {
    let expected_insns: &[(&str, &str, &[u8], &[cs_group_type])] =
        &[
            ("nop", "nop", b"\x90", &[]),
            ("je", "je", b"\x74\x05", &[JUMP]),
            ("call", "callq", b"\xe8\x28\x07\x00\x00", &[CALL]),
            ("ret", "retq", b"\xc3", &[RET]),
            ("syscall", "syscall", b"\x0f\x05", &[INT]),
            ("iretd", "iretl", b"\xcf", &[IRET]),
            ("sub", "subq", b"\x48\x83\xec\x08", &[]),
            ("test", "testq", b"\x48\x85\xc0", &[]),
            ("mov", "movq", b"\x48\x8b\x05\x95\x4a\x4d\x00", &[]),
            ("mov", "movl", b"\xb9\x04\x02\x00\x00", &[]),
        ];
    let expected_insns_intel: Vec<(&str, &[u8], &[cs_group_type])> = expected_insns
        .iter()
        .map(|&(mnemonic, _, bytes, groups)| (mnemonic, bytes, groups))
        .collect();
    let expected_insns_att: Vec<(&str, &[u8], &[cs_group_type])> = expected_insns
        .iter()
        .map(|&(_, mnemonic, bytes, groups)| (mnemonic, bytes, groups))
        .collect();
    let mut cs = Capstone::new()
        .x86()
        .mode(x86::ArchMode::Mode64)
        .syntax(x86::ArchSyntax::Intel)
        .build()
        .unwrap();
    instructions_match_group(&mut cs, &expected_insns_intel, true);
    cs.set_syntax(Syntax::Intel).unwrap();
    instructions_match_group(&mut cs, &expected_insns_intel, true);
    cs.set_syntax(Syntax::Att).unwrap();
    instructions_match_group(&mut cs, &expected_insns_att, false);
}
// @todo(tmfink) test invalid syntax once we check for invalid options
#[test]
// Currently a no-op loop: the syntax list is empty until the library
// validates syntax options (see the todo above).
fn test_invalid_syntax() {
    // These do no support any syntax change
    let rules = [(Arch::ARM, Mode::Thumb)];
    let syntaxes = [
        // Syntax::Intel,
        // Syntax::Att,
        // Syntax::NoRegName,
    ];
    for &(arch, mode) in rules.iter() {
        let mut cs = Capstone::new_raw(arch, mode, NO_EXTRA_MODE, None).unwrap();
        for &syntax in syntaxes.iter() {
            let result = cs.set_syntax(syntax);
            assert!(result.is_err(), "Expected Err, got {:?}", result);
        }
    }
}
#[test]
// PPC has no Thumb mode, so raw construction must fail with InvalidMode.
fn test_invalid_mode() {
    match Capstone::new_raw(Arch::PPC, Mode::Thumb, NO_EXTRA_MODE, None) {
        Err(err) => assert_eq!(err, Error::Capstone(CapstoneError::InvalidMode)),
        Ok(_) => panic!("Should fail to create given modes"),
    }
}
#[test]
// Sanity-check the reported library version is in a plausible range.
fn test_capstone_version() {
    let (major, minor) = Capstone::lib_version();
    println!("Capstone lib version: ({}, {})", major, minor);
    assert!(major > 0 && major < 100, "Invalid major version {}", major);
    assert!(minor < 500, "Invalid minor version {}", minor);
}
#[test]
// Informational: prints which architectures the linked capstone supports.
// Makes no assertions, since support depends on how capstone was built.
fn test_capstone_supports_arch() {
    let architectures = vec![
        Arch::ARM,
        Arch::ARM64,
        Arch::MIPS,
        Arch::X86,
        Arch::PPC,
        Arch::SPARC,
        Arch::SYSZ,
        Arch::XCORE,
        // Arch::M68K,
    ];
    println!("Supported architectures");
    for arch in architectures {
        let supports_arch = Capstone::supports_arch(arch);
        println!("  {:?}: {}", arch, if supports_arch { "yes" } else { "no" });
    }
}
#[test]
// Informational only: reports whether capstone was built in "diet" mode.
fn test_capstone_is_diet() {
    println!("Capstone is diet: {}", Capstone::is_diet());
}
#[test]
// ARM/Thumb decoding against known encodings, including MClass and V8 extra
// modes; each call also cross-checks a raw-constructed engine.
fn test_arch_arm() {
    test_arch_mode_endian_insns(
        &mut Capstone::new()
            .arm()
            .mode(arm::ArchMode::Arm)
            .build()
            .unwrap(),
        Arch::ARM,
        Mode::Arm,
        None,
        &[],
        &[
            ("bl", b"\xed\xff\xff\xeb"),
            ("str", b"\x04\xe0\x2d\xe5"),
            ("andeq", b"\x00\x00\x00\x00"),
            ("str", b"\xe0\x83\x22\xe5"),
            ("mcreq", b"\xf1\x02\x03\x0e"),
            ("mov", b"\x00\x00\xa0\xe3"),
            ("strb", b"\x02\x30\xc1\xe7"),
            ("cmp", b"\x00\x00\x53\xe3"),
            ("setend", b"\x00\x02\x01\xf1"),
            ("ldm", b"\x05\x40\xd0\xe8"),
            ("strdeq", b"\xf4\x80\x00\x00"),
        ],
    );
    test_arch_mode_endian_insns(
        &mut Capstone::new()
            .arm()
            .mode(arm::ArchMode::Thumb)
            .build()
            .unwrap(),
        Arch::ARM,
        Mode::Thumb,
        None,
        &[],
        &[
            ("bx", b"\x70\x47"),
            ("blx", b"\x00\xf0\x10\xe8"),
            ("mov", b"\xeb\x46"),
            ("sub", b"\x83\xb0"),
            ("ldr", b"\xc9\x68"),
            ("cbz", b"\x1f\xb1"),
            ("wfi", b"\x30\xbf"),
            ("cpsie.w", b"\xaf\xf3\x20\x84"),
            ("tbb", b"\xd1\xe8\x00\xf0"),
            ("movs", b"\xf0\x24"),
            ("lsls", b"\x04\x07"),
            ("subs", b"\x1f\x3c"),
            ("stm", b"\xf2\xc0"),
            ("movs", b"\x00\x00"),
            ("mov.w", b"\x4f\xf0\x00\x01"),
            ("ldr", b"\x46\x6c"),
        ],
    );
    test_arch_mode_endian_insns(
        &mut Capstone::new()
            .arm()
            .mode(arm::ArchMode::Thumb)
            .build()
            .unwrap(),
        Arch::ARM,
        Mode::Thumb,
        None,
        &[],
        &[
            ("mov.w", b"\x4f\xf0\x00\x01"),
            ("pop.w", b"\xbd\xe8\x00\x88"),
            ("tbb", b"\xd1\xe8\x00\xf0"),
            ("it", b"\x18\xbf"),
            ("iteet", b"\xad\xbf"),
            ("vdupne.8", b"\xf3\xff\x0b\x0c"),
            ("msr", b"\x86\xf3\x00\x89"),
            ("msr", b"\x80\xf3\x00\x8c"),
            ("sxtb.w", b"\x4f\xfa\x99\xf6"),
            ("vaddw.u16", b"\xd0\xff\xa2\x01"),
        ],
    );
    test_arch_mode_endian_insns(
        &mut Capstone::new()
            .arm()
            .mode(arm::ArchMode::Thumb)
            .extra_mode([arm::ArchExtraMode::MClass].iter().map(|x| *x))
            .build()
            .unwrap(),
        Arch::ARM,
        Mode::Thumb,
        None,
        &[ExtraMode::MClass],
        &[("mrs", b"\xef\xf3\x02\x80")],
    );
    test_arch_mode_endian_insns(
        &mut Capstone::new()
            .arm()
            .mode(arm::ArchMode::Arm)
            .extra_mode([arm::ArchExtraMode::V8].iter().map(|x| *x))
            .build()
            .unwrap(),
        Arch::ARM,
        Mode::Arm,
        None,
        &[ExtraMode::V8],
        &[
            ("vcvtt.f64.f16", b"\xe0\x3b\xb2\xee"),
            ("crc32b", b"\x42\x00\x01\xe1"),
            ("dmb", b"\x51\xf0\x7f\xf5"),
        ],
    );
}
#[test]
// ARM64 decoding against known encodings (system, SIMD, and ALU forms).
fn test_arch_arm64() {
    test_arch_mode_endian_insns(
        &mut Capstone::new()
            .arm64()
            .mode(arm64::ArchMode::Arm)
            .build()
            .unwrap(),
        Arch::ARM64,
        Mode::Arm,
        None,
        &[],
        &[
            ("mrs", b"\x09\x00\x38\xd5"),
            ("msr", b"\xbf\x40\x00\xd5"),
            ("msr", b"\x0c\x05\x13\xd5"),
            ("tbx", b"\x20\x50\x02\x0e"),
            ("scvtf", b"\x20\xe4\x3d\x0f"),
            ("fmla", b"\x00\x18\xa0\x5f"),
            ("fmov", b"\xa2\x00\xae\x9e"),
            ("dsb", b"\x9f\x37\x03\xd5"),
            ("dmb", b"\xbf\x33\x03\xd5"),
            ("isb", b"\xdf\x3f\x03\xd5"),
            ("mul", b"\x21\x7c\x02\x9b"),
            ("lsr", b"\x21\x7c\x00\x53"),
            ("sub", b"\x00\x40\x21\x4b"),
            ("ldr", b"\xe1\x0b\x40\xb9"),
            ("cneg", b"\x20\x04\x81\xda"),
            ("add", b"\x20\x08\x02\x8b"),
            ("ldr", b"\x10\x5b\xe8\x3c"),
        ],
    );
}
#[test]
// MIPS32R6 decoding: little-endian, big-endian, and microMIPS extra mode.
fn test_arch_mips() {
    test_arch_mode_endian_insns(
        &mut Capstone::new()
            .mips()
            .mode(mips::ArchMode::Mips32R6)
            .build()
            .unwrap(),
        Arch::MIPS,
        Mode::Mips32R6,
        Some(Endian::Little),
        &[],
        &[("ori", b"\x56\x34\x21\x34"), ("srl", b"\xc2\x17\x01\x00")],
    );
    test_arch_mode_endian_insns(
        &mut Capstone::new()
            .mips()
            .mode(mips::ArchMode::Mips32R6)
            .endian(Endian::Big)
            .build()
            .unwrap(),
        Arch::MIPS,
        Mode::Mips32R6,
        Some(Endian::Big),
        &[],
        &[
            ("ori", b"\x34\x21\x34\x56"),
            ("jal", b"\x0C\x10\x00\x97"),
            ("nop", b"\x00\x00\x00\x00"),
            ("addiu", b"\x24\x02\x00\x0c"),
            ("lw", b"\x8f\xa2\x00\x00"),
            ("ori", b"\x34\x21\x34\x56"),
        ],
    );
    test_arch_mode_endian_insns(
        &mut Capstone::new()
            .mips()
            .mode(mips::ArchMode::Mips32R6)
            .extra_mode([mips::ArchExtraMode::Micro].iter().map(|x| *x))
            .endian(Endian::Big)
            .build()
            .unwrap(),
        Arch::MIPS,
        Mode::Mips32R6,
        Some(Endian::Big),
        &[ExtraMode::Micro],
        &[
            ("break", b"\x00\x07\x00\x07"),
            ("wait", b"\x00\x11\x93\x7c"),
            ("syscall", b"\x01\x8c\x8b\x7c"),
            ("rotrv", b"\x00\xc7\x48\xd0"),
        ],
    );
    test_arch_mode_endian_insns(
        &mut Capstone::new()
            .mips()
            .mode(mips::ArchMode::Mips32R6)
            .endian(Endian::Big)
            .build()
            .unwrap(),
        Arch::MIPS,
        Mode::Mips32R6,
        Some(Endian::Big),
        &[],
        &[
            ("addiupc", b"\xec\x80\x00\x19"),
            ("align", b"\x7c\x43\x22\xa0"),
        ],
    );
}
#[test]
// PPC decoding, big-endian. The raw engine is built with Mode::Default
// rather than Mode::Mode32 (see the commented line).
fn test_arch_ppc() {
    test_arch_mode_endian_insns(
        &mut Capstone::new()
            .ppc()
            .mode(ppc::ArchMode::Mode32)
            .endian(Endian::Big)
            .build()
            .unwrap(),
        Arch::PPC,
        // Mode::Mode32,
        Mode::Default,
        Some(Endian::Big),
        &[],
        &[
            ("bdnzla+", b"\x43\x20\x0c\x07"),
            ("bdztla", b"\x41\x56\xff\x17"),
            ("lwz", b"\x80\x20\x00\x00"),
            ("lwz", b"\x80\x3f\x00\x00"),
            ("vpkpx", b"\x10\x43\x23\x0e"),
            ("stfs", b"\xd0\x44\x00\x80"),
            ("crand", b"\x4c\x43\x22\x02"),
            ("cmpwi", b"\x2d\x03\x00\x80"),
            ("addc", b"\x7c\x43\x20\x14"),
            ("mulhd.", b"\x7c\x43\x20\x93"),
            ("bdnzlrl+", b"\x4f\x20\x00\x21"),
            ("bgelrl-", b"\x4c\xc8\x00\x21"),
            ("bne", b"\x40\x82\x00\x14"),
        ],
    );
}
#[test]
// SPARC decoding in default mode and V9 mode.
fn test_arch_sparc() {
    test_arch_mode_endian_insns(
        &mut Capstone::new()
            .sparc()
            .mode(sparc::ArchMode::Default)
            .build()
            .unwrap(),
        Arch::SPARC,
        Mode::Default,
        None,
        &[],
        &[
            ("cmp", b"\x80\xa0\x40\x02"),
            ("jmpl", b"\x85\xc2\x60\x08"),
            ("restore", b"\x85\xe8\x20\x01"),
            ("restore", b"\x81\xe8\x00\x00"),
            ("mov", b"\x90\x10\x20\x01"),
            ("casx", b"\xd5\xf6\x10\x16"),
            ("sethi", b"\x21\x00\x00\x0a"),
            ("add", b"\x86\x00\x40\x02"),
            ("nop", b"\x01\x00\x00\x00"),
            ("bne", b"\x12\xbf\xff\xff"),
            ("ba", b"\x10\xbf\xff\xff"),
            ("add", b"\xa0\x02\x00\x09"),
            ("fbg", b"\x0d\xbf\xff\xff"),
            ("st", b"\xd4\x20\x60\x00"),
            ("ldsb", b"\xd4\x4e\x00\x16"),
            ("brnz,a,pn", b"\x2a\xc2\x80\x03"),
        ],
    );
    test_arch_mode_endian_insns(
        &mut Capstone::new()
            .sparc()
            .mode(sparc::ArchMode::V9)
            .build()
            .unwrap(),
        Arch::SPARC,
        Mode::V9,
        Some(Endian::Big),
        &[],
        &[
            ("fcmps", b"\x81\xa8\x0a\x24"),
            ("fstox", b"\x89\xa0\x10\x20"),
            ("fqtoi", b"\x89\xa0\x1a\x60"),
            ("fnegq", b"\x89\xa0\x00\xe0"),
        ],
    );
}
#[test]
// SystemZ decoding against known (variable-length) encodings.
fn test_arch_systemz() {
    test_arch_mode_endian_insns(
        &mut Capstone::new()
            .sysz()
            .mode(sysz::ArchMode::Default)
            .build()
            .unwrap(),
        Arch::SYSZ,
        Mode::Default,
        None,
        &[],
        &[
            ("adb", b"\xed\x00\x00\x00\x00\x1a"),
            ("a", b"\x5a\x0f\x1f\xff"),
            ("afi", b"\xc2\x09\x80\x00\x00\x00"),
            ("br", b"\x07\xf7"),
            ("xiy", b"\xeb\x2a\xff\xff\x7f\x57"),
            ("xy", b"\xe3\x01\xff\xff\x7f\x57"),
            ("stmg", b"\xeb\x00\xf0\x00\x00\x24"),
            ("ear", b"\xb2\x4f\x00\x78"),
            ("clije", b"\xec\x18\x00\x00\xc1\x7f"),
        ],
    );
}
#[test]
// x86 decoding in 16-, 32-, and 64-bit modes; the same byte stream decodes
// differently per mode.
fn test_arch_x86() {
    test_arch_mode_endian_insns(
        &mut Capstone::new()
            .x86()
            .mode(x86::ArchMode::Mode16)
            .build()
            .unwrap(),
        Arch::X86,
        Mode::Mode16,
        None,
        &[],
        &[
            ("lea", b"\x8d\x4c\x32"),
            ("or", b"\x08\x01"),
            ("fadd", b"\xd8\x81\xc6\x34"),
            ("adc", b"\x12\x00"),
            ("add", b"\x00\x05"),
            ("and", b"\x23\x01"),
            ("add", b"\x00\x00"),
            ("mov", b"\x36\x8b\x84\x91\x23"),
            ("add", b"\x01\x00"),
            ("add", b"\x00\x41\x8d"),
            ("test", b"\x84\x39"),
            ("mov", b"\x89\x67\x00"),
            ("add", b"\x00\x8d\x87\x89"),
            ("add", b"\x67\x00\x00"),
            ("mov", b"\xb4\xc6"),
        ],
    );
    test_arch_mode_endian_insns(
        &mut Capstone::new()
            .x86()
            .mode(x86::ArchMode::Mode32)
            .build()
            .unwrap(),
        Arch::X86,
        Mode::Mode32,
        None,
        &[],
        &[
            ("lea", b"\x8d\x4c\x32\x08"),
            ("add", b"\x01\xd8"),
            ("add", b"\x81\xc6\x34\x12\x00\x00"),
            ("add", b"\x05\x23\x01\x00\x00"),
            ("mov", b"\x36\x8b\x84\x91\x23\x01\x00\x00"),
            ("inc", b"\x41"),
            ("lea", b"\x8d\x84\x39\x89\x67\x00\x00"),
            ("lea", b"\x8d\x87\x89\x67\x00\x00"),
            ("mov", b"\xb4\xc6"),
        ],
    );
    test_arch_mode_endian_insns(
        &mut Capstone::new()
            .x86()
            .mode(x86::ArchMode::Mode64)
            .build()
            .unwrap(),
        Arch::X86,
        Mode::Mode64,
        None,
        &[],
        &[("push", b"\x55"), ("mov", b"\x48\x8b\x05\xb8\x13\x00\x00")],
    );
}
#[test]
// XCore decoding against known encodings.
fn test_arch_xcore() {
    test_arch_mode_endian_insns(
        &mut Capstone::new()
            .xcore()
            .mode(xcore::ArchMode::Default)
            .build()
            .unwrap(),
        Arch::XCORE,
        Mode::Default,
        None,
        &[],
        &[
            ("get", b"\xfe\x0f"),
            ("ldw", b"\xfe\x17"),
            ("setd", b"\x13\x17"),
            ("init", b"\xc6\xfe\xec\x17"),
            ("divu", b"\x97\xf8\xec\x4f"),
            ("lda16", b"\x1f\xfd\xec\x37"),
            ("ldw", b"\x07\xf2\x45\x5b"),
            ("lmul", b"\xf9\xfa\x02\x06"),
            ("add", b"\x1b\x10"),
            ("ldaw", b"\x09\xfd\xec\xa7"),
        ],
    );
}
}
|
// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use std::{cell::RefCell, ptr, rc::Rc};
use crate::{
context::{Context, ContextInner},
path::Path,
raster::Raster,
spinel_sys::*,
Clip, Transform,
};
/// Spinel `Raster` builder.
///
/// Is actually a thin wrapper over the [spn_raster_builder_t] stored in [`Context`].
///
/// [spn_raster_builder_t]: https://fuchsia.googlesource.com/fuchsia/+/refs/heads/master/src/graphics/lib/compute/spinel/spinel.h#123
///
/// # Examples
///
/// ```no_run
/// # use spinel_rs::{Context, Path, Point, RasterBuilder, Transform, Clip};
/// #
/// # fn catch() -> Option<()> {
/// # let context: Context = unimplemented!();
/// # let circle1: Path = unimplemented!();
/// # let circle2: Path = unimplemented!();
/// #
/// let tl = Point { x: 1.0, y: 1.0 };
/// let tr = Point { x: 5.0, y: 1.0 };
/// let br = Point { x: 5.0, y: 5.0 };
/// let bl = Point { x: 1.0, y: 5.0 };
///
/// let shapes = RasterBuilder::new(&context)
/// .fill(circle1, Transform::default(), Clip::default())
/// .fill(circle2, Transform::default(), Clip::default())
/// .build()?;
/// # None
/// # }
/// ```
#[derive(Clone, Debug)]
pub struct RasterBuilder {
    // Shared handle to the Spinel context that owns the raster builder.
    context: Rc<RefCell<ContextInner>>,
    // One entry per fill() call; kept in lock-step with `transforms`/`clips`.
    paths: Vec<SpnPath>,
    // Per-path transform values as produced by Transform::as_array().
    transforms: Vec<[f32; 8]>,
    // Per-path clip rectangle: [bottom_left.x, bottom_left.y, top_right.x, top_right.y].
    clips: Vec<[f32; 4]>,
}
impl RasterBuilder {
    /// Allocates the `spn_raster_builder` which contains all the internal (host) raster-related
    /// data. [spn_raster_builder_create]
    ///
    /// [spn_raster_builder_create]: https://fuchsia.googlesource.com/fuchsia/+/refs/heads/master/src/graphics/lib/compute/spinel/spinel.h#128
    pub fn new(context: &Context) -> Self {
        Self {
            context: Rc::clone(&context.inner),
            paths: vec![],
            transforms: vec![],
            clips: vec![],
        }
    }
    /// Fills current `Raster` with `path` by applying the 2D `transform` and rectangular `clip`.
    /// [spn_raster_fill]
    ///
    /// # Panics
    ///
    /// Panics if `transform` or `clip` contain non-finite f32 values.
    ///
    /// [spn_raster_fill]: https://fuchsia.googlesource.com/fuchsia/+/refs/heads/master/src/graphics/lib/compute/spinel/spinel.h#175
    pub fn fill(mut self, path: Path, transform: Transform, clip: Clip) -> Self {
        if !transform.is_finite() {
            panic!("{:?} does not have finite f32 values", transform);
        }
        if !clip.is_finite() {
            panic!("{:?} does not have finite f32 values", clip);
        }
        // The three vectors stay index-aligned: entry i of each describes fill i.
        self.paths.push(path.inner.spn_path);
        self.transforms.push(transform.as_array());
        self.clips.push([
            clip.bottom_left.x,
            clip.bottom_left.y,
            clip.top_right.x,
            clip.top_right.y,
        ]);
        self
    }
    /// Builds `Raster` and resets builder. Calls [spn_raster_begin] and [spn_raster_end] to
    /// allocate the path.
    ///
    /// Returns `None` if the underlying raster builder was lost, in which case
    /// the context's raster builder is reset.
    ///
    /// [spn_raster_begin]: https://fuchsia.googlesource.com/fuchsia/+/refs/heads/master/src/graphics/lib/compute/spinel/spinel.h#144
    /// [spn_raster_end]: https://fuchsia.googlesource.com/fuchsia/+/refs/heads/master/src/graphics/lib/compute/spinel/spinel.h#147
    pub fn build(self) -> Option<Raster> {
        // On SpnErrorRasterBuilderLost: reset the context's builder and bail
        // out with None; any other result must be a success.
        macro_rules! success {
            ( $result:expr, $raster_builder:expr $( , )? ) => {{
                if let Err(SpnError::SpnErrorRasterBuilderLost) = $result.res() {
                    $raster_builder.context.borrow_mut().reset_raster_builder();
                    return None;
                }
                $result.success();
            }};
        }
        unsafe {
            let spn_raster_builder = self.context.borrow().spn_raster_builder;
            success!(spn_raster_begin(spn_raster_builder), self);
            // Collect raw pointers into the (still-owned) transform/clip arrays.
            let transforms: Vec<_> =
                self.transforms.iter().map(|transform| transform.as_ptr()).collect();
            let clips: Vec<_> = self.clips.iter().map(|clip| clip.as_ptr()).collect();
            success!(
                spn_raster_fill(
                    spn_raster_builder,
                    self.paths.as_ptr(),
                    ptr::null(),
                    transforms.as_ptr(),
                    ptr::null(),
                    clips.as_ptr(),
                    self.paths.len() as u32,
                ),
                self,
            );
            let mut spn_raster = Default::default();
            success!(spn_raster_end(spn_raster_builder, &mut spn_raster as *mut _), self);
            Some(Raster::new(&self.context, spn_raster))
        }
    }
}
#[cfg(test)]
mod tests {
    use std::convert::TryInto;
    use crate::{path::Path, path_builder::PathBuilder, Point};
    use super::*;
    // A degenerate but finite clip rectangle.
    const FINITE: Clip =
        Clip { bottom_left: Point { x: 0.0, y: 0.0 }, top_right: Point { x: 0.0, y: 0.0 } };
    // Clips with a NaN in either corner; both must be reported non-finite.
    const BL_NAN: Clip = Clip {
        bottom_left: Point { x: std::f32::NAN, y: 0.0 },
        top_right: Point { x: 0.0, y: 0.0 },
    };
    const TR_NAN: Clip = Clip {
        bottom_left: Point { x: 0.0, y: 0.0 },
        top_right: Point { x: std::f32::NAN, y: 0.0 },
    };
    // Shared fixture: an empty path plus a fresh raster builder.
    fn new_path_and_raster_builder() -> (Path, RasterBuilder) {
        let context = Context::new();
        let path_builder = PathBuilder::new(&context, Point::default());
        let path = path_builder.build().unwrap();
        let raster_builder = RasterBuilder::new(&context);
        (path, raster_builder)
    }
    #[test]
    fn transform_is_finite() {
        // Slide a 9-value window over an array whose 9th slot is NaN, so the
        // NaN lands in every matrix position exactly once.
        let mut values = [1.0; 17];
        values[8] = std::f32::NAN;
        for i in 0..9 {
            let transform = Transform::from_matrix(values[i..i + 9].try_into().unwrap());
            assert!(!transform.is_finite());
        }
        let transform = Transform::from_matrix([1.0; 9]);
        assert!(transform.is_finite());
    }
    #[test]
    fn raster_builder_finite_transform_and_clip() {
        let (path, raster_builder) = new_path_and_raster_builder();
        raster_builder.fill(path, Transform::identity(), Clip::default());
    }
    #[test]
    #[should_panic]
    fn raster_builder_non_finite_transform() {
        let (path, raster_builder) = new_path_and_raster_builder();
        let mut transform = Transform::identity();
        transform.scale_x = std::f32::NAN;
        raster_builder.fill(path, transform, Clip::default());
    }
    #[test]
    fn clip_is_finite() {
        assert!(FINITE.is_finite());
        assert!(!BL_NAN.is_finite());
        assert!(!TR_NAN.is_finite());
    }
    #[test]
    #[should_panic]
    fn raster_builder_non_finite_clip() {
        let (path, raster_builder) = new_path_and_raster_builder();
        raster_builder.fill(path, Transform::identity(), BL_NAN);
    }
}
|
use super::Chain;
use super::Link;
use anyhow::Result;
use chrono::NaiveDate;
use rusqlite::{params, Connection};
use super::FORMAT;
/// Creates the `chains` and `links` tables if they do not already exist.
///
/// `links` rows are keyed by `(chain_id, date)` and reference `chains(id)`.
pub fn setup_tables(conn: &Connection) -> Result<()> {
    let statements = [
        "CREATE TABLE IF NOT EXISTS chains (
            id INTEGER PRIMARY KEY,
            name TEXT NOT NULL UNIQUE
        )",
        "CREATE TABLE IF NOT EXISTS links (
            chain_id INTEGER,
            date TEXT NOT NULL,
            PRIMARY KEY (chain_id, date),
            FOREIGN KEY (chain_id) REFERENCES chains(id)
        )",
    ];
    for sql in &statements {
        conn.execute(sql, params![])?;
    }
    Ok(())
}
/// Inserts `chain` by name; duplicates are silently ignored.
pub fn add_chain(conn: &Connection, chain: &Chain) -> Result<()> {
    let sql = "INSERT OR IGNORE INTO chains (name)
        VALUES (?1)";
    conn.execute(sql, params![chain.name])?;
    Ok(())
}
/// Removes the chain called `chain_name`; a no-op if no such chain exists.
pub fn delete_chain_for_name(conn: &Connection, chain_name: &str) -> Result<()> {
    let _deleted = conn.execute("DELETE FROM chains WHERE name=?1", params![chain_name])?;
    Ok(())
}
/// Renames the chain currently called `name` to `chain.name`.
///
/// `?1` is the existing name (the WHERE filter); `?2` is the new name.
pub fn edit_chain_for_name(conn: &Connection, chain: &Chain, name: &str) -> Result<()> {
    conn.execute(
        // Bug fix: the previous statement had a stray comma after the SET
        // clause ("name = ?2,"), which is a SQLite syntax error and made
        // every rename fail at prepare time.
        "UPDATE chains
         SET
             name = ?2
         WHERE
             name = ?1;",
        params![name, chain.name],
    )?;
    Ok(())
}
/// Returns every chain, ordered alphabetically by name.
///
/// Rows that fail to map are skipped rather than aborting the whole query.
pub fn get_chains(conn: &Connection) -> Result<Vec<Chain>> {
    let mut statement = conn.prepare(
        "SELECT
             id,
             name
         FROM chains
         ORDER BY name ASC;",
    )?;
    let rows = statement.query_map([], |row| {
        Ok(Chain { id: row.get(0)?, name: row.get(1)? })
    })?;
    let mut chains = Vec::new();
    for chain in rows {
        if let Ok(chain) = chain {
            chains.push(chain);
        }
    }
    Ok(chains)
}
/// Looks up the primary key of the chain called `chain_name`.
pub fn get_chain_id_for_name(conn: &Connection, chain_name: &str) -> Result<i32> {
    let id = conn.query_row_and_then(
        "SELECT id FROM chains WHERE name=?;",
        params![chain_name],
        |row| row.get(0),
    )?;
    Ok(id)
}
/// Fetches the full chain row whose primary key is `chain_id`.
pub fn get_chain_for_id(conn: &Connection, chain_id: i32) -> Result<Chain> {
    Ok(conn.query_row(
        "SELECT id, name FROM chains WHERE id=?1;",
        params![chain_id],
        |row| {
            Ok(Chain { id: row.get(0)?, name: row.get(1)? })
        },
    )?)
}
/// Fetches the full chain row called `chain_name`.
pub fn get_chain_for_name(conn: &Connection, chain_name: &str) -> Result<Chain> {
    Ok(conn.query_row(
        "SELECT id, name FROM chains WHERE name=?1;",
        params![chain_name],
        |row| {
            Ok(Chain { id: row.get(0)?, name: row.get(1)? })
        },
    )?)
}
pub fn add_link(conn: &Connection, link: &Link) -> Result<()> {
conn.execute(
"INSERT OR IGNORE INTO links (chain_id, date)
VALUES (?1, ?2);",
params![link.chain_id, link.date.format(FORMAT).to_string()],
)?;
Ok(())
}
pub fn delete_link(conn: &Connection, link: &Link) -> Result<()> {
conn.execute(
"DELETE FROM links WHERE chain_id=?1 AND date=?2;",
params![link.chain_id, link.date.format(FORMAT).to_string()],
)?;
Ok(())
}
pub fn update(conn: &Connection, current: &Link, new: &Link) -> Result<()> {
conn.execute(
"UPDATE links SET date = ?1 WHERE chain_id = ?2 AND date = ?3",
params![
new.date.format(FORMAT).to_string(),
current.chain_id,
current.date.format(FORMAT).to_string()
],
)?;
Ok(())
}
/// Returns all links belonging to `chain_id`, oldest date first.
///
/// Dates are always written through `FORMAT` (see `add_link`/`update`), so a
/// parse failure here indicates a corrupted row; it panics with context.
pub fn get_links_for_chain_id(conn: &Connection, chain_id: i32) -> Result<Vec<Link>> {
    let mut statement = conn.prepare(
        "SELECT chain_id, date
         FROM links
         WHERE chain_id = ?
         ORDER BY date ASC;",
    )?;
    let link_iter = statement.query_map(params![chain_id], |row| {
        let chain_id: i32 = row.get(0)?;
        // `row.get` already yields an owned String; the former extra
        // `.to_string()` call was a redundant allocation.
        let date_str: String = row.get(1)?;
        let date = NaiveDate::parse_from_str(&date_str, FORMAT)
            .expect("links.date not stored in expected FORMAT");
        Ok(Link { chain_id, date })
    })?;
    Ok(link_iter.filter_map(Result::ok).collect())
}
|
//! named accounts for synthesized data accounts for bank state, etc.
//!
//! this account carries the Bank's most recent blockhashes for some N parents
//!
use crate::account::Account;
use crate::account_utils::State;
use crate::hash::Hash;
use crate::pubkey::Pubkey;
use crate::syscall;
use bincode::serialized_size;
use std::ops::Deref;
/// "Sysca11S1otHashes11111111111111111111111111"
/// slot hashes account pubkey (base58 of the bytes below; note the digit `1`
/// in "S1ot" — base58 has no lowercase 'l'; see the id test in this file)
const ID: [u8; 32] = [
    6, 167, 211, 138, 69, 219, 186, 157, 48, 170, 46, 66, 2, 146, 193, 59, 39, 59, 245, 188, 30,
    60, 130, 78, 86, 27, 113, 191, 208, 0, 0, 0,
];

/// Returns the well-known slot-hashes account pubkey.
pub fn id() -> Pubkey {
    Pubkey::new(&ID)
}

/// True iff `pubkey` is the slot-hashes account id.
pub fn check_id(pubkey: &Pubkey) -> bool {
    pubkey.as_ref() == ID
}
// Maximum number of (slot, hash) entries retained by `SlotHashes::add`.
pub const MAX_SLOT_HASHES: usize = 512; // 512 slots to get your vote in

/// Most recent slots' bank hashes, newest first (see `add`).
#[derive(Serialize, Deserialize, PartialEq, Debug)]
pub struct SlotHashes {
    // non-pub to keep control of size
    inner: Vec<(u64, Hash)>,
}
impl SlotHashes {
    /// Deserializes a `SlotHashes` from `account`'s data; `None` on failure.
    pub fn from(account: &Account) -> Option<Self> {
        account.state().ok()
    }

    /// Serializes `self` into `account`'s data; `None` on failure.
    pub fn to(&self, account: &mut Account) -> Option<()> {
        account.set_state(self).ok()
    }

    /// Serialized size of a fully-populated list; used to size the account.
    pub fn size_of() -> usize {
        let full = SlotHashes {
            inner: vec![(0, Hash::default()); MAX_SLOT_HASHES],
        };
        serialized_size(&full).unwrap() as usize
    }

    /// Prepends the newest `(slot, hash)` pair, discarding the oldest entry
    /// once the list exceeds `MAX_SLOT_HASHES`.
    pub fn add(&mut self, slot: u64, hash: Hash) {
        self.inner.insert(0, (slot, hash));
        self.inner.truncate(MAX_SLOT_HASHES);
    }
}
// Read-only access to the inner vec's API (indexing, len, iteration).
impl Deref for SlotHashes {
    type Target = Vec<(u64, Hash)>;
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}
/// Creates the syscall-owned account sized to hold a full `SlotHashes` list.
pub fn create_account(difs: u64) -> Account {
    let space = SlotHashes::size_of();
    Account::new(difs, 0, space, &syscall::id())
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::hash::hash;

    #[test]
    fn test_slot_hashes_id() {
        let ids = [("Sysca11S1otHashes11111111111111111111111111", id())];
        // to get the bytes above:
        // ids.iter().for_each(|(name, _)| {
        //     dbg!((name, bs58::decode(name).into_vec().unwrap()));
        // });
        for (name, id) in ids.iter() {
            assert_eq!(*name, id.to_string());
        }
        assert!(check_id(&id()));
    }

    #[test]
    fn test_slot_hashes_create_account() {
        let difs = 42;
        let account = create_account(difs);
        // A freshly created account deserializes to an empty hash list.
        let slot_hashes = SlotHashes::from(&account);
        assert_eq!(slot_hashes, Some(SlotHashes { inner: vec![] }));
        let mut slot_hashes = slot_hashes.unwrap();
        // Push one more entry than the cap to exercise truncation.
        for i in 0..=MAX_SLOT_HASHES {
            let bytes = [(i >> 24) as u8, (i >> 16) as u8, (i >> 8) as u8, i as u8];
            slot_hashes.add(i as u64, hash(&bytes));
        }
        // Newest entry sits at the front; the oldest was dropped at the cap.
        assert_eq!(slot_hashes[0].0, MAX_SLOT_HASHES as u64);
        assert_eq!(slot_hashes.len(), MAX_SLOT_HASHES);
    }
}
|
/// Marker trait restricting which types may implement this crate's public
/// traits (the "sealed trait" pattern). Unit, and references to any sealed
/// type, are sealed; tuples are covered by `tuple_impl` below.
pub trait Sealed {}
impl Sealed for () {}
impl<S: Sealed> Sealed for &'_ S {}
impl<S: Sealed> Sealed for &'_ mut S {}
// Implements `Sealed` for tuples of every arity that `for_tuples!` expands.
// `last_type!` (defined elsewhere in this crate) names the final type
// parameter, which is the only one permitted to be unsized.
macro_rules! tuple_impl {
    ($( $types:ident, )*) => {
        impl<$( $types, )*> Sealed for ($( $types, )*)
        where
            last_type!($( $types, )*): ?Sized,
        {}
    };
}
for_tuples!(A, B, C, D, E, F, G, H, I, J, K, L, # tuple_impl);
|
#![macro_use]
use lazy_static::lazy_static;
use parking_lot::RwLock;
use super::timer::Timer;
lazy_static! {
    /// Process-wide store of all timing measurements. An RwLock so that
    /// many readers (e.g. a stats display) can inspect timers concurrently.
    pub static ref TIMING_DATABASE: RwLock<GlobalTimers> = RwLock::new(GlobalTimers::default());
}

/// Top-level grouping of every timer category in the application.
#[derive(Default)]
pub struct GlobalTimers {
    pub physics: PhysicsTimers,
    pub gui_render: GUITimers,
    pub sim_render: SimRenderTimers,
}

/// Timers covering GUI rendering.
#[derive(Default)]
pub struct GUITimers {
    pub draw: Timer,
}

/// Timers covering the physics pipeline.
#[derive(Default)]
pub struct PhysicsTimers {
    // whole physics step
    pub step: Timer,
    // position update phase
    pub pos_update: Timer,
    // collision detection phase
    pub col_detect: Timer,
}

/// Timers covering simulation rendering.
#[derive(Default)]
pub struct SimRenderTimers {
    pub render: Timer,
    pub vertex_buffer_update: Timer,
}
|
use avaro::parse_avaro;
use structopt::StructOpt;
/// Command-line arguments: a single positional path to the file to parse.
#[derive(Debug, StructOpt)]
struct Opt {
    #[structopt(name = "FILE")]
    // Path of the avaro source file to read.
    file_name: String,
}
/// Entry point: reads the file named on the command line, parses it as
/// avaro, and dumps either the parsed entities or the parse error.
fn main() {
    let opt: Opt = Opt::from_args();
    // Reading a user-supplied path fails for mundane reasons (missing file,
    // permissions); report which file failed instead of a bare unwrap panic.
    let content = match std::fs::read_to_string(&opt.file_name) {
        Ok(content) => content,
        Err(e) => {
            eprintln!("failed to read {}: {}", opt.file_name, e);
            std::process::exit(1);
        }
    };
    match parse_avaro(&content) {
        Ok(entities) => {
            dbg!(entities);
        }
        Err(e) => {
            eprintln!("{}", e);
        }
    }
}
|
// actor/computer.rs
use actor::Actor;
/// A computer-controlled actor and its per-game state.
pub struct Computer {
    name: String,
    position: i32,
    money: i32,
    knowledge: i32,
    tiles: Vec<String>,
    // false once this player is out of the game (see `is_playing`)
    still_playing: bool,
    // presumably makes the next turn be skipped — confirm against game loop
    skip_one_turn: bool,
}
impl Actor for Computer {
    // NOTE: `pub` is not permitted on trait-impl methods (E0449); visibility
    // comes from the trait itself, so both qualifiers were removed.

    /// Whether this computer is still in the game.
    fn is_playing(&self) -> bool {
        self.still_playing
    }

    /// Returns a copy of this computer's name.
    fn get_name(&self) -> String {
        // Returning `self.name` directly would move the String out of
        // `&self` (E0507); hand the caller its own copy instead.
        self.name.clone()
    }
}
|
use bevy::app::startup_stage;
use bevy::prelude::*;
use bevy_index::{ComponentIndex, ComponentIndexes};
use rand::distributions::{Bernoulli, Distribution};
// Half-extent of the grid: init_grid spawns cells for -MAP_SIZE..MAP_SIZE.
const MAP_SIZE: isize = 10;
// Seconds between game-of-life generations.
const GAME_INTERVAL: f32 = 0.5;
// Probability that a cell starts alive (see init_cells).
const FRACTION_ALIVE: f64 = 0.2;
// Pixels per grid cell.
const GRAPHICS_SCALE: f32 = 10.0;
// Sprite colors for the two cell states.
const COL_ALIVE: Color = Color::rgb_linear(0.0, 0.0, 0.0);
const COL_DEAD: Color = Color::rgb_linear(1.0, 1.0, 1.0);
/// Integer grid coordinates of a cell; also used as the component-index key.
#[derive(Hash, PartialEq, Eq, Clone, Copy)]
struct Position {
    x: isize,
    y: isize,
}
impl Position {
    /// Returns this cell's in-bounds Moore neighbors (up to 8).
    fn get_neighbors(self) -> Vec<Position> {
        let mut neighbors: Vec<Position> = Vec::new();
        // Bug fix: `-1..1` only yields offsets -1 and 0, so just 3 of the 8
        // neighbors were ever visited; the range must be inclusive.
        for i in -1..=1 {
            for j in -1..=1 {
                // A cell is not a neighbor to itself
                if (i != 0) | (j != 0) {
                    let candidate_neighbor = Position {
                        x: self.x + i,
                        y: self.y + j,
                    };
                    if let Some(n) = candidate_neighbor.check_bounds() {
                        neighbors.push(n);
                    }
                }
            }
        }
        neighbors
    }

    /// `Some(self)` when the position lies on the spawned grid, else `None`.
    ///
    /// Bug fix: `init_grid` spawns cells for x and y in `-MAP_SIZE..MAP_SIZE`,
    /// so the valid range is symmetric around the origin and exclusive at the
    /// top. The previous check (`0 <= v && v <= MAP_SIZE`) rejected every
    /// cell in the negative half of the grid.
    fn check_bounds(self) -> Option<Position> {
        let in_range = |v: isize| (-MAP_SIZE..MAP_SIZE).contains(&v);
        if in_range(self.x) && in_range(self.y) {
            Some(self)
        } else {
            None
        }
    }
}
/// Game-of-life cell state.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum Life {
    Alive,
    Dead,
}

/// Deferred state change: `entity` should be set to `status` by
/// `process_life_events` at the end of the frame.
#[derive(Debug, Clone)]
struct LifeEvent {
    entity: Entity,
    status: Life,
}

/// Repeating timer pacing the generations (period: GAME_INTERVAL seconds).
struct GameTimer(Timer);
/// Wires up the Bevy app: resources, startup systems (camera, grid, initial
/// cell states) and the per-frame simulation systems.
fn main() {
    App::build()
        .add_plugins(DefaultPlugins)
        .add_resource(GameTimer(Timer::from_seconds(GAME_INTERVAL, true)))
        .init_index::<Position>()
        .add_event::<LifeEvent>()
        .add_startup_system(init_camera.system())
        .add_startup_system(init_grid.system())
        // Cells are randomized only after the grid exists, hence POST_STARTUP.
        .add_startup_system_to_stage(startup_stage::POST_STARTUP, init_cells.system())
        //.add_system(report_alive.system())
        .add_system(game_of_life.system())
        // Life events are applied after game_of_life has run for the frame,
        // and colors are refreshed last, once Life changes are in place.
        .add_system_to_stage(stage::POST_UPDATE, process_life_events.system())
        .add_system_to_stage(stage::LAST, update_cell_color.system())
        .run();
}
/// Spawns one sprite entity per grid cell, tagged with its Position and an
/// initial Life::Dead state.
fn init_grid(commands: &mut Commands, mut materials: ResMut<Assets<ColorMaterial>>) {
    // Presumably guards that the total cell count fits in usize when the
    // grid is indexed — TODO confirm the intent of this bound.
    assert!(MAP_SIZE < (usize::MAX as f64).sqrt().floor() as isize);
    // spawn_batch doesn't work because ColorMaterial isn't thread safe
    for x in -MAP_SIZE..MAP_SIZE {
        for y in -MAP_SIZE..MAP_SIZE {
            commands
                .spawn(SpriteBundle {
                    // Each cell gets its own material so its color can be
                    // changed independently later.
                    material: materials.add(COL_DEAD.into()),
                    transform: Transform::from_translation(Vec3::new(
                        x as f32 * GRAPHICS_SCALE,
                        y as f32 * GRAPHICS_SCALE,
                        0.0,
                    )),
                    // Slightly smaller than the cell pitch, leaving a gap.
                    sprite: Sprite::new(Vec2::new(0.9 * GRAPHICS_SCALE, 0.9 * GRAPHICS_SCALE)),
                    ..Default::default()
                })
                .with(Position { x, y })
                .with(Life::Dead);
        }
    }
}
/// Randomly seeds the board: each cell becomes Alive with probability
/// FRACTION_ALIVE, otherwise stays Dead.
fn init_cells(mut query: Query<&mut Life>) {
    let distribution = Bernoulli::new(FRACTION_ALIVE).unwrap();
    let mut rng = rand::thread_rng();
    for mut life in query.iter_mut() {
        if distribution.sample(&mut rng) {
            *life = Life::Alive;
        }
    }
}
/// Spawns the 2D camera.
fn init_camera(commands: &mut Commands) {
    commands.spawn(Camera2dBundle::default());
}
/// Counts how many of `neighbors` map (via the Position index) to at least
/// one entity whose Life component is Alive.
fn count_alive(
    neighbors: Vec<Position>,
    position_index: &ComponentIndex<Position>,
    life_query: &Query<&Life>,
) -> u8 {
    let mut alive: u8 = 0;
    for position in &neighbors {
        let any_alive = position_index
            .get(position)
            .iter()
            .any(|&entity| life_query.get(entity).ok() == Some(&Life::Alive));
        if any_alive {
            alive += 1;
        }
    }
    alive
}
// FIXME: kills all neighboring cells
// Pretty sure it's because the index isn't updated in time
/// Conway step: once per GameTimer tick, evaluates every cell against the
/// B3/S23 rules and emits a LifeEvent for each cell that must flip. State
/// changes are deferred to `process_life_events` (POST_UPDATE stage) so all
/// cells are judged against the same generation.
fn game_of_life(
    time: Res<Time>,
    mut timer: ResMut<GameTimer>,
    query: Query<(&Life, &Position, Entity)>,
    position_index: Res<ComponentIndex<Position>>,
    life_query: Query<&Life>,
    mut life_events: ResMut<Events<LifeEvent>>,
) {
    timer.0.tick(time.delta_seconds());
    if timer.0.finished() {
        for (life, position, entity) in query.iter() {
            // FIXME:
            let n_neighbors = count_alive(position.get_neighbors(), &position_index, &life_query);
            dbg!(n_neighbors);
            match *life {
                Life::Alive => {
                    // Underpopulation (<2) or overpopulation (>3) kills.
                    if (n_neighbors < 2) | (n_neighbors > 3) {
                        life_events.send(LifeEvent {
                            entity: entity,
                            status: Life::Dead,
                        })
                    }
                }
                Life::Dead => {
                    // Exactly three live neighbors births a new cell.
                    if n_neighbors == 3 {
                        life_events.send(LifeEvent {
                            entity: entity,
                            status: Life::Alive,
                        })
                    }
                }
            }
        }
    }
}
/// Applies this frame's queued LifeEvents to their target entities.
fn process_life_events(
    mut life_event_reader: Local<EventReader<LifeEvent>>,
    life_events: ResMut<Events<LifeEvent>>,
    mut life_query: Query<&mut Life>,
) {
    for event in life_event_reader.iter(&life_events) {
        dbg!(event.status);
        // Entities may no longer match the query; ignore failed lookups.
        match life_query.get_mut(event.entity) {
            Ok(mut life_value) => *life_value = event.status,
            Err(_) => {}
        }
    }
}
// FIXME: isn't working
fn update_cell_color(mut query: Query<(&Life, &mut ColorMaterial), Changed<Life>>) {
for (life, mut color) in query.iter_mut() {
*color = match life {
Life::Alive => COL_ALIVE.into(),
Life::Dead => COL_DEAD.into(),
}
}
}
/// Debug helper: prints the number of currently-alive cells.
fn report_alive(query: Query<&Life>) {
    let alive = query.iter().filter(|&life| *life == Life::Alive).count();
    println!("{}", alive);
}
|
use crate::intcode::IntCodeEmulator;
const INPUT: &str = include_str!("../input/2019/day2.txt");
/// Parses the comma-separated puzzle input into intcode program memory.
pub fn parse_input() -> Vec<i64> {
    let mut program = Vec::new();
    for token in INPUT.trim().split(',') {
        program.push(token.parse().expect("Unable to parse input"));
    }
    program
}
/// Day 2 part 1: run the program with noun=12, verb=2 as the puzzle states.
pub fn part1() -> i64 {
    run(&parse_input(), 12, 2)
}
/// Day 2 part 2: search every noun/verb pair in 0..=99 for the one that
/// produces 19_690_720, returning `100 * noun + verb`.
pub fn part2() -> i64 {
    let input = parse_input();
    (0..=99)
        .flat_map(|noun| (0..=99).map(move |verb| (noun, verb)))
        .find(|&(noun, verb)| run(&input, noun, verb) == 19_690_720)
        .map(|(noun, verb)| 100 * noun + verb)
        .expect("Correct noun/verb combo not found")
}
/// Executes a copy of `input` with addresses 1 and 2 patched to `noun` and
/// `verb`, returning the value left at address 0 when the program halts.
fn run(input: &[i64], noun: i64, verb: i64) -> i64 {
    let mut program = input.to_vec();
    program[1] = noun;
    program[2] = verb;
    let mut emulator = IntCodeEmulator::new(program);
    emulator.execute();
    emulator.ram()[0]
}
#[cfg(test)]
mod tests {
    use super::*;

    // Expected values are the known answers for this puzzle input file.
    #[test]
    fn day02_part1() {
        assert_eq!(part1(), 6_627_023);
    }
    #[test]
    fn day02_part2() {
        assert_eq!(part2(), 4019);
    }
}
|
//! Benchmarks for rustix.
//!
//! To enable these benchmarks, add `--cfg=criterion` to RUSTFLAGS and enable
//! the "fs", "time", and "process" cargo features.
//!
//! ```sh
//! RUSTFLAGS=--cfg=criterion cargo bench --features=fs,time,process
//! ```
// Fallback entry point: built whenever the `criterion` cfg, any required
// cargo feature, or the target platform rules the benchmarks out.
#[cfg(any(
    not(criterion),
    not(feature = "fs"),
    not(feature = "process"),
    not(feature = "time"),
    windows,
    target_os = "emscripten",
    target_os = "redox",
    target_os = "wasi",
))]
fn main() {
    unimplemented!(
        "Add --cfg=criterion to RUSTFLAGS and enable the \"fs\", \"time\", and \"process\" cargo \
         features."
    )
}
#[cfg(not(any(
    not(criterion),
    not(feature = "fs"),
    not(feature = "process"),
    not(feature = "time"),
    windows,
    target_os = "emscripten",
    target_os = "redox",
    target_os = "wasi",
)))]
use criterion::{criterion_group, criterion_main};

// Benchmark bodies: each rustix call is paired with its raw libc equivalent
// so the two can be compared.
#[cfg(not(any(
    not(criterion),
    not(feature = "fs"),
    not(feature = "process"),
    not(feature = "time"),
    windows,
    target_os = "emscripten",
    target_os = "redox",
    target_os = "wasi",
)))]
mod suite {
    use criterion::Criterion;

    /// rustix `statat` of "/" with a `&str` path argument.
    pub(super) fn simple_statat(c: &mut Criterion) {
        use rustix::fs::{statat, AtFlags, CWD};
        c.bench_function("simple statat", |b| {
            b.iter(|| {
                statat(CWD, "/", AtFlags::empty()).unwrap();
            })
        });
    }

    /// libc `fstatat` of "/", constructing the CString on every iteration.
    pub(super) fn simple_statat_libc(c: &mut Criterion) {
        c.bench_function("simple statat libc", |b| {
            b.iter(|| {
                let mut s = std::mem::MaybeUninit::<libc::stat>::uninit();
                // SAFETY: fstatat only writes into `s`; the assert confirms
                // the call succeeded.
                unsafe {
                    assert_eq!(
                        libc::fstatat(
                            libc::AT_FDCWD,
                            std::ffi::CString::new("/").unwrap().as_c_str().as_ptr() as _,
                            s.as_mut_ptr(),
                            0
                        ),
                        0
                    );
                }
            })
        });
    }

    /// libc `fstatat` of "/" with a pre-built C string literal.
    pub(super) fn simple_statat_libc_cstr(c: &mut Criterion) {
        c.bench_function("simple statat libc cstr", |b| {
            b.iter(|| {
                let mut s = std::mem::MaybeUninit::<libc::stat>::uninit();
                // SAFETY: as above; `s` is only read after the success check.
                unsafe {
                    assert_eq!(
                        libc::fstatat(
                            libc::AT_FDCWD,
                            rustix::cstr!("/").as_ptr() as _,
                            s.as_mut_ptr(),
                            0
                        ),
                        0
                    );
                }
            })
        });
    }

    /// rustix `statat` of "/" with a pre-built C string literal.
    pub(super) fn simple_statat_cstr(c: &mut Criterion) {
        use rustix::fs::{statat, AtFlags, CWD};
        c.bench_function("simple statat cstr", |b| {
            b.iter(|| {
                statat(CWD, rustix::cstr!("/"), AtFlags::empty()).unwrap();
            })
        });
    }

    /// rustix monotonic `clock_gettime`.
    #[cfg(not(target_os = "wasi"))]
    pub(super) fn simple_clock_gettime(c: &mut Criterion) {
        use rustix::time::{clock_gettime, ClockId};
        c.bench_function("simple clock_gettime", |b| {
            b.iter(|| {
                let _ = clock_gettime(ClockId::Monotonic);
            })
        });
    }

    /// libc monotonic `clock_gettime`.
    #[cfg(not(target_os = "wasi"))]
    pub(super) fn simple_clock_gettime_libc(c: &mut Criterion) {
        c.bench_function("simple clock_gettime libc", |b| {
            b.iter(|| {
                let mut s = std::mem::MaybeUninit::<libc::timespec>::uninit();
                // SAFETY: clock_gettime fills `s` on success, which is
                // asserted before assume_init.
                unsafe {
                    assert_eq!(
                        libc::clock_gettime(libc::CLOCK_MONOTONIC, s.as_mut_ptr()),
                        0
                    );
                    let _ = s.assume_init();
                }
            })
        });
    }

    /// rustix `getpid`.
    #[cfg(not(target_os = "wasi"))]
    pub(super) fn simple_getpid(c: &mut Criterion) {
        use rustix::process::getpid;
        c.bench_function("simple getpid", |b| {
            b.iter(|| {
                let _ = getpid();
            })
        });
    }

    /// libc `getpid`.
    #[cfg(not(target_os = "wasi"))]
    pub(super) fn simple_getpid_libc(c: &mut Criterion) {
        c.bench_function("simple getpid libc", |b| {
            b.iter(|| unsafe {
                let _ = libc::getpid();
            })
        });
    }
}
// Registers every benchmark in `suite` and emits the criterion main().
#[cfg(not(any(
    not(criterion),
    not(feature = "fs"),
    not(feature = "process"),
    not(feature = "time"),
    windows,
    target_os = "emscripten",
    target_os = "redox",
    target_os = "wasi",
)))]
criterion_group!(
    benches,
    suite::simple_statat,
    suite::simple_statat_libc,
    suite::simple_statat_libc_cstr,
    suite::simple_statat_cstr,
    suite::simple_clock_gettime,
    suite::simple_clock_gettime_libc,
    suite::simple_getpid,
    suite::simple_getpid_libc
);
#[cfg(not(any(
    not(criterion),
    not(feature = "fs"),
    not(feature = "process"),
    not(feature = "time"),
    windows,
    target_os = "emscripten",
    target_os = "redox",
    target_os = "wasi",
)))]
criterion_main!(benches);
|
pub use VkQueryPipelineStatisticFlags::*;

/// Individual pipeline-statistic query bits
/// (`VK_QUERY_PIPELINE_STATISTIC_*` from the Vulkan spec).
#[repr(u32)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum VkQueryPipelineStatisticFlags {
    VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT = 0x0000_0001,
    VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT = 0x0000_0002,
    VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT = 0x0000_0004,
    VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT = 0x0000_0008,
    VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT = 0x0000_0010,
    VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT = 0x0000_0020,
    VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT = 0x0000_0040,
    VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT = 0x0000_0080,
    VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT = 0x0000_0100,
    VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT = 0x0000_0200,
    VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT = 0x0000_0400,
}
use crate::SetupVkFlags;

/// Bitmask combining `VkQueryPipelineStatisticFlags` values.
// NOTE(review): naming appears inverted relative to the Vulkan spec, where
// `FlagBits` is the enum and `Flags` is the mask type — confirm this is the
// crate-wide convention.
#[repr(C)]
#[derive(Clone, Copy, Eq, PartialEq, Hash)]
pub struct VkQueryPipelineStatisticFlagBits(u32);

// Generates the bitwise-op and conversion impls tying the enum to the mask.
SetupVkFlags!(
    VkQueryPipelineStatisticFlags,
    VkQueryPipelineStatisticFlagBits
);
|
use super::VecMutator;
use crate::mutators::mutations::{Mutation, RevertMutation};
use crate::{Mutator, SubValueProvider};
/// Mutation that inserts a clone of one existing element at another index.
pub struct CopyElement;

/// Random-step marker: picking a random copy needs no persistent state.
#[derive(Clone)]
pub struct CopyElementRandomStep;

/// Exhaustive-step cursor: which element to copy and where to insert it.
#[derive(Clone)]
pub struct CopyElementStep {
    from_idx: usize,
    to_idx: usize,
}

/// A fully-determined copy: the cloned element, its complexity, and the
/// insertion index.
pub struct ConcreteCopyElement<T> {
    el: T,
    cplx: f64,
    idx: usize,
}

/// Undo token remembering where the copy was inserted.
pub struct RevertCopyElement {
    idx: usize,
}
impl<T, M> RevertMutation<Vec<T>, VecMutator<T, M>> for RevertCopyElement
where
    T: Clone + 'static,
    M: Mutator<T>,
{
    /// Undoes the copy by removing the element inserted at `self.idx`.
    #[no_coverage]
    fn revert(
        self,
        _mutator: &VecMutator<T, M>,
        value: &mut Vec<T>,
        _cache: &mut <VecMutator<T, M> as Mutator<Vec<T>>>::Cache,
    ) {
        let _ = value.remove(self.idx);
    }
}
impl<T, M> Mutation<Vec<T>, VecMutator<T, M>> for CopyElement
where
    T: Clone + 'static,
    M: Mutator<T>,
{
    type RandomStep = CopyElementRandomStep;
    type Step = CopyElementStep;
    type Concrete<'a> = ConcreteCopyElement<T>;
    type Revert = RevertCopyElement;

    /// A copy is possible unless elements carry zero complexity, the vector
    /// is empty, or it is already at its maximum allowed length.
    #[no_coverage]
    fn default_random_step(&self, mutator: &VecMutator<T, M>, value: &Vec<T>) -> Option<Self::RandomStep> {
        if mutator.m.max_complexity() == 0. {
            return None;
        }
        if value.is_empty() || value.len() >= *mutator.len_range.end() {
            None
        } else {
            Some(CopyElementRandomStep)
        }
    }

    /// Picks a random source element and a random insertion index.
    #[no_coverage]
    fn random<'a>(
        mutator: &VecMutator<T, M>,
        value: &Vec<T>,
        cache: &<VecMutator<T, M> as Mutator<Vec<T>>>::Cache,
        _random_step: &Self::RandomStep,
        _max_cplx: f64,
    ) -> Self::Concrete<'a> {
        let from_idx = mutator.rng.usize(..value.len());
        let to_idx = mutator.rng.usize(..value.len());
        let (el, el_cache) = (&value[from_idx], &cache.inner[from_idx]);
        let cplx = mutator.m.complexity(el, el_cache);
        ConcreteCopyElement {
            el: el.clone(),
            cplx,
            idx: to_idx,
        }
    }

    /// Same feasibility check as `default_random_step`, but starts the
    /// exhaustive enumeration at (from_idx: 0, to_idx: 0).
    #[no_coverage]
    fn default_step(
        &self,
        mutator: &VecMutator<T, M>,
        value: &Vec<T>,
        _cache: &<VecMutator<T, M> as Mutator<Vec<T>>>::Cache,
    ) -> Option<Self::Step> {
        if mutator.m.max_complexity() == 0. {
            return None;
        }
        if value.is_empty() || value.len() >= *mutator.len_range.end() {
            None
        } else {
            Some(Self::Step { from_idx: 0, to_idx: 0 })
        }
    }

    /// Yields the next (source, destination) pair in the enumeration, or
    /// `None` once every source element has been tried. Recurses (at most
    /// once per remaining source element) when a candidate would exceed the
    /// complexity budget.
    #[no_coverage]
    fn from_step<'a>(
        mutator: &VecMutator<T, M>,
        value: &Vec<T>,
        cache: &<VecMutator<T, M> as Mutator<Vec<T>>>::Cache,
        step: &'a mut Self::Step,
        subvalue_provider: &dyn SubValueProvider,
        max_cplx: f64,
    ) -> Option<Self::Concrete<'a>> {
        // The step.from_idx increments from 0 to value.len()
        // once it reaches value.len(), we have tried every single possibility
        if step.from_idx == value.len() {
            return None;
        }
        // It doesn't make sense to copy an element to its same index
        // e.g. [0, 1, 2, 3]
        // copy 0 to 0 -> [0, 0, 1, 2, 3]
        // ok, but then:
        // copy 0 to 1 -> [0, 0, 1, 2, 3]
        // they're the same thing
        if step.from_idx == step.to_idx {
            step.to_idx += 1;
        }
        let value_cplx = mutator.complexity(value, cache);
        let spare_cplx = max_cplx - value_cplx;
        let (el, el_cache) = (&value[step.from_idx], &cache.inner[step.from_idx]);
        let cplx = mutator.m.complexity(el, el_cache);
        // cannot copy an element that would make the value exceed the maximum complexity
        // so we try another one
        if cplx > spare_cplx {
            step.from_idx += 1;
            step.to_idx = 0;
            Self::from_step(mutator, value, cache, step, subvalue_provider, max_cplx)
        } else {
            let concrete = ConcreteCopyElement {
                el: el.clone(),
                cplx,
                idx: step.to_idx,
            };
            // Destinations range over 0..=value.len() (insertion positions).
            step.to_idx = (step.to_idx + 1) % (value.len() + 1);
            if step.to_idx == 0 {
                // then we have tried copying the element at from_idx to every other index
                // time to copy a different element
                step.from_idx += 1;
            }
            Some(concrete)
        }
    }

    /// Performs the insertion and returns the revert token plus the new
    /// total complexity of the vector.
    #[no_coverage]
    fn apply<'a>(
        mutation: Self::Concrete<'a>,
        mutator: &VecMutator<T, M>,
        value: &mut Vec<T>,
        cache: &mut <VecMutator<T, M> as Mutator<Vec<T>>>::Cache,
        _subvalue_provider: &dyn SubValueProvider,
        _max_cplx: f64,
    ) -> (Self::Revert, f64) {
        value.insert(mutation.idx, mutation.el);
        let new_cplx = mutator.complexity_from_inner(cache.sum_cplx + mutation.cplx, value.len());
        (RevertCopyElement { idx: mutation.idx }, new_cplx)
    }
}
|
mod logger;
pub use logger::Logger;
|
/// Lexer generated at build time (see build.rs) and spliced in from OUT_DIR.
mod test {
    include!(concat!(env!("OUT_DIR"), "/test.rs"));
}
/// Drives the generated lexer over a fixed sample string, printing each
/// token result, until the lexer returns an error.
fn main() {
    let s = "abc a A ABC abC_def";
    //let s = "abc !".to_string(); // match unmatch
    let mut lex = test::Lexer::new(&s, test::SpaceCounter::new());
    loop {
        let res = lex.yylex();
        println!("{:?}", res);
        if res.is_err() {
            // Presumably Err means end-of-input or an unmatched character —
            // confirm against the generated Lexer's API.
            break;
        }
        println!("remain '{}' characters", lex.remain());
    }
    println!("space count: {}", lex.get_space_counter().count());
}
|
//! Implementation of Printf-Style string formatting
//! as per the [Python Docs](https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting).
use crate::{
builtins::{
try_f64_to_bigint, tuple, PyBaseExceptionRef, PyByteArray, PyBytes, PyFloat, PyInt, PyStr,
},
function::ArgIntoFloat,
protocol::PyBuffer,
stdlib::builtins,
AsObject, PyObjectRef, PyResult, TryFromBorrowedObject, TryFromObject, VirtualMachine,
};
use itertools::Itertools;
use num_traits::cast::ToPrimitive;
use rustpython_format::cformat::*;
use std::str::FromStr;
/// Formats a single value `obj` per `spec` for a bytes format string
/// (printf-style `bytes.__mod__`), returning the formatted bytes.
fn spec_format_bytes(
    vm: &VirtualMachine,
    spec: &CFormatSpec,
    obj: PyObjectRef,
) -> PyResult<Vec<u8>> {
    match &spec.format_type {
        CFormatType::String(conversion) => match conversion {
            // Unlike strings, %r and %a are identical for bytes: the behaviour corresponds to
            // %a for strings (not %r)
            CFormatConversion::Repr | CFormatConversion::Ascii => {
                let b = builtins::ascii(obj, vm)?.into();
                Ok(b)
            }
            CFormatConversion::Str | CFormatConversion::Bytes => {
                // Buffer-like objects are formatted directly...
                if let Ok(buffer) = PyBuffer::try_from_borrowed_object(vm, &obj) {
                    Ok(buffer.contiguous_or_collect(|bytes| spec.format_bytes(bytes)))
                } else {
                    // ...anything else must provide __bytes__.
                    let bytes = vm
                        .get_special_method(&obj, identifier!(vm, __bytes__))?
                        .ok_or_else(|| {
                            vm.new_type_error(format!(
                                "%b requires a bytes-like object, or an object that \
                                 implements __bytes__, not '{}'",
                                obj.class().name()
                            ))
                        })?
                        .invoke((), vm)?;
                    let bytes = PyBytes::try_from_borrowed_object(vm, &bytes)?;
                    Ok(spec.format_bytes(bytes.as_bytes()))
                }
            }
        },
        CFormatType::Number(number_type) => match number_type {
            // %d accepts ints, floats (truncated), and anything with __int__.
            CNumberType::Decimal => match_class!(match &obj {
                ref i @ PyInt => {
                    Ok(spec.format_number(i.as_bigint()).into_bytes())
                }
                ref f @ PyFloat => {
                    Ok(spec
                        .format_number(&try_f64_to_bigint(f.to_f64(), vm)?)
                        .into_bytes())
                }
                obj => {
                    if let Some(method) = vm.get_method(obj.clone(), identifier!(vm, __int__)) {
                        let result = method?.call((), vm)?;
                        if let Some(i) = result.payload::<PyInt>() {
                            return Ok(spec.format_number(i.as_bigint()).into_bytes());
                        }
                    }
                    Err(vm.new_type_error(format!(
                        "%{} format: a number is required, not {}",
                        spec.format_char,
                        obj.class().name()
                    )))
                }
            }),
            // Other numeric conversions (octal, hex, ...) require a real int.
            _ => {
                if let Some(i) = obj.payload::<PyInt>() {
                    Ok(spec.format_number(i.as_bigint()).into_bytes())
                } else {
                    Err(vm.new_type_error(format!(
                        "%{} format: an integer is required, not {}",
                        spec.format_char,
                        obj.class().name()
                    )))
                }
            }
        },
        CFormatType::Float(_) => {
            let class = obj.class().to_owned();
            let value = ArgIntoFloat::try_from_object(vm, obj).map_err(|e| {
                if e.fast_isinstance(vm.ctx.exceptions.type_error) {
                    // formatfloat in bytesobject.c generates its own specific exception
                    // text in this case, mirror it here.
                    vm.new_type_error(format!("float argument required, not {}", class.name()))
                } else {
                    e
                }
            })?;
            Ok(spec.format_float(value.into()).into_bytes())
        }
        CFormatType::Character => {
            // %c accepts an int in range(256)...
            if let Some(i) = obj.payload::<PyInt>() {
                let ch = i
                    .try_to_primitive::<u8>(vm)
                    .map_err(|_| vm.new_overflow_error("%c arg not in range(256)".to_owned()))?
                    as char;
                return Ok(spec.format_char(ch).into_bytes());
            }
            // ...or a single byte from bytes/bytearray.
            if let Some(b) = obj.payload::<PyBytes>() {
                if b.len() == 1 {
                    return Ok(spec.format_char(b.as_bytes()[0] as char).into_bytes());
                }
            } else if let Some(ba) = obj.payload::<PyByteArray>() {
                let buf = ba.borrow_buf();
                if buf.len() == 1 {
                    return Ok(spec.format_char(buf[0] as char).into_bytes());
                }
            }
            Err(vm
                .new_type_error("%c requires an integer in range(256) or a single byte".to_owned()))
        }
    }
}
/// Formats a single value `obj` per `spec` for a str format string
/// (printf-style `str.__mod__`). `idx` is the position of the `%` in the
/// format string, used only for the %b error message.
fn spec_format_string(
    vm: &VirtualMachine,
    spec: &CFormatSpec,
    obj: PyObjectRef,
    idx: &usize,
) -> PyResult<String> {
    match &spec.format_type {
        CFormatType::String(conversion) => {
            let result = match conversion {
                CFormatConversion::Ascii => builtins::ascii(obj, vm)?.into(),
                CFormatConversion::Str => obj.str(vm)?.as_str().to_owned(),
                CFormatConversion::Repr => obj.repr(vm)?.as_str().to_owned(),
                CFormatConversion::Bytes => {
                    // %b is bytes-only; reject it for str formatting.
                    // idx is the position of the %, we want the position of the b
                    return Err(vm.new_value_error(format!(
                        "unsupported format character 'b' (0x62) at index {}",
                        idx + 1
                    )));
                }
            };
            Ok(spec.format_string(result))
        }
        CFormatType::Number(number_type) => match number_type {
            // %d accepts ints, floats (truncated), and anything with __int__.
            CNumberType::Decimal => match_class!(match &obj {
                ref i @ PyInt => {
                    Ok(spec.format_number(i.as_bigint()))
                }
                ref f @ PyFloat => {
                    Ok(spec.format_number(&try_f64_to_bigint(f.to_f64(), vm)?))
                }
                obj => {
                    if let Some(method) = vm.get_method(obj.clone(), identifier!(vm, __int__)) {
                        let result = method?.call((), vm)?;
                        if let Some(i) = result.payload::<PyInt>() {
                            return Ok(spec.format_number(i.as_bigint()));
                        }
                    }
                    Err(vm.new_type_error(format!(
                        "%{} format: a number is required, not {}",
                        spec.format_char,
                        obj.class().name()
                    )))
                }
            }),
            // Other numeric conversions require a real int.
            _ => {
                if let Some(i) = obj.payload::<PyInt>() {
                    Ok(spec.format_number(i.as_bigint()))
                } else {
                    Err(vm.new_type_error(format!(
                        "%{} format: an integer is required, not {}",
                        spec.format_char,
                        obj.class().name()
                    )))
                }
            }
        },
        CFormatType::Float(_) => {
            let value = ArgIntoFloat::try_from_object(vm, obj)?;
            Ok(spec.format_float(value.into()))
        }
        CFormatType::Character => {
            // %c accepts a code point in range(0x110000)...
            if let Some(i) = obj.payload::<PyInt>() {
                let ch = i
                    .as_bigint()
                    .to_u32()
                    .and_then(std::char::from_u32)
                    .ok_or_else(|| {
                        vm.new_overflow_error("%c arg not in range(0x110000)".to_owned())
                    })?;
                return Ok(spec.format_char(ch));
            }
            // ...or a single-character string.
            if let Some(s) = obj.payload::<PyStr>() {
                if let Ok(ch) = s.as_str().chars().exactly_one() {
                    return Ok(spec.format_char(ch));
                }
            }
            Err(vm.new_type_error("%c requires int or char".to_owned()))
        }
    }
}
/// Converts a `*` width/precision argument taken from the values tuple into
/// a concrete quantity. Negative widths use their absolute value here (the
/// sign becomes a LEFT_ADJUST flag via `try_conversion_flag_from_tuple`).
fn try_update_quantity_from_element(
    vm: &VirtualMachine,
    element: Option<&PyObjectRef>,
) -> PyResult<CFormatQuantity> {
    match element {
        Some(width_obj) => {
            if let Some(i) = width_obj.payload::<PyInt>() {
                let i = i.try_to_primitive::<i32>(vm)?.unsigned_abs();
                Ok(CFormatQuantity::Amount(i as usize))
            } else {
                Err(vm.new_type_error("* wants int".to_owned()))
            }
        }
        None => Err(vm.new_type_error("not enough arguments for format string".to_owned())),
    }
}
/// Derives conversion flags from a `*` width argument: a negative width
/// means left-adjust (as in CPython); otherwise no flags.
fn try_conversion_flag_from_tuple(
    vm: &VirtualMachine,
    element: Option<&PyObjectRef>,
) -> PyResult<CConversionFlags> {
    match element {
        Some(width_obj) => {
            if let Some(i) = width_obj.payload::<PyInt>() {
                let i = i.try_to_primitive::<i32>(vm)?;
                let flags = if i < 0 {
                    CConversionFlags::LEFT_ADJUST
                } else {
                    CConversionFlags::from_bits(0).unwrap()
                };
                Ok(flags)
            } else {
                Err(vm.new_type_error("* wants int".to_owned()))
            }
        }
        None => Err(vm.new_type_error("not enough arguments for format string".to_owned())),
    }
}
/// If the spec's width is `*`, consumes the next tuple element and resolves
/// it into a concrete width (updating `q`) plus any sign-derived flags
/// (inserted into `f`). No-op for non-`*` widths.
fn try_update_quantity_from_tuple<'a, I: Iterator<Item = &'a PyObjectRef>>(
    vm: &VirtualMachine,
    elements: &mut I,
    q: &mut Option<CFormatQuantity>,
    f: &mut CConversionFlags,
) -> PyResult<()> {
    let Some(CFormatQuantity::FromValuesTuple) = q else {
        return Ok(());
    };
    let element = elements.next();
    f.insert(try_conversion_flag_from_tuple(vm, element)?);
    let quantity = try_update_quantity_from_element(vm, element)?;
    *q = Some(quantity);
    Ok(())
}
/// If the spec's precision is `*`, consumes the next tuple element and
/// resolves it into a concrete precision. No-op otherwise.
fn try_update_precision_from_tuple<'a, I: Iterator<Item = &'a PyObjectRef>>(
    vm: &VirtualMachine,
    elements: &mut I,
    p: &mut Option<CFormatPrecision>,
) -> PyResult<()> {
    let Some(CFormatPrecision::Quantity(CFormatQuantity::FromValuesTuple)) = p else {
        return Ok(());
    };
    let quantity = try_update_quantity_from_element(vm, elements.next())?;
    *p = Some(CFormatPrecision::Quantity(quantity));
    Ok(())
}
/// Error raised when specifier validation fails (mixed mapping keys etc.).
fn specifier_error(vm: &VirtualMachine) -> PyBaseExceptionRef {
    vm.new_type_error("format requires a mapping".to_owned())
}
/// Implements `bytes.__mod__`: parses `format_string`, then formats it
/// against `values_obj`, which may be a mapping (for `%(key)s`-style specs),
/// a tuple, or a single value.
pub(crate) fn cformat_bytes(
    vm: &VirtualMachine,
    format_string: &[u8],
    values_obj: PyObjectRef,
) -> PyResult<Vec<u8>> {
    let mut format = CFormatBytes::parse_from_bytes(format_string)
        .map_err(|err| vm.new_value_error(err.to_string()))?;
    let (num_specifiers, mapping_required) = format
        .check_specifiers()
        .ok_or_else(|| specifier_error(vm))?;
    let mut result = vec![];
    // "Mapping" here means subscriptable but not one of the sequence types
    // that %-formatting treats as positional arguments.
    let is_mapping = values_obj.class().has_attr(identifier!(vm, __getitem__))
        && !values_obj.fast_isinstance(vm.ctx.types.tuple_type)
        && !values_obj.fast_isinstance(vm.ctx.types.bytes_type)
        && !values_obj.fast_isinstance(vm.ctx.types.bytearray_type);
    if num_specifiers == 0 {
        // literal only
        return if is_mapping
            || values_obj
                .payload::<tuple::PyTuple>()
                .map_or(false, |e| e.is_empty())
        {
            for (_, part) in format.iter_mut() {
                match part {
                    CFormatPart::Literal(literal) => result.append(literal),
                    CFormatPart::Spec(_) => unreachable!(),
                }
            }
            Ok(result)
        } else {
            Err(vm.new_type_error("not all arguments converted during bytes formatting".to_owned()))
        };
    }
    if mapping_required {
        // dict
        return if is_mapping {
            for (_, part) in format.iter_mut() {
                match part {
                    CFormatPart::Literal(literal) => result.append(literal),
                    CFormatPart::Spec(spec) => {
                        // Every spec is keyed here (check_specifiers enforced it).
                        let value = match &spec.mapping_key {
                            Some(key) => {
                                let k = vm.ctx.new_bytes(key.as_str().as_bytes().to_vec());
                                values_obj.get_item(k.as_object(), vm)?
                            }
                            None => unreachable!(),
                        };
                        let mut part_result = spec_format_bytes(vm, spec, value)?;
                        result.append(&mut part_result);
                    }
                }
            }
            Ok(result)
        } else {
            Err(vm.new_type_error("format requires a mapping".to_owned()))
        };
    }
    // tuple
    let values = if let Some(tup) = values_obj.payload_if_subclass::<tuple::PyTuple>(vm) {
        tup.as_slice()
    } else {
        // A single non-tuple value acts as a one-element argument list.
        std::slice::from_ref(&values_obj)
    };
    let mut value_iter = values.iter();
    for (_, part) in format.iter_mut() {
        match part {
            CFormatPart::Literal(literal) => result.append(literal),
            CFormatPart::Spec(spec) => {
                // `*` widths/precisions consume tuple elements first.
                try_update_quantity_from_tuple(
                    vm,
                    &mut value_iter,
                    &mut spec.min_field_width,
                    &mut spec.flags,
                )?;
                try_update_precision_from_tuple(vm, &mut value_iter, &mut spec.precision)?;
                let value = match value_iter.next() {
                    Some(obj) => Ok(obj.clone()),
                    None => {
                        Err(vm.new_type_error("not enough arguments for format string".to_owned()))
                    }
                }?;
                let mut part_result = spec_format_bytes(vm, spec, value)?;
                result.append(&mut part_result);
            }
        }
    }
    // check that all arguments were converted
    if value_iter.next().is_some() && !is_mapping {
        Err(vm.new_type_error("not all arguments converted during bytes formatting".to_owned()))
    } else {
        Ok(result)
    }
}
/// Formats `format_string` using C-style `%` formatting (str `__mod__`),
/// drawing values from `values_obj`: a tuple, a mapping, or a single value.
///
/// Error messages mirror CPython's:
/// - "not all arguments converted ..." when extra positional values remain,
/// - "format requires a mapping" when `%(key)s` specs lack a mapping,
/// - "not enough arguments for format string" when the tuple runs short.
pub(crate) fn cformat_string(
    vm: &VirtualMachine,
    format_string: &str,
    values_obj: PyObjectRef,
) -> PyResult<String> {
    let mut format = CFormatString::from_str(format_string)
        .map_err(|err| vm.new_value_error(err.to_string()))?;
    let (num_specifiers, mapping_required) = format
        .check_specifiers()
        .ok_or_else(|| specifier_error(vm))?;
    let mut result = String::new();
    // A "mapping" is anything subscriptable that is not itself a tuple or str
    // (both are subscriptable but are treated as positional values instead).
    let is_mapping = values_obj.class().has_attr(identifier!(vm, __getitem__))
        && !values_obj.fast_isinstance(vm.ctx.types.tuple_type)
        && !values_obj.fast_isinstance(vm.ctx.types.str_type);
    if num_specifiers == 0 {
        // literal only
        // Only a mapping or an empty tuple may accompany a spec-free format;
        // any other value would be an unconsumed argument.
        return if is_mapping
            || values_obj
                .payload::<tuple::PyTuple>()
                .map_or(false, |e| e.is_empty())
        {
            for (_, part) in format.iter() {
                match part {
                    CFormatPart::Literal(literal) => result.push_str(literal),
                    // check_specifiers reported zero specs, so none can appear.
                    CFormatPart::Spec(_) => unreachable!(),
                }
            }
            Ok(result)
        } else {
            Err(vm
                .new_type_error("not all arguments converted during string formatting".to_owned()))
        };
    }
    if mapping_required {
        // dict
        return if is_mapping {
            for (idx, part) in format.iter() {
                match part {
                    CFormatPart::Literal(literal) => result.push_str(literal),
                    CFormatPart::Spec(spec) => {
                        // mapping_required guarantees every spec carries a key.
                        let value = match &spec.mapping_key {
                            Some(key) => values_obj.get_item(key.as_str(), vm)?,
                            None => unreachable!(),
                        };
                        let part_result = spec_format_string(vm, spec, value, idx)?;
                        result.push_str(&part_result);
                    }
                }
            }
            Ok(result)
        } else {
            Err(vm.new_type_error("format requires a mapping".to_owned()))
        };
    }
    // tuple
    // A lone non-tuple value is treated as a 1-tuple (`"%s" % x`).
    let values = if let Some(tup) = values_obj.payload_if_subclass::<tuple::PyTuple>(vm) {
        tup.as_slice()
    } else {
        std::slice::from_ref(&values_obj)
    };
    let mut value_iter = values.iter();
    // iter_mut: `*`-style width/precision specs are rewritten in place below.
    for (idx, part) in format.iter_mut() {
        match part {
            CFormatPart::Literal(literal) => result.push_str(literal),
            CFormatPart::Spec(spec) => {
                // `%*d` / `%.*d` consume extra tuple items for width/precision.
                try_update_quantity_from_tuple(
                    vm,
                    &mut value_iter,
                    &mut spec.min_field_width,
                    &mut spec.flags,
                )?;
                try_update_precision_from_tuple(vm, &mut value_iter, &mut spec.precision)?;
                let value = match value_iter.next() {
                    Some(obj) => Ok(obj.clone()),
                    None => {
                        Err(vm.new_type_error("not enough arguments for format string".to_owned()))
                    }
                }?;
                let part_result = spec_format_string(vm, spec, value, idx)?;
                result.push_str(&part_result);
            }
        }
    }
    // check that all arguments were converted
    // (mappings are exempt: leftover keys are not an error)
    if value_iter.next().is_some() && !is_mapping {
        Err(vm.new_type_error("not all arguments converted during string formatting".to_owned()))
    } else {
        Ok(result)
    }
}
|
pub mod grayscale;
pub mod line;
pub mod binary;
pub mod hough;
pub mod ascii_art;
pub mod shrink; |
use std::iter;
use std::slice;
/// A channel slice iterator.
///
/// See [Channel::iter][crate::Channel::iter].
pub struct Iter<'a, T> {
    // Steps through the underlying slice, visiting every `step`-th element.
    iter: iter::StepBy<slice::Iter<'a, T>>,
}
impl<'a, T> Iter<'a, T> {
    /// Wraps `slice`, yielding every `step`-th element starting from index 0.
    #[inline]
    pub(super) fn new(slice: &'a [T], step: usize) -> Self {
        let iter = slice.iter().step_by(step);
        Self { iter }
    }
}
// Note: we include a bunch of forwarding implementations so that the
// underlying `StepBy` adapter's specialized methods are used rather than
// the default `Iterator` provided ones.
// Every method delegates to the inner `StepBy` adapter and copies the yielded
// reference out (`T: Copy`), so the adapter's optimizations are preserved
// behind the by-value facade.
impl<'a, T> Iterator for Iter<'a, T>
where
    T: Copy,
{
    type Item = T;
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        self.iter.next().copied()
    }
    #[inline]
    fn nth(&mut self, n: usize) -> Option<Self::Item> {
        self.iter.nth(n).copied()
    }
    #[inline]
    fn last(self) -> Option<Self::Item> {
        self.iter.last().copied()
    }
    #[inline]
    fn find<P>(&mut self, mut predicate: P) -> Option<Self::Item>
    where
        Self: Sized,
        P: FnMut(&Self::Item) -> bool,
    {
        // The inner iterator yields `&T`; adapt the by-value predicate.
        self.iter.find(|item| predicate(*item)).copied()
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
    #[inline]
    fn count(self) -> usize {
        self.iter.count()
    }
    #[inline]
    fn for_each<F>(self, mut f: F)
    where
        Self: Sized,
        F: FnMut(Self::Item),
    {
        self.iter.for_each(move |item| f(*item));
    }
    #[inline]
    fn all<F>(&mut self, mut f: F) -> bool
    where
        Self: Sized,
        F: FnMut(Self::Item) -> bool,
    {
        self.iter.all(move |item| f(*item))
    }
    #[inline]
    fn any<F>(&mut self, mut f: F) -> bool
    where
        Self: Sized,
        F: FnMut(Self::Item) -> bool,
    {
        self.iter.any(move |item| f(*item))
    }
    #[inline]
    fn find_map<B, F>(&mut self, mut f: F) -> Option<B>
    where
        Self: Sized,
        F: FnMut(Self::Item) -> Option<B>,
    {
        self.iter.find_map(move |item| f(*item))
    }
    #[inline]
    fn position<P>(&mut self, mut predicate: P) -> Option<usize>
    where
        Self: Sized,
        P: FnMut(Self::Item) -> bool,
    {
        self.iter.position(move |item| predicate(*item))
    }
}
// Back-to-front iteration forwards to the adapter as well.
impl<'a, T> DoubleEndedIterator for Iter<'a, T>
where
    T: Copy,
{
    #[inline]
    fn next_back(&mut self) -> Option<Self::Item> {
        self.iter.next_back().copied()
    }
    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
        self.iter.nth_back(n).copied()
    }
}
// The adapter knows the exact remaining length; expose it.
impl<'a, T> ExactSizeIterator for Iter<'a, T>
where
    T: Copy,
{
    #[inline]
    fn len(&self) -> usize {
        self.iter.len()
    }
}
/// A mutable channel slice iterator.
///
/// See [ChannelMut::iter_mut][crate::ChannelMut::iter_mut].
pub struct IterMut<'a, T> {
    // Steps through the underlying slice, visiting every `step`-th element.
    iter: iter::StepBy<slice::IterMut<'a, T>>,
}
impl<'a, T> IterMut<'a, T> {
    /// Wraps `slice` mutably, yielding every `step`-th element from index 0.
    #[inline]
    pub(super) fn new(slice: &'a mut [T], step: usize) -> Self {
        let iter = slice.iter_mut().step_by(step);
        Self { iter }
    }
}
// Forward every method to the inner `StepBy` adapter so its specialized
// implementations are used; mutable references are yielded unchanged.
impl<'a, T> Iterator for IterMut<'a, T> {
    type Item = &'a mut T;
    // `#[inline]` added for consistency: every other forwarding method here
    // (and all of `Iter`'s, including its `next`) carries the hint.
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        self.iter.next()
    }
    #[inline]
    fn nth(&mut self, n: usize) -> Option<Self::Item> {
        self.iter.nth(n)
    }
    #[inline]
    fn last(self) -> Option<Self::Item> {
        self.iter.last()
    }
    #[inline]
    fn find<P>(&mut self, predicate: P) -> Option<Self::Item>
    where
        Self: Sized,
        P: FnMut(&Self::Item) -> bool,
    {
        self.iter.find(predicate)
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
    #[inline]
    fn count(self) -> usize {
        self.iter.count()
    }
    #[inline]
    fn for_each<F>(self, f: F)
    where
        Self: Sized,
        F: FnMut(Self::Item),
    {
        self.iter.for_each(f);
    }
    #[inline]
    fn all<F>(&mut self, f: F) -> bool
    where
        Self: Sized,
        F: FnMut(Self::Item) -> bool,
    {
        self.iter.all(f)
    }
    #[inline]
    fn any<F>(&mut self, f: F) -> bool
    where
        Self: Sized,
        F: FnMut(Self::Item) -> bool,
    {
        self.iter.any(f)
    }
    #[inline]
    fn find_map<B, F>(&mut self, f: F) -> Option<B>
    where
        Self: Sized,
        F: FnMut(Self::Item) -> Option<B>,
    {
        self.iter.find_map(f)
    }
    #[inline]
    fn position<P>(&mut self, predicate: P) -> Option<usize>
    where
        Self: Sized,
        P: FnMut(Self::Item) -> bool,
    {
        self.iter.position(predicate)
    }
}
// Back-to-front iteration forwards to the adapter as well.
impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
    // `#[inline]` added for consistency with the `Iter` counterpart and the
    // rest of these forwarding impls.
    #[inline]
    fn next_back(&mut self) -> Option<Self::Item> {
        self.iter.next_back()
    }
    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
        self.iter.nth_back(n)
    }
}
// The adapter knows the exact remaining length; expose it.
impl<'a, T> ExactSizeIterator for IterMut<'a, T> {
    #[inline]
    fn len(&self) -> usize {
        self.iter.len()
    }
}
|
extern crate ansi_term;
extern crate getopts;
use ansi_term::Color::Red;
use std::env;
mod arguments;
mod io;
mod parser;
/// Entry point: parses command-line arguments, printing parse errors in red.
fn main() {
    // Get and parse arguments.
    let args: Vec<String> = env::args().collect();
    // Bound as `_arguments`: the parse/validate step (and its error
    // reporting) is what matters here; the value itself is not yet used,
    // and the leading underscore silences the unused-variable warning.
    let _arguments = match arguments::Arguments::parse(&args) {
        Ok(a) => a,
        Err(f) => {
            // Any empty error here is considered an early bail.
            if f.is_empty() {
                return;
            }
            // Otherwise, print the error.
            eprintln!("{}", Red.paint(f));
            return;
        }
    };
}
|
use std::cell::RefCell;
// The interface shared by the real subject and its proxy.
trait Operation {
    fn touch(&self) -> ();
}
// The "expensive" real subject the proxy creates lazily.
#[derive(Debug)]
struct BigBlob {
    payload: String,
}
impl BigBlob {
fn new() -> Self {
println!("BigBlob created");
BigBlob { payload: "Me be the BigBlob!".to_string() }
}
}
impl Operation for BigBlob {
    /// Logs that the concrete blob was touched.
    fn touch(&self) {
        println!("BigBlob {:?} touched", self);
    }
}
struct BigBlobProxy {
target: RefCell<Option<Box<Operation>>>,
}
impl BigBlobProxy {
fn new() -> Self {
println!("BigBlobProxy created");
BigBlobProxy { target: RefCell::new(None) }
}
}
impl Operation for BigBlobProxy {
    // Forwards to the cached subject if present; otherwise creates it,
    // touches it once directly, and caches it for subsequent calls.
    fn touch(&self) -> () {
        {
            // Inner scope: the shared borrow must end before the
            // `borrow_mut()` below, or RefCell would panic at runtime.
            let base = self.target.borrow();
            if base.is_some() {
                println!("Performing proxied touch");
                base.as_ref().unwrap().touch();
                return;
            }
        }
        let bb = BigBlob::new();
        bb.touch();
        *self.target.borrow_mut() = Some(Box::new(bb));
    }
}
//Here's a step-by-step guide to getting from your Java example to some decent Clojure:
//
//recognize that your singleton is just global, mutable state.
//The singleton may work fine but it's not necessary.
//So we've moved from Java: singleton -> Java: global, mutable state
//
//refactor your Java example to use local mutable state instead of global mutable state,
//by passing the map to the methods that modify it.
//So let's move from Java: global, mutable state -> Java: local, mutable state
//
//*now, instead of destructively updating the map every time, find/write a Java library
//(such as the one the Clojure implementation uses) that does
//not mutate when adding/removing key/value pairs to/from maps.
//Remember to return values that have been "updated", otherwise,
//the changes won't be visible to other code.
//So we've just moved from Java: local, mutable state -> Java: local, immutable state
//
//at this point, you have an FP solution, but it's coded in Java.
//Your initial Clojure solution could end up as a nearly 1:1 translation,
//but as you learn more Clojure you'll figure out how to take advantage of its strengths and improve
//and shorten the code. Plus, you might learn some cool patterns for dealing with "mutable"
//state in a purely functional way. So it's up to you to make the leap from Java: local,
//immutable state -> Clojure: local, immutable state
//From point 3 above: one of the major points of FP is "same thing in, same thing out".
//Mutable state totally destroys this concept, and with it, the advantages of pure code.
//Clojure doesn't force you to be pure, but it certainly makes it easy to do so.
//So if you want to learn how to write good Clojure code, you'll have to learn how to avoid (most)
//mutable state at some point.
//here interior mutability (RefCell) saves the day
pub fn run() {
println!("-------------------- {} --------------------", file!());
//virtual proxy
let r = BigBlobProxy::new();
r.touch();
r.touch();
r.touch();
} |
mod vecbuf;
use std::io::{ self, Read, Write };
use rustls::Session;
use rustls::WriteV;
use tokio_io::{ AsyncRead, AsyncWrite };
/// Couples a raw async transport with a rustls session so the two can be
/// driven together; fields are public for use elsewhere in the crate.
pub struct Stream<'a, IO: 'a, S: 'a> {
    pub io: &'a mut IO,
    pub session: &'a mut S,
    // set once a 0-byte TLS read (transport EOF) has been observed
    pub eof: bool,
}
/// Flushes pending TLS records from the session out to the transport,
/// returning the number of bytes written.
pub trait WriteTls<'a, IO: AsyncRead + AsyncWrite, S: Session>: Read + Write {
    fn write_tls(&mut self) -> io::Result<usize>;
}
// Which direction `complete_inner_io` is being driven on behalf of:
// `Empty` = both directions, `Readable` = a read, `Writable` = a write.
#[derive(Clone, Copy)]
enum Focus {
    Empty,
    Readable,
    Writable
}
impl<'a, IO: AsyncRead + AsyncWrite, S: Session> Stream<'a, IO, S> {
    /// Wraps `io` and `session`; EOF has not been observed yet.
    pub fn new(io: &'a mut IO, session: &'a mut S) -> Self {
        Stream {
            io,
            session,
            // The state so far is only used to detect EOF, so either Stream
            // or EarlyData state should both be all right.
            eof: false,
        }
    }
    /// Builder-style override of the EOF flag.
    pub fn set_eof(mut self, eof: bool) -> Self {
        self.eof = eof;
        self
    }
    /// Drives both directions; returns (bytes read, bytes written).
    pub fn complete_io(&mut self) -> io::Result<(usize, usize)> {
        self.complete_inner_io(Focus::Empty)
    }
    // Pulls TLS records off the transport and decrypts/processes them.
    fn complete_read_io(&mut self) -> io::Result<usize> {
        let n = self.session.read_tls(self.io)?;
        self.session.process_new_packets()
            .map_err(|err| {
                // In case we have an alert to send describing this error,
                // try a last-gasp write -- but don't predate the primary
                // error.
                let _ = self.write_tls();
                io::Error::new(io::ErrorKind::InvalidData, err)
            })?;
        Ok(n)
    }
    fn complete_write_io(&mut self) -> io::Result<usize> {
        self.write_tls()
    }
    // Core pump loop: repeatedly flush pending writes, then (unless focused
    // on writing) pull reads, until the session is satisfied or would block.
    fn complete_inner_io(&mut self, focus: Focus) -> io::Result<(usize, usize)> {
        let mut wrlen = 0;
        let mut rdlen = 0;
        loop {
            let mut write_would_block = false;
            let mut read_would_block = false;
            // Writes first: pending alerts/handshake data must go out before
            // more input is useful.
            while self.session.wants_write() {
                match self.complete_write_io() {
                    Ok(n) => wrlen += n,
                    Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => {
                        write_would_block = true;
                        break
                    },
                    Err(err) => return Err(err)
                }
            }
            // A write-focused caller never needs the read half driven.
            if let Focus::Writable = focus {
                if !write_would_block {
                    return Ok((rdlen, wrlen));
                } else {
                    return Err(io::ErrorKind::WouldBlock.into());
                }
            }
            if !self.eof && self.session.wants_read() {
                match self.complete_read_io() {
                    // 0 bytes from the transport means EOF.
                    Ok(0) => self.eof = true,
                    Ok(n) => rdlen += n,
                    Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => {
                        read_would_block = true
                    }
                    Err(err) => return Err(err),
                }
            }
            let would_block = match focus {
                Focus::Empty => write_would_block || read_would_block,
                Focus::Readable => read_would_block,
                Focus::Writable => write_would_block,
            };
            // Decide whether to loop again based on (eof, handshaking, block).
            match (
                self.eof,
                self.session.is_handshaking(),
                would_block,
            ) {
                // EOF mid-handshake: the peer vanished.
                (true, true, _) => return Err(io::ErrorKind::UnexpectedEof.into()),
                // Handshake done but blocked: report WouldBlock only if no
                // progress at all was made for the focused direction.
                (_, false, true) => {
                    let would_block = match focus {
                        Focus::Empty => rdlen == 0 && wrlen == 0,
                        Focus::Readable => rdlen == 0,
                        Focus::Writable => wrlen == 0
                    };
                    return if would_block {
                        Err(io::ErrorKind::WouldBlock.into())
                    } else {
                        Ok((rdlen, wrlen))
                    };
                },
                (_, false, _) => return Ok((rdlen, wrlen)),
                (_, true, true) => return Err(io::ErrorKind::WouldBlock.into()),
                // Still handshaking and not blocked: keep pumping.
                (..) => ()
            }
        }
    }
}
impl<'a, IO: AsyncRead + AsyncWrite, S: Session> WriteTls<'a, IO, S> for Stream<'a, IO, S> {
    // Writes pending TLS records using vectored I/O over the async transport.
    fn write_tls(&mut self) -> io::Result<usize> {
        use futures::Async;
        use self::vecbuf::VecBuf;
        // Adapter: exposes the async transport to rustls through its
        // synchronous `WriteV` interface, mapping NotReady to WouldBlock.
        struct V<'a, IO: 'a>(&'a mut IO);
        impl<'a, IO: AsyncWrite> WriteV for V<'a, IO> {
            fn writev(&mut self, vbytes: &[&[u8]]) -> io::Result<usize> {
                let mut vbytes = VecBuf::new(vbytes);
                match self.0.write_buf(&mut vbytes) {
                    Ok(Async::Ready(n)) => Ok(n),
                    Ok(Async::NotReady) => Err(io::ErrorKind::WouldBlock.into()),
                    Err(err) => Err(err)
                }
            }
        }
        let mut vecio = V(self.io);
        self.session.writev_tls(&mut vecio)
    }
}
impl<'a, IO: AsyncRead + AsyncWrite, S: Session> Read for Stream<'a, IO, S> {
    // Pumps TLS records until plaintext is available (or no more progress
    // can be made), then hands decrypted bytes to the caller.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        while self.session.wants_read() {
            // (0, _) => the read side made no progress; stop pumping.
            if let (0, _) = self.complete_inner_io(Focus::Readable)? {
                break
            }
        }
        self.session.read(buf)
    }
}
impl<'a, IO: AsyncRead + AsyncWrite, S: Session> Write for Stream<'a, IO, S> {
    // Buffers plaintext into the session, then drives encrypted writes out.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let len = self.session.write(buf)?;
        while self.session.wants_write() {
            match self.complete_inner_io(Focus::Writable) {
                Ok(_) => (),
                // If some plaintext was already accepted, report that rather
                // than WouldBlock; the records will be flushed later.
                Err(ref err) if err.kind() == io::ErrorKind::WouldBlock && len != 0 => break,
                Err(err) => return Err(err)
            }
        }
        if len != 0 || buf.is_empty() {
            Ok(len)
        } else {
            // not write zero
            // (the session buffer was full; retry once now that records were
            // flushed, and surface WouldBlock rather than a 0-byte write)
            self.session.write(buf)
                .and_then(|len| if len != 0 {
                    Ok(len)
                } else {
                    Err(io::ErrorKind::WouldBlock.into())
                })
        }
    }
    // Flushes the session's internal buffer, then all pending TLS records.
    fn flush(&mut self) -> io::Result<()> {
        self.session.flush()?;
        while self.session.wants_write() {
            self.complete_inner_io(Focus::Writable)?;
        }
        Ok(())
    }
}
#[cfg(test)]
mod test_stream;
|
pub mod bencode;
pub use bencode::BValue;
#[cfg(test)]
mod tests {
    use std::collections::HashMap;
    use super::*;
    /// Positive and negative integers in `i...e` form.
    #[test]
    fn test_parse_number() {
        let positive = BValue::from_bytes(&b"i3228e"[..]);
        assert_eq!(positive, Ok((&b""[..], BValue::BNumber(3228))));
        let negative = BValue::from_bytes(&b"i-3228e"[..]);
        assert_eq!(negative, Ok((&b""[..], BValue::BNumber(-3228))));
    }
    /// Length-prefixed byte strings.
    #[test]
    fn test_parse_bytes() {
        let parsed = BValue::from_bytes(&b"12:Hello World!"[..]);
        let expected = BValue::BBytes(b"Hello World!".to_vec());
        assert_eq!(parsed, Ok((&b""[..], expected)));
    }
    /// A homogeneous list of byte strings.
    #[test]
    fn test_parse_list() {
        let expected = BValue::BList(vec![
            BValue::BBytes(b"spam".to_vec()),
            BValue::BBytes(b"eggs".to_vec()),
        ]);
        assert_eq!(
            BValue::from_bytes(&b"l4:spam4:eggse"[..]),
            Ok((&b""[..], expected))
        );
    }
    /// A list mixing numbers, bytes, a nested list and a dict.
    #[test]
    fn test_parse_list_diff_types() {
        let mut dict: HashMap<String, BValue> = HashMap::new();
        dict.insert("cow".to_string(), BValue::BBytes(b"moo".to_vec()));
        let expected = BValue::BList(vec![
            BValue::BNumber(-3228),
            BValue::BBytes(b"spam".to_vec()),
            BValue::BList(vec![BValue::BNumber(42)]),
            BValue::BDict(dict),
        ]);
        assert_eq!(
            BValue::from_bytes(&b"li-3228e4:spamli42eed3:cow3:mooee"[..]),
            Ok((&b""[..], expected))
        );
    }
    /// A flat dictionary of byte-string values.
    #[test]
    fn test_parse_dict() {
        let mut expected: HashMap<String, BValue> = HashMap::new();
        expected.insert("cow".to_string(), BValue::BBytes(b"moo".to_vec()));
        expected.insert("spam".to_string(), BValue::BBytes(b"eggs".to_vec()));
        assert_eq!(
            BValue::from_bytes(&b"d3:cow3:moo4:spam4:eggse"[..]),
            Ok((&b""[..], BValue::BDict(expected)))
        );
    }
}
|
use std::cmp::Ordering;
use serde::{Serialize, Deserialize};
use smallvec::SmallVec;
/// A node in the serialized scene: a named entity plus its components.
#[derive(Default, Serialize, Deserialize, Debug, Clone)]
pub struct SceneNode {
    pub name: String,
    // Defaults to "" when absent from the serialized form.
    #[serde(default)]
    pub prefab: String,
    pub components: Vec<Component>,
    // Not serialized — presumably rebuilt after load; TODO confirm.
    #[serde(skip)]
    pub children: SmallVec<[usize; 8]>,
}
impl SceneNode {
    /// Returns the parent's name, if a `Parent` component is attached.
    pub fn get_parent(&self) -> Option<&String> {
        self.components.iter().find_map(|component| match component {
            Component::Parent(name) => Some(name),
            _ => None,
        })
    }
    /// Returns the `Parent` component mutably, if one is attached.
    pub fn get_parent_mut(&mut self) -> Option<&mut Component> {
        self.components.iter_mut().find_map(|component| match component {
            Component::Parent(_) => Some(component),
            _ => None,
        })
    }
    /// Updates the parent name, attaching a `Parent` component if missing.
    pub fn set_parent(&mut self, parent_name: String) {
        match self.get_parent_mut() {
            Some(Component::Parent(name)) => *name = parent_name,
            _ => self.components.push(Component::Parent(parent_name)),
        }
    }
    /// Returns the `Transform` component mutably, if one is attached.
    pub fn get_transform_mut(&mut self) -> Option<&mut Component> {
        self.components.iter_mut().find_map(|component| match component {
            Component::Transform { .. } => Some(component),
            _ => None,
        })
    }
    /// Updates translation/rotation, attaching a `Transform` if missing.
    pub fn set_transform(&mut self, new_translation: &(f32, f32), new_rotation: f32) {
        match self.get_transform_mut() {
            Some(Component::Transform { translation, rotation }) => {
                *translation = *new_translation;
                *rotation = new_rotation;
            }
            _ => self.components.push(Component::Transform {
                translation: *new_translation,
                rotation: new_rotation,
            }),
        }
    }
    /// Returns the `Mesh` component mutably, if one is attached.
    pub fn get_mesh_mut(&mut self) -> Option<&mut Component> {
        self.components.iter_mut().find_map(|component| match component {
            Component::Mesh(_) => Some(component),
            _ => None,
        })
    }
    /// Updates the mesh name, attaching a `Mesh` component if missing.
    pub fn set_mesh(&mut self, mesh_name: String) {
        match self.get_mesh_mut() {
            Some(Component::Mesh(name)) => *name = mesh_name,
            _ => self.components.push(Component::Mesh(mesh_name)),
        }
    }
    /// Returns the `RigidBody` component mutably, if one is attached.
    pub fn get_rigid_body_mut(&mut self) -> Option<&mut Component> {
        self.components.iter_mut().find_map(|component| match component {
            Component::RigidBody(_) => Some(component),
            _ => None,
        })
    }
    /// Updates the rigid-body name, attaching the component if missing.
    pub fn set_rigid_body(&mut self, rigid_body_name: String) {
        match self.get_rigid_body_mut() {
            Some(Component::RigidBody(name)) => *name = rigid_body_name,
            _ => self.components.push(Component::RigidBody(rigid_body_name)),
        }
    }
    /// Returns the `Collider` component mutably, if one is attached.
    pub fn get_collider_mut(&mut self) -> Option<&mut Component> {
        self.components.iter_mut().find_map(|component| match component {
            Component::Collider(_) => Some(component),
            _ => None,
        })
    }
    /// Updates the collider name, attaching the component if missing.
    pub fn set_collider(&mut self, collider_name: String) {
        match self.get_collider_mut() {
            Some(Component::Collider(name)) => *name = collider_name,
            _ => self.components.push(Component::Collider(collider_name)),
        }
    }
}
/// Data attached to a `SceneNode`; at most one of each kind is expected by
/// the `SceneNode` setters above.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum Component {
    Transform { translation: (f32, f32), rotation: f32 },
    Parent(String),
    Mesh(String),
    RigidBody(String),
    Collider(String),
}
/// Orders components by kind so lists sort deterministically:
/// Transform < Parent < Mesh < RigidBody < Collider.
pub fn compare_components(component_1: &Component, component_2: &Component) -> Ordering {
    // Lower value sorts earlier.
    let get_priority = |component: &Component| match component {
        Component::Transform { .. } => 0,
        Component::Parent(_) => 1,
        Component::Mesh(_) => 2,
        Component::RigidBody(_) => 3,
        Component::Collider(_) => 4,
    };
    // `Ord::cmp` replaces the manual three-way if/else chain
    // (clippy::comparison_chain); behavior is identical.
    get_priority(component_1).cmp(&get_priority(component_2))
}
|
// Machine-generated Windows (WinRT Printing3D) bindings: lints are silenced
// wholesale and classes are projected as opaque interface pointers.
#![allow(non_snake_case, non_camel_case_types, non_upper_case_globals, clashing_extern_declarations, clippy::all)]
#[link(name = "windows")]
extern "system" {}
pub type Print3DManager = *mut ::core::ffi::c_void;
pub type Print3DTask = *mut ::core::ffi::c_void;
pub type Print3DTaskCompletedEventArgs = *mut ::core::ffi::c_void;
// Enum projected as a transparent i32 newtype with associated constants.
#[repr(transparent)]
pub struct Print3DTaskCompletion(pub i32);
impl Print3DTaskCompletion {
    pub const Abandoned: Self = Self(0i32);
    pub const Canceled: Self = Self(1i32);
    pub const Failed: Self = Self(2i32);
    pub const Slicing: Self = Self(3i32);
    pub const Submitted: Self = Self(4i32);
}
// Manual impls (rather than derives) match the generator's output style.
impl ::core::marker::Copy for Print3DTaskCompletion {}
impl ::core::clone::Clone for Print3DTaskCompletion {
    fn clone(&self) -> Self {
        *self
    }
}
// Enum projected as a transparent i32 newtype with associated constants.
#[repr(transparent)]
pub struct Print3DTaskDetail(pub i32);
impl Print3DTaskDetail {
    pub const Unknown: Self = Self(0i32);
    pub const ModelExceedsPrintBed: Self = Self(1i32);
    pub const UploadFailed: Self = Self(2i32);
    pub const InvalidMaterialSelection: Self = Self(3i32);
    pub const InvalidModel: Self = Self(4i32);
    pub const ModelNotManifold: Self = Self(5i32);
    pub const InvalidPrintTicket: Self = Self(6i32);
}
impl ::core::marker::Copy for Print3DTaskDetail {}
impl ::core::clone::Clone for Print3DTaskDetail {
    fn clone(&self) -> Self {
        *self
    }
}
// Opaque WinRT class/delegate projections.
pub type Print3DTaskRequest = *mut ::core::ffi::c_void;
pub type Print3DTaskRequestedEventArgs = *mut ::core::ffi::c_void;
pub type Print3DTaskSourceChangedEventArgs = *mut ::core::ffi::c_void;
pub type Print3DTaskSourceRequestedArgs = *mut ::core::ffi::c_void;
pub type Print3DTaskSourceRequestedHandler = *mut ::core::ffi::c_void;
pub type Printing3D3MFPackage = *mut ::core::ffi::c_void;
pub type Printing3DBaseMaterial = *mut ::core::ffi::c_void;
pub type Printing3DBaseMaterialGroup = *mut ::core::ffi::c_void;
// Plain C-layout struct projection.
#[repr(C)]
pub struct Printing3DBufferDescription {
    pub Format: Printing3DBufferFormat,
    pub Stride: u32,
}
impl ::core::marker::Copy for Printing3DBufferDescription {}
impl ::core::clone::Clone for Printing3DBufferDescription {
    fn clone(&self) -> Self {
        *self
    }
}
// Enum projected as a transparent i32 newtype with associated constants.
#[repr(transparent)]
pub struct Printing3DBufferFormat(pub i32);
impl Printing3DBufferFormat {
    pub const Unknown: Self = Self(0i32);
    pub const R32G32B32A32Float: Self = Self(2i32);
    pub const R32G32B32A32UInt: Self = Self(3i32);
    pub const R32G32B32Float: Self = Self(6i32);
    pub const R32G32B32UInt: Self = Self(7i32);
    pub const Printing3DDouble: Self = Self(500i32);
    pub const Printing3DUInt: Self = Self(501i32);
}
impl ::core::marker::Copy for Printing3DBufferFormat {}
impl ::core::clone::Clone for Printing3DBufferFormat {
    fn clone(&self) -> Self {
        *self
    }
}
// Opaque WinRT class projections.
pub type Printing3DColorMaterial = *mut ::core::ffi::c_void;
pub type Printing3DColorMaterialGroup = *mut ::core::ffi::c_void;
pub type Printing3DComponent = *mut ::core::ffi::c_void;
pub type Printing3DComponentWithMatrix = *mut ::core::ffi::c_void;
pub type Printing3DCompositeMaterial = *mut ::core::ffi::c_void;
pub type Printing3DCompositeMaterialGroup = *mut ::core::ffi::c_void;
pub type Printing3DFaceReductionOptions = *mut ::core::ffi::c_void;
pub type Printing3DMaterial = *mut ::core::ffi::c_void;
pub type Printing3DMesh = *mut ::core::ffi::c_void;
// Enum projected as a transparent i32 newtype with associated constants.
#[repr(transparent)]
pub struct Printing3DMeshVerificationMode(pub i32);
impl Printing3DMeshVerificationMode {
    pub const FindFirstError: Self = Self(0i32);
    pub const FindAllErrors: Self = Self(1i32);
}
impl ::core::marker::Copy for Printing3DMeshVerificationMode {}
impl ::core::clone::Clone for Printing3DMeshVerificationMode {
    fn clone(&self) -> Self {
        *self
    }
}
// Opaque WinRT class projections.
pub type Printing3DMeshVerificationResult = *mut ::core::ffi::c_void;
pub type Printing3DModel = *mut ::core::ffi::c_void;
pub type Printing3DModelTexture = *mut ::core::ffi::c_void;
// Enum projected as a transparent i32 newtype with associated constants.
#[repr(transparent)]
pub struct Printing3DModelUnit(pub i32);
impl Printing3DModelUnit {
    pub const Meter: Self = Self(0i32);
    pub const Micron: Self = Self(1i32);
    pub const Millimeter: Self = Self(2i32);
    pub const Centimeter: Self = Self(3i32);
    pub const Inch: Self = Self(4i32);
    pub const Foot: Self = Self(5i32);
}
impl ::core::marker::Copy for Printing3DModelUnit {}
impl ::core::clone::Clone for Printing3DModelUnit {
    fn clone(&self) -> Self {
        *self
    }
}
// Opaque WinRT class projections.
pub type Printing3DMultiplePropertyMaterial = *mut ::core::ffi::c_void;
pub type Printing3DMultiplePropertyMaterialGroup = *mut ::core::ffi::c_void;
// Enum projected as a transparent i32 newtype with associated constants.
#[repr(transparent)]
pub struct Printing3DObjectType(pub i32);
impl Printing3DObjectType {
    pub const Model: Self = Self(0i32);
    pub const Support: Self = Self(1i32);
    pub const Others: Self = Self(2i32);
}
impl ::core::marker::Copy for Printing3DObjectType {}
impl ::core::clone::Clone for Printing3DObjectType {
    fn clone(&self) -> Self {
        *self
    }
}
// Enum projected as a transparent i32 newtype with associated constants.
#[repr(transparent)]
pub struct Printing3DPackageCompression(pub i32);
impl Printing3DPackageCompression {
    pub const Low: Self = Self(0i32);
    pub const Medium: Self = Self(1i32);
    pub const High: Self = Self(2i32);
}
impl ::core::marker::Copy for Printing3DPackageCompression {}
impl ::core::clone::Clone for Printing3DPackageCompression {
    fn clone(&self) -> Self {
        *self
    }
}
// Opaque WinRT class projections.
pub type Printing3DTexture2CoordMaterial = *mut ::core::ffi::c_void;
pub type Printing3DTexture2CoordMaterialGroup = *mut ::core::ffi::c_void;
// Enum projected as a transparent i32 newtype with associated constants.
#[repr(transparent)]
pub struct Printing3DTextureEdgeBehavior(pub i32);
impl Printing3DTextureEdgeBehavior {
    pub const None: Self = Self(0i32);
    pub const Wrap: Self = Self(1i32);
    pub const Mirror: Self = Self(2i32);
    pub const Clamp: Self = Self(3i32);
}
impl ::core::marker::Copy for Printing3DTextureEdgeBehavior {}
impl ::core::clone::Clone for Printing3DTextureEdgeBehavior {
    fn clone(&self) -> Self {
        *self
    }
}
pub type Printing3DTextureResource = *mut ::core::ffi::c_void;
|
// The MIT License (MIT)
//
// Copyright (c) 2015 Aaron Loucks <aloucks+github@cofront.net>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use std::slice::Iter;
use std::hash::Hash;
use std::hash::Hasher;
use std::fmt::{self, Debug};
use std::ops::Index;
/// An opaque entity handle: a 32-bit slot key packed with a 32-bit generation.
#[derive(Copy, Clone, PartialEq, Eq)]
pub struct Entity(u64);
// Sentinel id used for the default (invalid) entity.
const INVALID_ID: u64 = std::u64::MAX;
// Sentinel stored in `entity_index` for slots whose entity was returned.
const INVALID_INDEX: usize = std::u32::MAX as usize;
// The default entity is the invalid sentinel, never a live handle.
impl Default for Entity {
    #[inline(always)]
    fn default() -> Entity {
        Entity(INVALID_ID)
    }
}
// Shows the packed id alongside its decoded key and generation.
impl Debug for Entity {
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        write!(f, "Entity({{id: {}, key: {}, gen: {}}})", self.0, self.key(), self.gen())
    }
}
// Hash the packed u64 directly; consistent with the derived `PartialEq`.
impl Hash for Entity {
    #[inline(always)]
    fn hash<H>(&self, state: &mut H) where H: Hasher {
        state.write_u64(self.0)
    }
}
impl Entity {
    // Packs `key` into the high 32 bits and `gen` into the low 32 bits.
    #[inline(always)]
    fn from_key_and_gen(key: u32, gen: u32) -> Entity {
        Entity(((key as u64) << 32) | (gen as u64))
    }
    /// The full 64-bit packed id.
    #[inline(always)]
    pub fn id(&self) -> u64 {
        self.0
    }
    /// The slot key (high 32 bits); used to index `EntityPool::entity_index`.
    #[inline(always)]
    pub fn key(&self) -> u32 {
        (self.0 >> 32) as u32
    }
    /// The generation counter (low 32 bits); incremented when recycled.
    #[inline(always)]
    pub fn gen(&self) -> u32 {
        (self.0 & 0xFFFFFFFF) as u32
    }
}
/// Allocator/recycler for `Entity` handles with O(1) create, return, and
/// index lookup. Live entities are kept densely packed in `entities`.
#[derive(Debug, Clone)]
pub struct EntityPool {
    entities: Vec<Entity>,
    // Returned entities awaiting recycling (generation bumped on reuse).
    entities_free: Vec<Entity>,
    entity_index: Vec<usize>, // entity_index[entity.key] => index; entities[index as usize]
    next_entity_key: u32,
}
// Default is an empty pool with no pre-allocated capacity.
impl Default for EntityPool {
    #[inline(always)]
    fn default() -> EntityPool {
        EntityPool::new()
    }
}
impl EntityPool {
/// Creates a new, empty `EntityPool`.
///
/// The `EntityPool` will not allocate until entities are created.
///
pub fn new() -> EntityPool {
EntityPool::with_capacity(0, 0)
}
/// Creates a new, empty `EntityPool` with the specified capacities.
///
/// The `EntityPool` will able to create `create_capacity` and return `return_capacity`
/// entities without reallocating. If either capacity is 0, their respective storage
/// vectors will not allocate.
pub fn with_capacity(create_capacity: usize, return_capacity: usize) -> EntityPool {
EntityPool {
entities: Vec::with_capacity(create_capacity),
entities_free: Vec::with_capacity(return_capacity),
entity_index: Vec::with_capacity(create_capacity),
next_entity_key: 0
}
}
    /// Creates a unique entity.
    ///
    /// Returns the `Entity` and it's current `index`. The index is only guaranteed to remain
    /// valid until the next call to `return_entity`.
    pub fn create_entity(&mut self) -> (usize, Entity) {
        // Prefer recycling a returned slot (bumping its generation so the
        // new handle is distinct); otherwise mint a brand-new key.
        let (key, gen) = match self.entities_free.pop() {
            Some(entity) => {
                (entity.key(), entity.gen().wrapping_add(1))
            },
            None => {
                let key = self.next_entity_key;
                self.next_entity_key = key + 1;
                (key, 0)
            }
        };
        let entity = Entity::from_key_and_gen(key, gen);
        let index = self.entities.len();
        self.entities.push(entity);
        // Recycled keys already have a slot in the index table; brand-new
        // keys are always exactly one past the end, so they are appended.
        if key as usize != self.entity_index.len() {
            self.entity_index[key as usize] = index;
        }
        else {
            debug_assert_eq!(key as usize, self.entity_index.len());
            self.entity_index.push(index);
        }
        (index, entity)
    }
/// Release ownership of the `entity`, allowing for it to be recycled. A recycled entity will
/// have it's internal generation incremented, yielding a new, unique entity.
///
/// Entities are stored in contiguous memory. When an entity is returned, the last entity is
/// swaped into the returned entity's slot; thus indexes retrieved prior to returning an
/// entity are potentially invalidated.
///
/// # Panics
///
/// Returning an entity more than once, or an entity created from another pool, results in
/// undefined behavior.
///
/// # Examples
///
/// ```
/// use entitypool::EntityPool;
///
/// let mut pool = EntityPool::new();
/// let (_, e1) = pool.create_entity();
/// pool.return_entity(e1);
/// let (_, e2) = pool.create_entity();
/// assert!(e1 != e2);
/// ```
///
/// ```
/// use entitypool::EntityPool;
///
/// let mut pool = EntityPool::new();
/// let (i1, e1) = pool.create_entity();
/// let (i2, e2) = pool.create_entity();
/// let (i3, e3) = pool.create_entity();
/// assert_eq!(0, i1);
/// assert_eq!(1, i2);
/// assert_eq!(2, i3);
/// assert_eq!(0, pool.index_of(e1));
/// assert_eq!(1, pool.index_of(e2));
/// assert_eq!(2, pool.index_of(e3));
/// pool.return_entity(e2);
/// assert_eq!(0, pool.index_of(e1));
/// assert_eq!(1, pool.index_of(e3));
/// ```
pub fn return_entity(&mut self, entity: Entity) {
debug_assert!(entity != Entity::default());
let key = entity.key();
let index = self.entity_index[key as usize];
debug_assert!(index != INVALID_INDEX, format!("Invalid or previously freed entity: {:?}",
entity));
debug_assert_eq!(entity.gen(), self.entities[index].gen());
self.entities_free.push(entity);
self.entities.swap_remove(index);
self.entity_index[key as usize] = INVALID_INDEX;
match self.entities.get(index) {
Some(e) => self.entity_index[e.key() as usize] = index,
None => {}
};
}
/// Returns the current `index` of the given `entity`, which is only guaranteed to remain
/// valid until the next call to `return_entity`.
///
/// # Panics
///
/// Querying the status of an entity from another pool results in undefined behavior.
#[inline(always)]
pub fn index_of(&self, entity: Entity) -> usize {
debug_assert!(entity != Entity::default());
let key = entity.key();
let index = self.entity_index[key as usize] as usize;
debug_assert!(index != INVALID_INDEX, format!("Invalid or previously freed entity: {:?}",
entity));
debug_assert_eq!(entity.gen(), self.entities[index].gen());
index
}
    /// Returns the current `entity` at the given `index`.
    ///
    /// # Panics
    ///
    /// Panics if the index is greater or equal to the number of live entities in this pool.
    #[inline(always)]
    pub fn entity_at(&self, index: usize) -> Entity {
        self.entities[index]
    }
/// Returns `true` if this entity has not been returned.
///
/// # Panics
///
/// Querying the status of an entity from another pool results in undefined behavior.
pub fn is_alive(&self, entity: Entity) -> bool {
debug_assert!(entity != Entity::default());
let key = entity.key();
let index = self.entity_index[key as usize];
if index != INVALID_INDEX {
let other = self.entities[index];
key == other.key() && entity.gen() == other.gen()
}
else {
false
}
}
    /// Returns an iterator to the live entities. The `Enumerate` of the returned iterator will
    /// yield each `entity` and its current `index`.
    ///
    /// # Examples
    ///
    /// ```
    /// use entitypool::EntityPool;
    ///
    /// let mut pool = EntityPool::new();
    /// pool.create_entity();
    /// pool.create_entity();
    /// pool.create_entity();
    /// for (index, entity) in pool.iter().enumerate() {
    ///     assert_eq!(index, pool.index_of(*entity));
    ///     assert_eq!(*entity, pool.entity_at(index));
    /// }
    /// ```
    #[inline(always)]
    pub fn iter(&self) -> Iter<Entity> {
        self.entities.iter()
    }
/// Resets the `EnitityPool` to its initial state, without releasing allocated capacity.
///
/// All entities created prior to resetting are no longer considered members of this pool.
pub fn reset(&mut self) {
self.entities.clear();
self.entities_free.clear();
self.entity_index.clear();
self.next_entity_key = 0;
}
    /// Reserves capacity for at least `additional` more entities to be created without
    /// reallocation. The pool may reserve more space to avoid frequent reallocations.
    pub fn reserve(&mut self, additional: usize) {
        self.entities.reserve(additional);
        self.entity_index.reserve(additional);
    }
    /// Reserves capacity for at least `additional` more entities to be returned without
    /// reallocation. The pool may reserve more space to avoid frequent reallocations.
    pub fn reserve_returned(&mut self, additional: usize) {
        self.entities_free.reserve(additional);
    }
/// Shrinks the capacity of this pool as much as possible.
pub fn shrink_to_fit(&mut self) {
self.entities.shrink_to_fit();
self.entities_free.shrink_to_fit();
self.entity_index.shrink_to_fit();
}
    /// Returns the number of live entities in this pool.
    #[inline(always)]
    pub fn len(&self) -> usize {
        // NOTE(review): `entity_index` keeps one slot per key ever issued and is
        // not shrunk by `return_entity`, so this assert looks like it can fire in
        // debug builds while any entity is returned but not yet recycled —
        // TODO confirm against `create_entity` (not visible in this chunk).
        debug_assert_eq!(self.entities.len(), self.entity_index.len());
        self.entities.len()
    }
/// Returns the number of returned entities in this pool that are ready to be recycled.
#[inline(always)]
pub fn len_returned(&self) -> usize {
self.entities_free.len()
}
    /// Returns the number of entities that this pool can create without reallocation.
    #[inline(always)]
    pub fn capacity(&self) -> usize {
        // NOTE(review): `Vec::reserve` "may reserve more space", so equality of two
        // vectors' capacities is not guaranteed by the Vec API even when they see
        // the same reserve calls — this debug assert may be over-strict; verify.
        debug_assert_eq!(self.entities.capacity(), self.entity_index.capacity());
        self.entities.capacity()
    }
/// Returns the number of entities that can be returned without reallocation.
#[inline(always)]
pub fn capacity_returned(&self) -> usize {
self.entities_free.capacity()
}
}
impl Index<usize> for EntityPool {
    type Output = Entity;
    /// Returns the `entity` at the given `index`.
    #[inline(always)]
    fn index(&self, index: usize) -> &Entity {
        let entities = &self.entities;
        &entities[index]
    }
}
impl Index<Entity> for EntityPool {
    type Output = usize;
    /// Returns the index of the given `entity`.
    ///
    /// In debug builds, asserts that the entity is valid and not freed.
    #[inline(always)]
    fn index(&self, entity: Entity) -> &usize {
        let index = &self.entity_index[entity.key() as usize];
        // Format args passed directly: `debug_assert!` formats lazily, and a
        // non-literal `format!` message is a hard error in Rust 2021.
        debug_assert!(
            *index != INVALID_INDEX,
            "Invalid or previously freed entity: {:?}",
            entity
        );
        index
    }
}
impl<'a> Index<&'a usize> for EntityPool {
    type Output = Entity;
    /// Returns the `entity` at the given `index`.
    #[inline(always)]
    fn index(&self, index: &usize) -> &Entity {
        let i = *index;
        &self.entities[i]
    }
}
impl<'a> Index<&'a Entity> for EntityPool {
    type Output = usize;
    /// Returns the index of the given `entity`.
    ///
    /// In debug builds, asserts that the entity is valid and not freed.
    #[inline(always)]
    fn index(&self, entity: &Entity) -> &usize {
        let index = &self.entity_index[entity.key() as usize];
        // Format args passed directly: `debug_assert!` formats lazily, and a
        // non-literal `format!` message is a hard error in Rust 2021.
        debug_assert!(
            *index != INVALID_INDEX,
            "Invalid or previously freed entity: {:?}",
            entity
        );
        index
    }
}
#[test]
fn it_works() {
    let mut pool = EntityPool::new();
    let mut entities = Vec::<Entity>::new();
    // Phase 1: create five entities; indices must be assigned sequentially and
    // every lookup direction (entity -> index, index -> entity) must agree.
    for i in 0..5 {
        let (index, e) = pool.create_entity();
        assert_eq!(i, index);
        assert_eq!(e, pool.entity_at(index));
        assert_eq!(e, pool[i]);
        assert_eq!(index, pool.index_of(e));
        assert_eq!(index, pool[e]);
        assert!(!entities.contains(&e));
        assert!(pool.is_alive(e));
        entities.push(e);
    }
    // Phase 2: return the entities one by one; after every return the survivors
    // must still be fully consistent through all accessor/Index forms.
    let mut alive = 5;
    for e in entities.iter() {
        pool.return_entity(*e);
        assert!(!pool.is_alive(*e));
        alive -= 1;
        let mut expected_alive = 0;
        for (i_alive, e_alive) in pool.iter().enumerate() {
            assert!(pool.is_alive(*e_alive));
            assert_eq!(i_alive, pool.index_of(*e_alive));
            assert_eq!(i_alive, pool[*e_alive]);
            assert_eq!(i_alive, pool[e_alive]);
            assert_eq!(*e_alive, pool.entity_at(i_alive));
            assert_eq!(*e_alive, pool[i_alive]);
            assert_eq!(*e_alive, pool[&i_alive]);
            expected_alive += 1;
        }
        assert_eq!(expected_alive, alive);
    }
    // Phase 3: create five more; recycled entities must not compare equal to
    // any previously issued entity (checked via `entities.contains`).
    for i in 0..5 {
        let (index, e) = pool.create_entity();
        assert_eq!(i, index);
        assert_eq!(e, pool.entity_at(index));
        assert_eq!(e, pool[i]);
        assert_eq!(index, pool.index_of(e));
        assert_eq!(index, pool[e]);
        assert!(!entities.contains(&e));
        assert!(pool.is_alive(e));
        entities.push(e);
    }
    // Final sweep: exactly five live entities remain, all self-consistent.
    let mut count = 0;
    for (index, e) in pool.iter().enumerate() {
        assert_eq!(index, pool.index_of(*e));
        assert_eq!(index, pool[e]);
        assert_eq!(*e, pool.entity_at(index));
        assert_eq!(*e, pool[index]);
        assert!(pool.is_alive(*e));
        count += 1;
    }
    assert_eq!(5, count);
}
|
#![allow(unused_variables, non_upper_case_globals, non_snake_case, unused_unsafe, non_camel_case_types, dead_code, clippy::all)]
// Windows interop bindings; appears machine-generated (windows-rs style), so
// layout, derives and constant values mirror the Windows SDK metadata.
#[derive(:: core :: cmp :: PartialEq, :: core :: cmp :: Eq, :: core :: marker :: Copy, :: core :: clone :: Clone, :: core :: default :: Default, :: core :: fmt :: Debug)]
#[repr(transparent)]
pub struct GRAPHICS_EFFECT_PROPERTY_MAPPING(pub i32);
// SDK-defined constant values; do not renumber.
pub const GRAPHICS_EFFECT_PROPERTY_MAPPING_UNKNOWN: GRAPHICS_EFFECT_PROPERTY_MAPPING = GRAPHICS_EFFECT_PROPERTY_MAPPING(0i32);
pub const GRAPHICS_EFFECT_PROPERTY_MAPPING_DIRECT: GRAPHICS_EFFECT_PROPERTY_MAPPING = GRAPHICS_EFFECT_PROPERTY_MAPPING(1i32);
pub const GRAPHICS_EFFECT_PROPERTY_MAPPING_VECTORX: GRAPHICS_EFFECT_PROPERTY_MAPPING = GRAPHICS_EFFECT_PROPERTY_MAPPING(2i32);
pub const GRAPHICS_EFFECT_PROPERTY_MAPPING_VECTORY: GRAPHICS_EFFECT_PROPERTY_MAPPING = GRAPHICS_EFFECT_PROPERTY_MAPPING(3i32);
pub const GRAPHICS_EFFECT_PROPERTY_MAPPING_VECTORZ: GRAPHICS_EFFECT_PROPERTY_MAPPING = GRAPHICS_EFFECT_PROPERTY_MAPPING(4i32);
pub const GRAPHICS_EFFECT_PROPERTY_MAPPING_VECTORW: GRAPHICS_EFFECT_PROPERTY_MAPPING = GRAPHICS_EFFECT_PROPERTY_MAPPING(5i32);
pub const GRAPHICS_EFFECT_PROPERTY_MAPPING_RECT_TO_VECTOR4: GRAPHICS_EFFECT_PROPERTY_MAPPING = GRAPHICS_EFFECT_PROPERTY_MAPPING(6i32);
pub const GRAPHICS_EFFECT_PROPERTY_MAPPING_RADIANS_TO_DEGREES: GRAPHICS_EFFECT_PROPERTY_MAPPING = GRAPHICS_EFFECT_PROPERTY_MAPPING(7i32);
pub const GRAPHICS_EFFECT_PROPERTY_MAPPING_COLORMATRIX_ALPHA_MODE: GRAPHICS_EFFECT_PROPERTY_MAPPING = GRAPHICS_EFFECT_PROPERTY_MAPPING(8i32);
pub const GRAPHICS_EFFECT_PROPERTY_MAPPING_COLOR_TO_VECTOR3: GRAPHICS_EFFECT_PROPERTY_MAPPING = GRAPHICS_EFFECT_PROPERTY_MAPPING(9i32);
pub const GRAPHICS_EFFECT_PROPERTY_MAPPING_COLOR_TO_VECTOR4: GRAPHICS_EFFECT_PROPERTY_MAPPING = GRAPHICS_EFFECT_PROPERTY_MAPPING(10i32);
// Wrap a raw ABI `i32` into the transparent newtype.
impl ::core::convert::From<i32> for GRAPHICS_EFFECT_PROPERTY_MAPPING {
    fn from(value: i32) -> Self {
        Self(value)
    }
}
// `#[repr(transparent)]` over `i32` means the type is its own ABI representation.
unsafe impl ::windows::core::Abi for GRAPHICS_EFFECT_PROPERTY_MAPPING {
    type Abi = Self;
}
// COM interface wrapper over `IUnknown`; appears machine-generated (windows-rs
// style). The method bodies index raw vtable slots, so the slot numbers below
// must match `IGeometrySource2DInterop_abi` exactly.
#[repr(transparent)]
#[derive(:: core :: cmp :: PartialEq, :: core :: cmp :: Eq, :: core :: clone :: Clone, :: core :: fmt :: Debug)]
pub struct IGeometrySource2DInterop(pub ::windows::core::IUnknown);
impl IGeometrySource2DInterop {
    // Vtable slot 3 (slots 0-2 are IUnknown's QueryInterface/AddRef/Release).
    #[cfg(feature = "Win32_Graphics_Direct2D")]
    pub unsafe fn GetGeometry(&self) -> ::windows::core::Result<super::super::super::super::Graphics::Direct2D::ID2D1Geometry> {
        let mut result__: <super::super::super::super::Graphics::Direct2D::ID2D1Geometry as ::windows::core::Abi>::Abi = ::core::mem::zeroed();
        (::windows::core::Interface::vtable(self).3)(::core::mem::transmute_copy(self), &mut result__).from_abi::<super::super::super::super::Graphics::Direct2D::ID2D1Geometry>(result__)
    }
    // Vtable slot 4.
    #[cfg(feature = "Win32_Graphics_Direct2D")]
    pub unsafe fn TryGetGeometryUsingFactory<'a, Param0: ::windows::core::IntoParam<'a, super::super::super::super::Graphics::Direct2D::ID2D1Factory>>(&self, factory: Param0) -> ::windows::core::Result<super::super::super::super::Graphics::Direct2D::ID2D1Geometry> {
        let mut result__: <super::super::super::super::Graphics::Direct2D::ID2D1Geometry as ::windows::core::Abi>::Abi = ::core::mem::zeroed();
        (::windows::core::Interface::vtable(self).4)(::core::mem::transmute_copy(self), factory.into_param().abi(), &mut result__).from_abi::<super::super::super::super::Graphics::Direct2D::ID2D1Geometry>(result__)
    }
}
unsafe impl ::windows::core::Interface for IGeometrySource2DInterop {
    type Vtable = IGeometrySource2DInterop_abi;
    // Interface IID for QueryInterface.
    const IID: ::windows::core::GUID = ::windows::core::GUID::from_u128(0x0657af73_53fd_47cf_84ff_c8492d2a80a3);
}
// Conversions to/from the underlying IUnknown (owned and borrowed forms).
impl ::core::convert::From<IGeometrySource2DInterop> for ::windows::core::IUnknown {
    fn from(value: IGeometrySource2DInterop) -> Self {
        value.0
    }
}
impl ::core::convert::From<&IGeometrySource2DInterop> for ::windows::core::IUnknown {
    fn from(value: &IGeometrySource2DInterop) -> Self {
        value.0.clone()
    }
}
impl<'a> ::windows::core::IntoParam<'a, ::windows::core::IUnknown> for IGeometrySource2DInterop {
    fn into_param(self) -> ::windows::core::Param<'a, ::windows::core::IUnknown> {
        ::windows::core::Param::Owned(self.0)
    }
}
impl<'a> ::windows::core::IntoParam<'a, ::windows::core::IUnknown> for &'a IGeometrySource2DInterop {
    fn into_param(self) -> ::windows::core::Param<'a, ::windows::core::IUnknown> {
        ::windows::core::Param::Borrowed(&self.0)
    }
}
// Raw vtable layout: field order defines the ABI slot order. Slots gated off
// by a disabled feature are padded with `usize` to preserve the layout.
#[repr(C)]
#[doc(hidden)]
pub struct IGeometrySource2DInterop_abi(
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, iid: &::windows::core::GUID, interface: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr) -> u32,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr) -> u32,
    #[cfg(feature = "Win32_Graphics_Direct2D")] pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    #[cfg(not(feature = "Win32_Graphics_Direct2D"))] usize,
    #[cfg(feature = "Win32_Graphics_Direct2D")] pub unsafe extern "system" fn(this: ::windows::core::RawPtr, factory: ::windows::core::RawPtr, value: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    #[cfg(not(feature = "Win32_Graphics_Direct2D"))] usize,
);
// COM interface wrapper over `IUnknown`; appears machine-generated (windows-rs
// style). Method slot numbers must match `IGraphicsEffectD2D1Interop_abi`.
#[repr(transparent)]
#[derive(:: core :: cmp :: PartialEq, :: core :: cmp :: Eq, :: core :: clone :: Clone, :: core :: fmt :: Debug)]
pub struct IGraphicsEffectD2D1Interop(pub ::windows::core::IUnknown);
impl IGraphicsEffectD2D1Interop {
    // Vtable slot 3 (slots 0-2 are IUnknown).
    pub unsafe fn GetEffectId(&self) -> ::windows::core::Result<::windows::core::GUID> {
        let mut result__: <::windows::core::GUID as ::windows::core::Abi>::Abi = ::core::mem::zeroed();
        (::windows::core::Interface::vtable(self).3)(::core::mem::transmute_copy(self), &mut result__).from_abi::<::windows::core::GUID>(result__)
    }
    // Vtable slot 4; writes through the caller-supplied out-pointers.
    #[cfg(feature = "Win32_Foundation")]
    pub unsafe fn GetNamedPropertyMapping<'a, Param0: ::windows::core::IntoParam<'a, super::super::super::super::Foundation::PWSTR>>(&self, name: Param0, index: *mut u32, mapping: *mut GRAPHICS_EFFECT_PROPERTY_MAPPING) -> ::windows::core::Result<()> {
        (::windows::core::Interface::vtable(self).4)(::core::mem::transmute_copy(self), name.into_param().abi(), ::core::mem::transmute(index), ::core::mem::transmute(mapping)).ok()
    }
    // Vtable slot 5.
    pub unsafe fn GetPropertyCount(&self) -> ::windows::core::Result<u32> {
        let mut result__: <u32 as ::windows::core::Abi>::Abi = ::core::mem::zeroed();
        (::windows::core::Interface::vtable(self).5)(::core::mem::transmute_copy(self), &mut result__).from_abi::<u32>(result__)
    }
    // Vtable slot 6.
    #[cfg(feature = "Foundation")]
    pub unsafe fn GetProperty(&self, index: u32) -> ::windows::core::Result<super::super::super::super::super::Foundation::IPropertyValue> {
        let mut result__: <super::super::super::super::super::Foundation::IPropertyValue as ::windows::core::Abi>::Abi = ::core::mem::zeroed();
        (::windows::core::Interface::vtable(self).6)(::core::mem::transmute_copy(self), ::core::mem::transmute(index), &mut result__).from_abi::<super::super::super::super::super::Foundation::IPropertyValue>(result__)
    }
    // Vtable slot 7.
    #[cfg(feature = "Graphics_Effects")]
    pub unsafe fn GetSource(&self, index: u32) -> ::windows::core::Result<super::super::super::super::super::Graphics::Effects::IGraphicsEffectSource> {
        let mut result__: <super::super::super::super::super::Graphics::Effects::IGraphicsEffectSource as ::windows::core::Abi>::Abi = ::core::mem::zeroed();
        (::windows::core::Interface::vtable(self).7)(::core::mem::transmute_copy(self), ::core::mem::transmute(index), &mut result__).from_abi::<super::super::super::super::super::Graphics::Effects::IGraphicsEffectSource>(result__)
    }
    // Vtable slot 8.
    pub unsafe fn GetSourceCount(&self) -> ::windows::core::Result<u32> {
        let mut result__: <u32 as ::windows::core::Abi>::Abi = ::core::mem::zeroed();
        (::windows::core::Interface::vtable(self).8)(::core::mem::transmute_copy(self), &mut result__).from_abi::<u32>(result__)
    }
}
unsafe impl ::windows::core::Interface for IGraphicsEffectD2D1Interop {
    type Vtable = IGraphicsEffectD2D1Interop_abi;
    // Interface IID for QueryInterface.
    const IID: ::windows::core::GUID = ::windows::core::GUID::from_u128(0x2fc57384_a068_44d7_a331_30982fcf7177);
}
// Conversions to/from the underlying IUnknown (owned and borrowed forms).
impl ::core::convert::From<IGraphicsEffectD2D1Interop> for ::windows::core::IUnknown {
    fn from(value: IGraphicsEffectD2D1Interop) -> Self {
        value.0
    }
}
impl ::core::convert::From<&IGraphicsEffectD2D1Interop> for ::windows::core::IUnknown {
    fn from(value: &IGraphicsEffectD2D1Interop) -> Self {
        value.0.clone()
    }
}
impl<'a> ::windows::core::IntoParam<'a, ::windows::core::IUnknown> for IGraphicsEffectD2D1Interop {
    fn into_param(self) -> ::windows::core::Param<'a, ::windows::core::IUnknown> {
        ::windows::core::Param::Owned(self.0)
    }
}
impl<'a> ::windows::core::IntoParam<'a, ::windows::core::IUnknown> for &'a IGraphicsEffectD2D1Interop {
    fn into_param(self) -> ::windows::core::Param<'a, ::windows::core::IUnknown> {
        ::windows::core::Param::Borrowed(&self.0)
    }
}
// Raw vtable layout: field order defines the ABI slot order. Slots gated off
// by a disabled feature are padded with `usize` to preserve the layout.
#[repr(C)]
#[doc(hidden)]
pub struct IGraphicsEffectD2D1Interop_abi(
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, iid: &::windows::core::GUID, interface: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr) -> u32,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr) -> u32,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, id: *mut ::windows::core::GUID) -> ::windows::core::HRESULT,
    #[cfg(feature = "Win32_Foundation")] pub unsafe extern "system" fn(this: ::windows::core::RawPtr, name: super::super::super::super::Foundation::PWSTR, index: *mut u32, mapping: *mut GRAPHICS_EFFECT_PROPERTY_MAPPING) -> ::windows::core::HRESULT,
    #[cfg(not(feature = "Win32_Foundation"))] usize,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, count: *mut u32) -> ::windows::core::HRESULT,
    #[cfg(feature = "Foundation")] pub unsafe extern "system" fn(this: ::windows::core::RawPtr, index: u32, value: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    #[cfg(not(feature = "Foundation"))] usize,
    #[cfg(feature = "Graphics_Effects")] pub unsafe extern "system" fn(this: ::windows::core::RawPtr, index: u32, source: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    #[cfg(not(feature = "Graphics_Effects"))] usize,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, count: *mut u32) -> ::windows::core::HRESULT,
);
|
//////////////////////////////////////////////////
//
// wave.rs
// Sample that loads a .wav file
//
extern crate byteorder;
// alto
use alto::{Mono, Stereo};
// byteorder
use byteorder::{LittleEndian, ReadBytesExt};
// failure
use failure::{Error, format_err};
// rust std
use std::fs::File;
use std::io::{Read, Seek, SeekFrom, BufReader};
/// Format information parsed from a RIFF/WAVE file's header and "fmt " chunk.
pub struct WaveInformation
{
    // Total file size in bytes (RIFF chunk size field + 8 header bytes).
    pub file_size: u32,
    // Audio format tag as stored in the "fmt " chunk.
    pub pcm_format: u16,
    // Channel count; 1 (mono) and 2 (stereo) are the values the loader decodes.
    pub channels: u16,
    // Sample frames per second.
    pub sampling_rate: u32,
    // Bytes per second, as stored in the "fmt " chunk.
    pub byte_per_sec: u32,
    // Bytes per sample frame across all channels.
    pub block_align: u16,
    // Bits per sample; 8 and 16 are the values the loader decodes.
    pub bit_per_sample: u16,
}
/// Decoded sample data; the variant matches the file's channel count and bit depth.
pub enum WaveBuffer
{
    // 8-bit mono samples.
    U8Mono(Vec<Mono<u8>>),
    // 16-bit mono samples.
    I16Mono(Vec<Mono<i16>>),
    // 8-bit stereo sample frames.
    U8Stereo(Vec<Stereo<u8>>),
    // 16-bit stereo sample frames.
    I16Stereo(Vec<Stereo<i16>>),
}
pub fn new(file_path: &String) -> Result<(WaveInformation, WaveBuffer), Error>
{
// create wave reader
let mut reader = BufReader::new(File::open(file_path)?);
// create variable to put collected data from the .wav file
let mut signature = [0u8; 4];
// create wave information structure
let mut info = WaveInformation
{
file_size: 0,
pcm_format: 0,
channels: 0,
sampling_rate: 0,
byte_per_sec: 0,
block_align: 0,
bit_per_sample: 0,
};
// read riff, size, wave
reader.read_exact(&mut signature)?;
if signature != [ 0x52, 0x49, 0x46, 0x46 ] { return Err(format_err!("This is not a wave file.")); }
info.file_size = reader.read_u32::<LittleEndian>()? + 8;
reader.read_exact(&mut signature)?;
if signature != [ 0x57, 0x41, 0x56, 0x45 ] { return Err(format_err!("This file does not have a WAVE header.")); }
// read chunks
loop
{
reader.read_exact(&mut signature)?;
match signature
{
// "fmt "
[ 0x66, 0x6D, 0x74, 0x20 ] =>
{
let chunk_size = reader.read_u32::<LittleEndian>()?;
info.pcm_format = reader.read_u16::<LittleEndian>()?;
info.channels = reader.read_u16::<LittleEndian>()?;
info.sampling_rate = reader.read_u32::<LittleEndian>()?;
info.byte_per_sec = reader.read_u32::<LittleEndian>()?;
info.block_align = reader.read_u16::<LittleEndian>()?;
info.bit_per_sample = reader.read_u16::<LittleEndian>()?;
reader.seek(SeekFrom::Current((chunk_size - 16) as i64))?;
},
// "data"
[ 0x64, 0x61, 0x74, 0x61 ] =>
{
let chunk_size = reader.read_u32::<LittleEndian>()?;
match (info.channels, info.bit_per_sample)
{
(1, 8) =>
{
let mut buffer = vec![Mono{ center: 0 }; chunk_size as usize];
for i in 0..chunk_size
{
buffer[i as usize] = Mono
{
center: reader.read_u8()?
};
}
let buffer = WaveBuffer::U8Mono(buffer);
return Ok((info, buffer));
}
(2, 8) =>
{
let mut buffer = vec![Stereo{ left: 0, right: 0 }; (chunk_size / 2) as usize];
for i in 0..(chunk_size / 2)
{
buffer[i as usize] = Stereo
{
left: reader.read_u8()?,
right: reader.read_u8()?
};
}
let buffer = WaveBuffer::U8Stereo(buffer);
return Ok((info, buffer));
}
(1, 16) =>
{
let mut buffer = vec![Mono{ center: 0 }; (chunk_size / 2) as usize];
for i in 0..(chunk_size / 2)
{
buffer[i as usize] = Mono
{
center: reader.read_i16::<LittleEndian>()?
};
}
let buffer = WaveBuffer::I16Mono(buffer);
return Ok((info, buffer));
}
(2, 16) =>
{
let mut buffer = vec![Stereo{ left: 0, right: 0 }; (chunk_size / 4) as usize];
for i in 0..(chunk_size / 4)
{
buffer[i as usize] = Stereo
{
left: reader.read_i16::<LittleEndian>()?,
right: reader.read_i16::<LittleEndian>()?
};
}
let buffer = WaveBuffer::I16Stereo(buffer);
return Ok((info, buffer));
}
(_, _) => return Err(format_err!("Invalid format."))
};
},
// others
_ =>
{
let chunk_size = reader.read_u32::<LittleEndian>()?;
reader.seek(SeekFrom::Current(chunk_size as i64))?;
},
}
}
} |
/// Prints a greeting, demonstrating `if let` over an `Option`.
fn main() {
    let msg = Some("Hello world");
    // Match ergonomics: binding through `&msg` replaces the older
    // `Some(ref m)` + explicit `*m` dereference.
    if let Some(m) = &msg {
        println!("{}", m);
    }
}
//! Example that definitely works on Raspberry Pi.
//! Make sure you have "SPI" on your Pi enabled and that MOSI-Pin is connected
//! with DIN-Pin. You just need DIN pin, no clock. WS2818 uses one-wire-protocol.
//! See the specification for details
use ws2818_rgb_led_spi_driver::encoding::{encode_rgb};
use ws2818_rgb_led_spi_driver::adapter::WS28xxAdapter;
fn main() {
    println!("make sure you have \"SPI\" on your Pi enabled and that MOSI-Pin is connected with DIN-Pin!");
    let mut adapter = WS28xxAdapter::new("/dev/spidev0.0").unwrap();
    // First three pixels: bright red, bright green, bright blue.
    let colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255)];
    // Method 1: encode first and write in two steps (preferred; better performance)
    {
        let mut spi_encoded_rgb_bits = vec![];
        for &(r, g, b) in &colors {
            spi_encoded_rgb_bits.extend_from_slice(&encode_rgb(r, g, b));
        }
        adapter.write_encoded_rgb(&spi_encoded_rgb_bits).unwrap();
    }
    // Method 2: encode and write in one step
    {
        let rgb_values = vec![(255, 0, 0), (0, 255, 0), (0, 0, 255)];
        adapter.write_rgb(&rgb_values).unwrap();
    }
}
|
//! A map implemented on a trie. Unlike `std::collections::HashMap` the keys in this map are not
//! hashed but are instead serialized.
use crate::collections::next_trie_id;
use crate::env;
use borsh::{BorshDeserialize, BorshSerialize};
use near_vm_logic::types::IteratorIndex;
use std::marker::PhantomData;
#[derive(BorshSerialize, BorshDeserialize)]
pub struct Map<K, V> {
    /// Number of elements currently stored under `prefix`.
    len: u64,
    /// Unique trie-key prefix; every entry is stored at `prefix ++ borsh(key)`.
    prefix: Vec<u8>,
    /// Type marker only; excluded from Borsh (de)serialization.
    #[borsh_skip]
    key: PhantomData<K>,
    /// Type marker only; excluded from Borsh (de)serialization.
    #[borsh_skip]
    value: PhantomData<V>,
}
impl<K, V> Map<K, V> {
    /// Returns the number of elements in the map, also referred to as its 'size'.
    pub fn len(&self) -> u64 {
        self.len
    }

    /// Returns `true` if the map contains no elements.
    ///
    /// Added alongside `len` (clippy: `len_without_is_empty`); purely additive,
    /// so existing callers are unaffected.
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }
}
impl<K, V> Default for Map<K, V> {
    /// Creates a map keyed under a freshly generated trie id.
    fn default() -> Self {
        Self::new(next_trie_id())
    }
}
impl<K, V> Map<K, V> {
    /// Create new map with zero elements. Use `id` as a unique identifier
    /// (it becomes the storage-key prefix for every entry).
    pub fn new(id: Vec<u8>) -> Self {
        Self { len: 0, prefix: id, key: PhantomData, value: PhantomData }
    }
}
impl<K, V> Map<K, V>
where
    K: BorshSerialize + BorshDeserialize,
    V: BorshSerialize + BorshDeserialize,
{
    /// Serializes key into an array of bytes.
    ///
    /// The storage key is `prefix ++ borsh(key)`, keeping all entries of this
    /// map in one contiguous trie range.
    fn serialize_key(&self, key: &K) -> Vec<u8> {
        let mut res = self.prefix.clone();
        let data = key.try_to_vec().expect("Key should be serializable with Borsh.");
        res.extend(data);
        res
    }
    /// Serializes value into an array of bytes.
    fn serialize_value(&self, value: &V) -> Vec<u8> {
        value.try_to_vec().expect("Value should be serializable with Borsh.")
    }
    /// Deserializes key, taking prefix into account.
    fn deserialize_key(prefix: &[u8], raw_key: &[u8]) -> K {
        // Strip the map prefix before handing the bytes to Borsh.
        let key = &raw_key[prefix.len()..];
        K::try_from_slice(key).expect("Key should be deserializable with Borsh.")
    }
    /// Deserializes value.
    fn deserialize_value(value: &[u8]) -> V {
        V::try_from_slice(value).expect("Value should be deserializable with Borsh.")
    }
    /// An iterator visiting all keys. The iterator element type is `K`.
    ///
    /// NOTE: `raw_keys()` eagerly collects every raw key before decoding starts.
    pub fn keys<'a>(&'a self) -> impl Iterator<Item = K> + 'a {
        let prefix = self.prefix.clone();
        self.raw_keys().into_iter().map(move |k| Self::deserialize_key(&prefix, &k))
    }
    /// An iterator visiting all values. The iterator element type is `V`.
    pub fn values<'a>(&'a self) -> impl Iterator<Item = V> + 'a {
        self.raw_values().map(|v| Self::deserialize_value(&v))
    }
    /// Returns value by key, or None if key is not present
    pub fn get(&self, key: &K) -> Option<V> {
        let raw_key = self.serialize_key(key);
        env::storage_read(&raw_key).map(|raw_value| Self::deserialize_value(&raw_value))
    }
    /// Removes a key from the map, returning the value at the key if the key was previously in the map.
    pub fn remove(&mut self, key: &K) -> Option<V> {
        let raw_key = self.serialize_key(key);
        if env::storage_remove(&raw_key) {
            // An entry was actually removed: shrink the count and return the
            // evicted bytes as the old value.
            self.len -= 1;
            let data = env::storage_get_evicted()
                .expect("The removal signaled that the value was evicted.");
            Some(Self::deserialize_value(&data))
        } else {
            None
        }
    }
    /// Inserts a key-value pair into the map.
    ///
    /// If the map did not have this key present, [`None`] is returned.
    ///
    /// If the map did have this key present, the value is updated, and the old
    /// value is returned.
    pub fn insert(&mut self, key: &K, value: &V) -> Option<V> {
        let key = self.serialize_key(key);
        let value = self.serialize_value(value);
        if env::storage_write(&key, &value) {
            // `storage_write` returned true: an existing entry was overwritten.
            let data = env::storage_get_evicted()
                .expect("The insert signaled that the value was evicted.");
            Some(Self::deserialize_value(&data))
        } else {
            // Fresh key: only then does the element count grow.
            self.len += 1;
            None
        }
    }
    /// Copies elements into an `std::vec::Vec`.
    pub fn to_vec(&self) -> std::vec::Vec<(K, V)> {
        self.iter().collect()
    }
    /// Raw serialized keys.
    fn raw_keys(&self) -> Vec<Vec<u8>> {
        let iterator_id = env::storage_iter_prefix(&self.prefix);
        IntoMapRawKeys { iterator_id }.collect()
    }
    /// Raw serialized values.
    fn raw_values(&self) -> IntoMapRawValues {
        let iterator_id = env::storage_iter_prefix(&self.prefix);
        IntoMapRawValues { iterator_id }
    }
    /// Clears the map, removing all elements.
    pub fn clear(&mut self) {
        // Keys are collected up front, before any removal — presumably so the
        // host-side iterator is finished before the trie range it covers is
        // mutated. TODO(review): confirm against the env iterator semantics.
        let keys: Vec<Vec<u8>> = self.raw_keys();
        for key in keys {
            env::storage_remove(&key);
        }
        self.len = 0;
    }
    /// Lazily iterates over `(K, V)` pairs without consuming the map.
    pub fn iter(&self) -> IntoMapRef<K, V> {
        let iterator_id = env::storage_iter_prefix(&self.prefix);
        IntoMapRef { iterator_id, map: self }
    }
    /// Inserts every pair from `iter`; `len` grows only for keys not already present.
    pub fn extend<IT: IntoIterator<Item = (K, V)>>(&mut self, iter: IT) {
        for (el_key, el_value) in iter {
            let key = self.serialize_key(&el_key);
            let value = self.serialize_value(&el_value);
            if !env::storage_write(&key, &value) {
                self.len += 1;
            }
        }
    }
}
/// Non-consuming iterator for `Map<K, V>`.
pub struct IntoMapRef<'a, K, V> {
    /// Host-side storage iterator handle for the map's prefix range.
    iterator_id: IteratorIndex,
    /// Borrowed map; needed for the prefix when deserializing keys.
    map: &'a Map<K, V>,
}
impl<'a, K, V> Iterator for IntoMapRef<'a, K, V>
where
    K: BorshSerialize + BorshDeserialize,
    V: BorshSerialize + BorshDeserialize,
{
    type Item = (K, V);
    /// Advance the host-side iterator and decode the next key/value pair.
    fn next(&mut self) -> Option<Self::Item> {
        // Guard style: bail out as soon as the host iterator is exhausted.
        if !env::storage_iter_next(self.iterator_id) {
            return None;
        }
        let raw_key = env::storage_iter_key_read()?;
        let raw_value = env::storage_iter_value_read()?;
        let key = Map::<K, V>::deserialize_key(&self.map.prefix, &raw_key);
        let value = Map::<K, V>::deserialize_value(&raw_value);
        Some((key, value))
    }
}
/// Non-consuming iterator over raw serialized keys of `Map<K, V>`.
pub struct IntoMapRawKeys {
    /// Host-side storage iterator handle.
    iterator_id: IteratorIndex,
}
impl Iterator for IntoMapRawKeys {
    type Item = Vec<u8>;
    /// Advance the host-side iterator and read the next raw key.
    fn next(&mut self) -> Option<Self::Item> {
        if !env::storage_iter_next(self.iterator_id) {
            return None;
        }
        env::storage_iter_key_read()
    }
}
/// Non-consuming iterator over serialized values of `Map<K, V>`.
pub struct IntoMapRawValues {
iterator_id: u64,
}
impl Iterator for IntoMapRawValues {
type Item = Vec<u8>;
fn next(&mut self) -> Option<Self::Item> {
if env::storage_iter_next(self.iterator_id) {
env::storage_iter_value_read()
} else {
None
}
}
}
|
use crate::{
builtins::{PyBaseExceptionRef, PyModule, PySet},
common::crt_fd::Fd,
convert::IntoPyException,
function::{ArgumentError, FromArgs, FsPath, FuncArgs},
AsObject, Py, PyObjectRef, PyPayload, PyResult, TryFromObject, VirtualMachine,
};
use std::{
ffi, fs, io,
path::{Path, PathBuf},
};
/// Whether an OS path handed back to Python should become `str` or `bytes`
/// (chosen from the type the caller originally supplied; see `from_fspath`).
#[derive(Debug, Copy, Clone)]
pub(super) enum OutputMode {
    String,
    Bytes,
}
impl OutputMode {
    /// Convert `path` into a Python object according to this mode:
    /// `String` -> `str`, `Bytes` -> `bytes`.
    pub(super) fn process_path(self, path: impl Into<PathBuf>, vm: &VirtualMachine) -> PyResult {
        // Non-generic inner fn: the generic shim only performs the `Into`
        // conversion (a common trick to limit monomorphized code size).
        fn inner(mode: OutputMode, path: PathBuf, vm: &VirtualMachine) -> PyResult {
            // Fails with UnicodeDecodeError when the OS path is not valid UTF-8.
            let path_as_string = |p: PathBuf| {
                p.into_os_string().into_string().map_err(|_| {
                    vm.new_unicode_decode_error(
                        "Can't convert OS path to valid UTF-8 string".into(),
                    )
                })
            };
            match mode {
                OutputMode::String => path_as_string(path).map(|s| vm.ctx.new_str(s).into()),
                OutputMode::Bytes => {
                    // Unix/WASI: expose the OsString's raw bytes directly.
                    #[cfg(any(unix, target_os = "wasi"))]
                    {
                        use rustpython_common::os::ffi::OsStringExt;
                        Ok(vm.ctx.new_bytes(path.into_os_string().into_vec()).into())
                    }
                    // Windows: go through a UTF-8 String, then take its bytes.
                    #[cfg(windows)]
                    {
                        path_as_string(path).map(|s| vm.ctx.new_bytes(s.into_bytes()).into())
                    }
                }
            }
        }
        inner(self, path.into(), vm)
    }
}
// path_ without allow_fd in CPython
/// An OS path received from Python together with the output mode to use when
/// echoing it back (str vs bytes).
#[derive(Clone)]
pub struct OsPath {
    pub path: ffi::OsString,
    pub(super) mode: OutputMode,
}
impl OsPath {
    /// Build an `OsPath` that will be echoed back to Python as `str`.
    pub fn new_str(path: impl Into<ffi::OsString>) -> Self {
        let path = path.into();
        Self {
            path,
            mode: OutputMode::String,
        }
    }
    /// Build from an `FsPath`, remembering whether the caller passed str or bytes.
    pub(crate) fn from_fspath(fspath: FsPath, vm: &VirtualMachine) -> PyResult<OsPath> {
        let path = fspath.as_os_str(vm)?.to_owned();
        let mode = match fspath {
            FsPath::Str(_) => OutputMode::String,
            FsPath::Bytes(_) => OutputMode::Bytes,
        };
        Ok(OsPath { path, mode })
    }
    /// Borrow the stored `OsString` as a `Path` (no allocation).
    pub fn as_path(&self) -> &Path {
        Path::new(&self.path)
    }
    /// Unix/WASI: the path's raw bytes, copied into a `Vec`.
    #[cfg(any(unix, target_os = "wasi"))]
    pub fn into_bytes(self) -> Vec<u8> {
        use rustpython_common::os::ffi::OsStrExt;
        self.path.as_bytes().to_vec()
    }
    /// Windows: lossy UTF-8 conversion of the path, then its bytes.
    #[cfg(windows)]
    pub fn into_bytes(self) -> Vec<u8> {
        self.path.to_string_lossy().to_string().into_bytes()
    }
    /// The path as a NUL-terminated C string; errors if it contains a NUL byte.
    pub fn into_cstring(self, vm: &VirtualMachine) -> PyResult<ffi::CString> {
        ffi::CString::new(self.into_bytes()).map_err(|err| err.into_pyexception(vm))
    }
    /// Windows: the path as a wide (UTF-16) C string for Win32 APIs.
    #[cfg(windows)]
    pub fn to_widecstring(&self, vm: &VirtualMachine) -> PyResult<widestring::WideCString> {
        widestring::WideCString::from_os_str(&self.path).map_err(|err| err.into_pyexception(vm))
    }
    /// The path as a Python object, honoring the stored output mode.
    pub fn filename(&self, vm: &VirtualMachine) -> PyResult {
        self.mode.process_path(self.path.clone(), vm)
    }
}
/// Fetch filesystem metadata for `path`, following symlinks only when
/// `follow_symlink` is true (stat vs lstat semantics).
pub(super) fn fs_metadata<P: AsRef<Path>>(
    path: P,
    follow_symlink: bool,
) -> io::Result<fs::Metadata> {
    let path = path.as_ref();
    match follow_symlink {
        true => fs::metadata(path),
        false => fs::symlink_metadata(path),
    }
}
impl AsRef<Path> for OsPath {
    /// Borrow the underlying `OsString` as a `Path`.
    fn as_ref(&self) -> &Path {
        Path::new(&self.path)
    }
}
impl TryFromObject for OsPath {
    // TODO: path_converter with allow_fd=0 in CPython
    /// Coerce a Python object through `FsPath` (fds not allowed) into an `OsPath`.
    fn try_from_object(vm: &VirtualMachine, obj: PyObjectRef) -> PyResult<Self> {
        let fspath = FsPath::try_from(obj, true, vm)?;
        Self::from_fspath(fspath, vm)
    }
}
// path_t with allow_fd in CPython
/// Either a filesystem path or an already-open file descriptor.
#[derive(Clone)]
pub(crate) enum OsPathOrFd {
    Path(OsPath),
    Fd(i32),
}
impl TryFromObject for OsPathOrFd {
    /// Index-like objects become `Fd`; everything else is converted as a path.
    fn try_from_object(vm: &VirtualMachine, obj: PyObjectRef) -> PyResult<Self> {
        match obj.try_index_opt(vm) {
            Some(int) => Ok(Self::Fd(int?.try_to_primitive(vm)?)),
            None => Ok(Self::Path(obj.try_into_value(vm)?)),
        }
    }
}
impl From<OsPath> for OsPathOrFd {
    /// A bare path maps to the `Path` variant.
    fn from(path: OsPath) -> Self {
        Self::Path(path)
    }
}
impl OsPathOrFd {
pub fn filename(&self, vm: &VirtualMachine) -> PyObjectRef {
match self {
OsPathOrFd::Path(path) => path.filename(vm).unwrap_or_else(|_| vm.ctx.none()),
OsPathOrFd::Fd(fd) => vm.ctx.new_int(*fd).into(),
}
}
}
#[cfg(unix)]
impl IntoPyException for nix::Error {
    /// Route `nix` errors through the `io::Error` conversion path.
    fn into_pyexception(self, vm: &VirtualMachine) -> PyBaseExceptionRef {
        io::Error::from(self).into_pyexception(vm)
    }
}
// TODO: preserve the input `PyObjectRef` of filename and filename2 (Failing check `self.assertIs(err.filename, name, str(func)`)
/// Builder that decorates an `io::Error` with optional `filename`/`filename2`
/// attributes before converting it into a Python exception.
pub struct IOErrorBuilder {
    error: io::Error,
    filename: Option<OsPathOrFd>,
    filename2: Option<OsPathOrFd>,
}
impl IOErrorBuilder {
    /// Start a builder around the raw OS error, with no filenames attached.
    pub fn new(error: io::Error) -> Self {
        Self { error, filename: None, filename2: None }
    }
    /// Attach the primary filename (or fd) to report on the exception.
    pub(crate) fn filename(mut self, filename: impl Into<OsPathOrFd>) -> Self {
        self.filename = Some(filename.into());
        self
    }
    /// Attach the secondary filename (or fd) to report on the exception.
    pub(crate) fn filename2(mut self, filename: impl Into<OsPathOrFd>) -> Self {
        self.filename2 = Some(filename.into());
        self
    }
}
impl IntoPyException for IOErrorBuilder {
fn into_pyexception(self, vm: &VirtualMachine) -> PyBaseExceptionRef {
let excp = self.error.into_pyexception(vm);
if let Some(filename) = self.filename {
excp.as_object()
.set_attr("filename", filename.filename(vm), vm)
.unwrap();
}
if let Some(filename2) = self.filename2 {
excp.as_object()
.set_attr("filename2", filename2.filename(vm), vm)
.unwrap();
}
excp
}
}
/// Convert the error stored in the `errno` variable into an Exception
#[inline]
pub fn errno_err(vm: &VirtualMachine) -> PyBaseExceptionRef {
    crate::common::os::errno().into_pyexception(vm)
}
/// Keyword-argument bundle for `target_is_directory` (defaults to `false`).
#[allow(dead_code)]
#[derive(FromArgs, Default)]
pub struct TargetIsDirectory {
    #[pyarg(any, default = "false")]
    pub(crate) target_is_directory: bool,
}
cfg_if::cfg_if! {
    // Use the libc constant where available; other targets get a fallback value.
    if #[cfg(all(any(unix, target_os = "wasi"), not(target_os = "redox")))] {
        use libc::AT_FDCWD;
    } else {
        const AT_FDCWD: i32 = -100;
    }
}
/// Sentinel fd meaning "resolve relative paths against the current directory".
const DEFAULT_DIR_FD: Fd = Fd(AT_FDCWD);
// XXX: AVAILABLE should be a bool, but we can't yet have it as a bool and just cast it to usize
/// Zero-or-one `dir_fd` slots, depending on platform support (`AVAILABLE` is 0 or 1).
#[derive(Copy, Clone)]
pub struct DirFd<const AVAILABLE: usize>(pub(crate) [Fd; AVAILABLE]);
impl<const AVAILABLE: usize> Default for DirFd<AVAILABLE> {
    /// Fill every available slot with the `AT_FDCWD` default.
    fn default() -> Self {
        Self([DEFAULT_DIR_FD; AVAILABLE])
    }
}
// not used on all platforms
#[allow(unused)]
impl DirFd<1> {
    /// The stored fd wrapped in `Fd`, or `None` when it is the CWD default.
    #[inline(always)]
    pub(crate) fn fd_opt(&self) -> Option<Fd> {
        self.get_opt().map(Fd)
    }
    /// The raw fd value, filtering out the `AT_FDCWD` default.
    #[inline]
    pub(crate) fn get_opt(&self) -> Option<i32> {
        let fd = self.fd();
        if fd == DEFAULT_DIR_FD {
            return None;
        }
        Some(fd.0)
    }
    /// The single stored fd.
    #[inline(always)]
    pub(crate) fn fd(&self) -> Fd {
        self.0[0]
    }
}
impl<const AVAILABLE: usize> FromArgs for DirFd<AVAILABLE> {
    /// Pull the optional `dir_fd` keyword out of `args`.
    ///
    /// A missing keyword or an explicit `None` falls back to `DEFAULT_DIR_FD`;
    /// anything else must be index-like (an int), otherwise a TypeError is
    /// raised. When the platform has no dir_fd support (`AVAILABLE == 0`),
    /// any non-default fd yields NotImplementedError.
    fn from_args(vm: &VirtualMachine, args: &mut FuncArgs) -> Result<Self, ArgumentError> {
        let fd = match args.take_keyword("dir_fd") {
            // `dir_fd=None` behaves like an absent keyword.
            Some(o) if vm.is_none(&o) => DEFAULT_DIR_FD,
            None => DEFAULT_DIR_FD,
            Some(o) => {
                let fd = o.try_index_opt(vm).unwrap_or_else(|| {
                    Err(vm.new_type_error(format!(
                        "argument should be integer or None, not {}",
                        o.class().name()
                    )))
                })?;
                let fd = fd.try_to_primitive(vm)?;
                Fd(fd)
            }
        };
        if AVAILABLE == 0 && fd != DEFAULT_DIR_FD {
            return Err(vm
                .new_not_implemented_error("dir_fd unavailable on this platform".to_owned())
                .into());
        }
        Ok(Self([fd; AVAILABLE]))
    }
}
/// `follow_symlinks=True|False` keyword flag shared by many os functions.
#[derive(FromArgs)]
pub(super) struct FollowSymlinks(
    #[pyarg(named, name = "follow_symlinks", default = "true")] pub bool,
);
/// View raw bytes as an `OsStr`, raising UnicodeDecodeError on platforms
/// where that conversion can fail.
fn bytes_as_osstr<'a>(b: &'a [u8], vm: &VirtualMachine) -> PyResult<&'a ffi::OsStr> {
    rustpython_common::os::bytes_as_osstr(b)
        .map_err(|_| vm.new_unicode_decode_error("can't decode path for utf-8".to_owned()))
}
#[pymodule(sub)]
pub(super) mod _os {
use super::{
errno_err, DirFd, FollowSymlinks, IOErrorBuilder, OsPath, OsPathOrFd, OutputMode,
SupportFunc,
};
use crate::{
builtins::{
PyBytesRef, PyGenericAlias, PyIntRef, PyStrRef, PyTuple, PyTupleRef, PyTypeRef,
},
common::crt_fd::{Fd, Offset},
common::lock::{OnceCell, PyRwLock},
common::suppress_iph,
convert::{IntoPyException, ToPyObject},
function::{ArgBytesLike, Either, FsPath, FuncArgs, OptionalArg},
protocol::PyIterReturn,
recursion::ReprGuard,
types::{IterNext, Iterable, PyStructSequence, Representable, SelfIter},
vm::VirtualMachine,
AsObject, Py, PyObjectRef, PyPayload, PyRef, PyResult, TryFromObject,
};
use crossbeam_utils::atomic::AtomicCell;
use itertools::Itertools;
use std::{
env, ffi, fs,
fs::OpenOptions,
io::{self, Read, Write},
path::PathBuf,
time::{Duration, SystemTime},
};
// Per-function dir_fd support flags: *at()-style variants exist on
// unix-likes but not on Windows or Redox.
const OPEN_DIR_FD: bool = cfg!(not(any(windows, target_os = "redox")));
pub(crate) const MKDIR_DIR_FD: bool = cfg!(not(any(windows, target_os = "redox")));
const STAT_DIR_FD: bool = cfg!(not(any(windows, target_os = "redox")));
const UTIME_DIR_FD: bool = cfg!(not(any(windows, target_os = "redox")));
pub(crate) const SYMLINK_DIR_FD: bool = cfg!(not(any(windows, target_os = "redox")));
// Re-export open()/lseek() flag constants as module attributes.
#[pyattr]
use libc::{
    O_APPEND, O_CREAT, O_EXCL, O_RDONLY, O_RDWR, O_TRUNC, O_WRONLY, SEEK_CUR, SEEK_END,
    SEEK_SET,
};
// access() mode bits, matching the usual POSIX values (R_OK=4, W_OK=2, X_OK=1).
#[pyattr]
pub(crate) const F_OK: u8 = 0;
#[pyattr]
pub(crate) const R_OK: u8 = 1 << 2;
#[pyattr]
pub(crate) const W_OK: u8 = 1 << 1;
#[pyattr]
pub(crate) const X_OK: u8 = 1 << 0;
/// `os.close`: close a raw file descriptor.
#[pyfunction]
fn close(fileno: i32, vm: &VirtualMachine) -> PyResult<()> {
    match Fd(fileno).close() {
        Ok(()) => Ok(()),
        Err(e) => Err(e.into_pyexception(vm)),
    }
}
/// `os.closerange`: close every fd in `[fd_low, fd_high)`, best-effort;
/// individual close errors are ignored.
#[pyfunction]
fn closerange(fd_low: i32, fd_high: i32) {
    (fd_low..fd_high).for_each(|fileno| {
        let _ = Fd(fileno).close();
    });
}
/// Arguments for `os.open`: path, flags, optional mode, optional dir_fd.
#[cfg(any(unix, windows, target_os = "wasi"))]
#[derive(FromArgs)]
struct OpenArgs {
    path: OsPath,
    flags: i32,
    #[pyarg(any, default)]
    mode: Option<i32>,
    #[pyarg(flatten)]
    dir_fd: DirFd<{ OPEN_DIR_FD as usize }>,
}
/// `os.open`: thin wrapper over `os_open`.
#[pyfunction]
fn open(args: OpenArgs, vm: &VirtualMachine) -> PyResult<i32> {
    os_open(args.path, args.flags, args.mode, args.dir_fd, vm)
}
/// Shared implementation of `os.open`; returns the raw fd.
///
/// On Windows the fd is opened non-inheritable (O_NOINHERIT); on other
/// non-WASI targets O_CLOEXEC is added. With an explicit `dir_fd` on
/// non-redox unix, `openat` is used so relative paths resolve against
/// that directory.
#[cfg(any(unix, windows, target_os = "wasi"))]
pub(crate) fn os_open(
    name: OsPath,
    flags: i32,
    mode: Option<i32>,
    dir_fd: DirFd<{ OPEN_DIR_FD as usize }>,
    vm: &VirtualMachine,
) -> PyResult<i32> {
    // Default permission bits for newly created files (subject to umask).
    let mode = mode.unwrap_or(0o777);
    #[cfg(windows)]
    let fd = {
        // dir_fd is unsupported on windows; DirFd<0> carries no fd.
        let [] = dir_fd.0;
        let name = name.to_widecstring(vm)?;
        let flags = flags | libc::O_NOINHERIT;
        Fd::wopen(&name, flags, mode)
    };
    #[cfg(not(windows))]
    let fd = {
        // Clone so the original path is still available for the error below.
        let name = name.clone().into_cstring(vm)?;
        #[cfg(not(target_os = "wasi"))]
        let flags = flags | libc::O_CLOEXEC;
        #[cfg(not(target_os = "redox"))]
        if let Some(dir_fd) = dir_fd.fd_opt() {
            dir_fd.openat(&name, flags, mode)
        } else {
            Fd::open(&name, flags, mode)
        }
        #[cfg(target_os = "redox")]
        {
            let [] = dir_fd.0;
            Fd::open(&name, flags, mode)
        }
    };
    // Attach the filename to any resulting OSError.
    fd.map(|fd| fd.0)
        .map_err(|e| IOErrorBuilder::new(e).filename(name).into_pyexception(vm))
}
/// `os.fsync`: flush the fd's buffers out to the storage device.
#[pyfunction]
fn fsync(fd: i32, vm: &VirtualMachine) -> PyResult<()> {
    Fd(fd).fsync().map_err(|err| err.into_pyexception(vm))
}
/// `os.read`: read at most `n` bytes from `fd`, returning what was read.
#[pyfunction]
fn read(fd: i32, n: usize, vm: &VirtualMachine) -> PyResult<PyBytesRef> {
    let mut fobj = Fd(fd);
    let mut buf = vec![0u8; n];
    match fobj.read(&mut buf) {
        Ok(count) => {
            // A short read is normal; shrink the buffer to the bytes read.
            buf.truncate(count);
            Ok(vm.ctx.new_bytes(buf))
        }
        Err(err) => Err(err.into_pyexception(vm)),
    }
}
/// `os.write`: write a bytes-like object to `fd`; returns the byte count written.
#[pyfunction]
fn write(fd: i32, data: ArgBytesLike, vm: &VirtualMachine) -> PyResult {
    let mut fobj = Fd(fd);
    let result = data.with_ref(|bytes| fobj.write(bytes));
    match result {
        Ok(written) => Ok(vm.ctx.new_int(written).into()),
        Err(err) => Err(err.into_pyexception(vm)),
    }
}
/// `os.remove` / `os.unlink`: delete a file.
///
/// On Windows, a directory junction reports as a directory via `metadata`
/// but as a symlink via `symlink_metadata`; such junctions must be removed
/// with `remove_dir` rather than `remove_file`.
#[pyfunction]
#[pyfunction(name = "unlink")]
fn remove(path: OsPath, dir_fd: DirFd<0>, vm: &VirtualMachine) -> PyResult<()> {
    // dir_fd is declared for signature parity only; DirFd<0> holds no fd.
    let [] = dir_fd.0;
    let is_junction = cfg!(windows)
        && fs::metadata(&path).map_or(false, |meta| meta.file_type().is_dir())
        && fs::symlink_metadata(&path).map_or(false, |meta| meta.file_type().is_symlink());
    let res = if is_junction {
        fs::remove_dir(&path)
    } else {
        fs::remove_file(&path)
    };
    res.map_err(|e| IOErrorBuilder::new(e).filename(path).into_pyexception(vm))
}
/// `os.mkdir` (non-Windows): create a directory with the given mode,
/// optionally relative to `dir_fd` via `mkdirat`.
#[cfg(not(windows))]
#[pyfunction]
fn mkdir(
    path: OsPath,
    mode: OptionalArg<i32>,
    dir_fd: DirFd<{ MKDIR_DIR_FD as usize }>,
    vm: &VirtualMachine,
) -> PyResult<()> {
    // Default permission bits, subject to the process umask.
    let mode = mode.unwrap_or(0o777);
    let path = path.into_cstring(vm)?;
    #[cfg(not(target_os = "redox"))]
    if let Some(fd) = dir_fd.get_opt() {
        // Create relative to an explicit directory fd.
        let res = unsafe { libc::mkdirat(fd, path.as_ptr(), mode as _) };
        let res = if res < 0 { Err(errno_err(vm)) } else { Ok(()) };
        return res;
    }
    #[cfg(target_os = "redox")]
    let [] = dir_fd.0;
    let res = unsafe { libc::mkdir(path.as_ptr(), mode as _) };
    if res < 0 {
        Err(errno_err(vm))
    } else {
        Ok(())
    }
}
/// `os.mkdirs`: recursively create a directory and any missing ancestors.
#[pyfunction]
fn mkdirs(path: PyStrRef, vm: &VirtualMachine) -> PyResult<()> {
    match fs::create_dir_all(path.as_str()) {
        Ok(()) => Ok(()),
        Err(err) => Err(err.into_pyexception(vm)),
    }
}
/// `os.rmdir`: remove an (empty) directory.
#[pyfunction]
fn rmdir(path: OsPath, dir_fd: DirFd<0>, vm: &VirtualMachine) -> PyResult<()> {
    // dir_fd is declared for signature parity only; DirFd<0> holds no fd.
    let [] = dir_fd.0;
    match fs::remove_dir(&path) {
        Ok(()) => Ok(()),
        Err(e) => Err(IOErrorBuilder::new(e).filename(path).into_pyexception(vm)),
    }
}
// Listing by fd needs fdopendir()-style support, absent on Windows/Redox.
const LISTDIR_FD: bool = cfg!(all(unix, not(target_os = "redox")));
/// `os.listdir`: list directory entries by path or (on supporting unixes)
/// by an open directory fd. Defaults to ".".
#[pyfunction]
fn listdir(path: OptionalArg<OsPathOrFd>, vm: &VirtualMachine) -> PyResult<Vec<PyObjectRef>> {
    let path = path.unwrap_or_else(|| OsPathOrFd::Path(OsPath::new_str(".")));
    let list = match path {
        OsPathOrFd::Path(path) => {
            let dir_iter = fs::read_dir(&path).map_err(|err| err.into_pyexception(vm))?;
            dir_iter
                .map(|entry| match entry {
                    // Output type (str vs bytes) follows the input path's mode.
                    Ok(entry_path) => path.mode.process_path(entry_path.file_name(), vm),
                    Err(e) => Err(IOErrorBuilder::new(e)
                        .filename(path.clone())
                        .into_pyexception(vm)),
                })
                .collect::<PyResult<_>>()?
        }
        OsPathOrFd::Fd(fno) => {
            #[cfg(not(all(unix, not(target_os = "redox"))))]
            {
                let _ = fno;
                return Err(vm.new_not_implemented_error(
                    "can't pass fd to listdir on this platform".to_owned(),
                ));
            }
            #[cfg(all(unix, not(target_os = "redox")))]
            {
                use rustpython_common::os::ffi::OsStrExt;
                // dup() so iterating does not consume/close the caller's fd.
                let new_fd = nix::unistd::dup(fno).map_err(|e| e.into_pyexception(vm))?;
                let mut dir =
                    nix::dir::Dir::from_fd(new_fd).map_err(|e| e.into_pyexception(vm))?;
                dir.iter()
                    .filter_map(|entry| {
                        entry
                            .map_err(|e| e.into_pyexception(vm))
                            .and_then(|entry| {
                                let fname = entry.file_name().to_bytes();
                                // Skip the "." and ".." pseudo-entries.
                                Ok(match fname {
                                    b"." | b".." => None,
                                    _ => Some(
                                        OutputMode::String
                                            .process_path(ffi::OsStr::from_bytes(fname), vm)?,
                                    ),
                                })
                            })
                            .transpose()
                    })
                    .collect::<PyResult<_>>()?
            }
        }
    };
    Ok(list)
}
/// Borrow a str/bytes argument as `&str`; bytes must decode as valid UTF-8.
fn pyref_as_str<'a>(
    obj: &'a Either<PyStrRef, PyBytesRef>,
    vm: &VirtualMachine,
) -> PyResult<&'a str> {
    match obj {
        Either::A(s) => Ok(s.as_str()),
        Either::B(b) => {
            let os_str = super::bytes_as_osstr(b.as_bytes(), vm)?;
            os_str.to_str().ok_or_else(|| {
                vm.new_unicode_decode_error("can't decode bytes for utf-8".to_owned())
            })
        }
    }
}
/// `os.putenv`: set an environment variable after validating key and value.
#[pyfunction]
fn putenv(
    key: Either<PyStrRef, PyBytesRef>,
    value: Either<PyStrRef, PyBytesRef>,
    vm: &VirtualMachine,
) -> PyResult<()> {
    let key = pyref_as_str(&key, vm)?;
    let value = pyref_as_str(&value, vm)?;
    // Validation order matters for which error is reported first:
    // embedded NUL checks, then name-shape checks.
    let has_nul = key.contains('\0') || value.contains('\0');
    if has_nul {
        return Err(vm.new_value_error("embedded null byte".to_string()));
    }
    let illegal_name = key.is_empty() || key.contains('=');
    if illegal_name {
        return Err(vm.new_value_error("illegal environment variable name".to_string()));
    }
    env::set_var(key, value);
    Ok(())
}
/// `os.unsetenv`: remove an environment variable after validating its name.
#[pyfunction]
fn unsetenv(key: Either<PyStrRef, PyBytesRef>, vm: &VirtualMachine) -> PyResult<()> {
    let key = pyref_as_str(&key, vm)?;
    // Same validation order as putenv: NUL check first, then name shape.
    if key.contains('\0') {
        return Err(vm.new_value_error("embedded null byte".to_string()));
    }
    let illegal_name = key.is_empty() || key.contains('=');
    if illegal_name {
        return Err(vm.new_value_error("illegal environment variable name".to_string()));
    }
    env::remove_var(key);
    Ok(())
}
/// `os.readlink`: return the target of a symbolic link, as str or bytes
/// depending on the mode of the input path.
#[pyfunction]
fn readlink(path: OsPath, dir_fd: DirFd<0>, vm: &VirtualMachine) -> PyResult {
    // Capture the output mode before `path` is consumed by the error builder.
    let mode = path.mode;
    let [] = dir_fd.0;
    let path = fs::read_link(&path)
        .map_err(|err| IOErrorBuilder::new(err).filename(path).into_pyexception(vm))?;
    mode.process_path(path, vm)
}
/// `os.DirEntry`: one result of `os.scandir`, caching file type, stat
/// results and inode so repeated queries avoid extra syscalls.
#[pyattr]
#[pyclass(name)]
#[derive(Debug, PyPayload)]
struct DirEntry {
    file_name: std::ffi::OsString,
    pathval: PathBuf,
    // Captured at scan time; a stored error is surfaced lazily on access.
    file_type: io::Result<fs::FileType>,
    // Whether name/path are reported as str or bytes.
    mode: OutputMode,
    // Lazily-computed caches for stat()/lstat() results.
    stat: OnceCell<PyObjectRef>,
    lstat: OnceCell<PyObjectRef>,
    // On unix the inode comes from the directory entry itself; elsewhere
    // it is fetched (and then cached) on first request.
    #[cfg(unix)]
    ino: AtomicCell<u64>,
    #[cfg(not(unix))]
    ino: AtomicCell<Option<u64>>,
}
#[pyclass(with(Representable))]
impl DirEntry {
    /// The entry's bare file name, as str or bytes per `self.mode`.
    #[pygetset]
    fn name(&self, vm: &VirtualMachine) -> PyResult {
        self.mode.process_path(&self.file_name, vm)
    }
    /// The entry's full path as produced by scandir.
    #[pygetset]
    fn path(&self, vm: &VirtualMachine) -> PyResult {
        self.mode.process_path(&self.pathval, vm)
    }
    /// Shared helper for is_dir/is_file: run `action` on fresh metadata;
    /// a missing file yields `false` rather than an error.
    fn perform_on_metadata(
        &self,
        follow_symlinks: FollowSymlinks,
        action: fn(fs::Metadata) -> bool,
        vm: &VirtualMachine,
    ) -> PyResult<bool> {
        match super::fs_metadata(&self.pathval, follow_symlinks.0) {
            Ok(meta) => Ok(action(meta)),
            Err(e) => {
                // FileNotFoundError is caught and not raised
                if e.kind() == io::ErrorKind::NotFound {
                    Ok(false)
                } else {
                    Err(e.into_pyexception(vm))
                }
            }
        }
    }
    #[pymethod]
    fn is_dir(&self, follow_symlinks: FollowSymlinks, vm: &VirtualMachine) -> PyResult<bool> {
        self.perform_on_metadata(
            follow_symlinks,
            |meta: fs::Metadata| -> bool { meta.is_dir() },
            vm,
        )
    }
    #[pymethod]
    fn is_file(&self, follow_symlinks: FollowSymlinks, vm: &VirtualMachine) -> PyResult<bool> {
        self.perform_on_metadata(
            follow_symlinks,
            |meta: fs::Metadata| -> bool { meta.is_file() },
            vm,
        )
    }
    /// Uses the file type captured at scan time, so no extra syscall here.
    #[pymethod]
    fn is_symlink(&self, vm: &VirtualMachine) -> PyResult<bool> {
        Ok(self
            .file_type
            .as_ref()
            .map_err(|err| err.into_pyexception(vm))?
            .is_symlink())
    }
    /// stat() with per-entry caching: follow and no-follow results are
    /// memoized independently in the `stat`/`lstat` OnceCells.
    #[pymethod]
    fn stat(
        &self,
        dir_fd: DirFd<{ STAT_DIR_FD as usize }>,
        follow_symlinks: FollowSymlinks,
        vm: &VirtualMachine,
    ) -> PyResult {
        let do_stat = |follow_symlinks| {
            stat(
                OsPath {
                    path: self.pathval.as_os_str().to_owned(),
                    mode: OutputMode::String,
                }
                .into(),
                dir_fd,
                FollowSymlinks(follow_symlinks),
                vm,
            )
        };
        let lstat = || self.lstat.get_or_try_init(|| do_stat(false));
        let stat = if follow_symlinks.0 {
            // if follow_symlinks == true and we aren't a symlink, cache both stat and lstat
            self.stat.get_or_try_init(|| {
                if self.is_symlink(vm)? {
                    do_stat(true)
                } else {
                    lstat().map(Clone::clone)
                }
            })?
        } else {
            lstat()?
        };
        Ok(stat.clone())
    }
    /// Non-unix: the inode is fetched lazily via a no-follow stat and cached.
    #[cfg(not(unix))]
    #[pymethod]
    fn inode(&self, vm: &VirtualMachine) -> PyResult<u64> {
        match self.ino.load() {
            Some(ino) => Ok(ino),
            None => {
                let stat = stat_inner(
                    OsPath {
                        path: self.pathval.as_os_str().to_owned(),
                        mode: OutputMode::String,
                    }
                    .into(),
                    DirFd::default(),
                    FollowSymlinks(false),
                )
                .map_err(|e| e.into_pyexception(vm))?
                .ok_or_else(|| crate::exceptions::cstring_error(vm))?;
                // Err(T) means other thread set `ino` at the mean time which is safe to ignore
                let _ = self.ino.compare_exchange(None, Some(stat.st_ino));
                Ok(stat.st_ino)
            }
        }
    }
    /// Unix: the inode was captured from the directory entry at scan time.
    #[cfg(unix)]
    #[pymethod]
    fn inode(&self, _vm: &VirtualMachine) -> PyResult<u64> {
        Ok(self.ino.load())
    }
    /// __fspath__: same value as the `path` attribute.
    #[pymethod(magic)]
    fn fspath(&self, vm: &VirtualMachine) -> PyResult {
        self.path(vm)
    }
    /// Support `DirEntry[...]` generic-alias subscription.
    #[pyclassmethod(magic)]
    fn class_getitem(cls: PyTypeRef, args: PyObjectRef, vm: &VirtualMachine) -> PyGenericAlias {
        PyGenericAlias::new(cls, args, vm)
    }
}
impl Representable for DirEntry {
    /// repr like `<DirEntry 'name'>`; falls back to `<DirEntry>` when the
    /// name is unavailable, and errors on reentrant __repr__ calls.
    #[inline]
    fn repr_str(zelf: &Py<Self>, vm: &VirtualMachine) -> PyResult<String> {
        let name = match zelf.as_object().get_attr("name", vm) {
            Ok(name) => Some(name),
            // A missing/undecodable name is tolerated; other errors propagate.
            Err(e)
                if e.fast_isinstance(vm.ctx.exceptions.attribute_error)
                    || e.fast_isinstance(vm.ctx.exceptions.value_error) =>
            {
                None
            }
            Err(e) => return Err(e),
        };
        if let Some(name) = name {
            if let Some(_guard) = ReprGuard::enter(vm, zelf.as_object()) {
                let repr = name.repr(vm)?;
                Ok(format!("<{} {}>", zelf.class(), repr))
            } else {
                Err(vm.new_runtime_error(format!(
                    "reentrant call inside {}.__repr__",
                    zelf.class()
                )))
            }
        } else {
            Ok(format!("<{}>", zelf.class()))
        }
    }
}
/// Iterator returned by `os.scandir`; yields `DirEntry` objects.
#[pyattr]
#[pyclass(name = "ScandirIter")]
#[derive(Debug, PyPayload)]
struct ScandirIterator {
    // `None` once closed or exhausted; dropping the ReadDir releases the
    // directory handle.
    entries: PyRwLock<Option<fs::ReadDir>>,
    mode: OutputMode,
}
#[pyclass(with(IterNext, Iterable))]
impl ScandirIterator {
#[pymethod]
fn close(&self) {
let entryref: &mut Option<fs::ReadDir> = &mut self.entries.write();
let _dropped = entryref.take();
}
#[pymethod(magic)]
fn enter(zelf: PyRef<Self>) -> PyRef<Self> {
zelf
}
#[pymethod(magic)]
fn exit(zelf: PyRef<Self>, _args: FuncArgs) {
zelf.close()
}
}
impl SelfIter for ScandirIterator {}
impl IterNext for ScandirIterator {
    /// Produce the next `DirEntry`, or StopIteration once the underlying
    /// `ReadDir` is exhausted or the iterator has been closed.
    fn next(zelf: &crate::Py<Self>, vm: &VirtualMachine) -> PyResult<PyIterReturn> {
        let entryref: &mut Option<fs::ReadDir> = &mut zelf.entries.write();
        match entryref {
            // Already closed.
            None => Ok(PyIterReturn::StopIteration(None)),
            Some(inner) => match inner.next() {
                Some(entry) => match entry {
                    Ok(entry) => {
                        // On unix the inode comes straight from the dirent.
                        #[cfg(unix)]
                        let ino = {
                            use std::os::unix::fs::DirEntryExt;
                            entry.ino()
                        };
                        #[cfg(not(unix))]
                        let ino = None;
                        Ok(PyIterReturn::Return(
                            DirEntry {
                                file_name: entry.file_name(),
                                pathval: entry.path(),
                                file_type: entry.file_type(),
                                mode: zelf.mode,
                                lstat: OnceCell::new(),
                                stat: OnceCell::new(),
                                ino: AtomicCell::new(ino),
                            }
                            .into_ref(&vm.ctx)
                            .into(),
                        ))
                    }
                    Err(err) => Err(err.into_pyexception(vm)),
                },
                None => {
                    // Exhausted: drop the handle so it is released eagerly.
                    let _dropped = entryref.take();
                    Ok(PyIterReturn::StopIteration(None))
                }
            },
        }
    }
}
#[pyfunction]
fn scandir(path: OptionalArg<OsPath>, vm: &VirtualMachine) -> PyResult {
let path = path.unwrap_or_else(|| OsPath::new_str("."));
let entries = fs::read_dir(path.path).map_err(|err| err.into_pyexception(vm))?;
Ok(ScandirIterator {
entries: PyRwLock::new(Some(entries)),
mode: path.mode,
}
.into_ref(&vm.ctx)
.into())
}
/// `os.stat_result`: struct-sequence holding the results of a stat call.
#[pyattr]
#[pyclass(module = "os", name = "stat_result")]
#[derive(Debug, PyStructSequence, FromArgs)]
struct StatResult {
    pub st_mode: PyIntRef,
    pub st_ino: PyIntRef,
    pub st_dev: PyIntRef,
    pub st_nlink: PyIntRef,
    pub st_uid: PyIntRef,
    pub st_gid: PyIntRef,
    pub st_size: PyIntRef,
    // TODO: unnamed structsequence fields
    // Whole-second timestamps (integer counterparts of the float fields).
    #[pyarg(positional, default)]
    pub __st_atime_int: libc::time_t,
    #[pyarg(positional, default)]
    pub __st_mtime_int: libc::time_t,
    #[pyarg(positional, default)]
    pub __st_ctime_int: libc::time_t,
    // Timestamps as float seconds...
    #[pyarg(any, default)]
    pub st_atime: f64,
    #[pyarg(any, default)]
    pub st_mtime: f64,
    #[pyarg(any, default)]
    pub st_ctime: f64,
    // ...and as total integer nanoseconds.
    #[pyarg(any, default)]
    pub st_atime_ns: i128,
    #[pyarg(any, default)]
    pub st_mtime_ns: i128,
    #[pyarg(any, default)]
    pub st_ctime_ns: i128,
}
#[pyclass(with(PyStructSequence))]
impl StatResult {
    /// Build a StatResult from the platform stat struct, normalizing the
    /// differently-named nanosecond fields across platforms.
    fn from_stat(stat: &StatStruct, vm: &VirtualMachine) -> Self {
        let (atime, mtime, ctime);
        #[cfg(any(unix, windows))]
        #[cfg(not(target_os = "netbsd"))]
        {
            atime = (stat.st_atime, stat.st_atime_nsec);
            mtime = (stat.st_mtime, stat.st_mtime_nsec);
            ctime = (stat.st_ctime, stat.st_ctime_nsec);
        }
        #[cfg(target_os = "netbsd")]
        {
            // NetBSD spells the nanosecond fields without the underscore.
            atime = (stat.st_atime, stat.st_atimensec);
            mtime = (stat.st_mtime, stat.st_mtimensec);
            ctime = (stat.st_ctime, stat.st_ctimensec);
        }
        #[cfg(target_os = "wasi")]
        {
            // WASI exposes timespec structs instead of split fields.
            atime = (stat.st_atim.tv_sec, stat.st_atim.tv_nsec);
            mtime = (stat.st_mtim.tv_sec, stat.st_mtim.tv_nsec);
            ctime = (stat.st_ctim.tv_sec, stat.st_ctim.tv_nsec);
        }
        const NANOS_PER_SEC: u32 = 1_000_000_000;
        // (secs, nsecs) -> float seconds and -> total integer nanoseconds.
        let to_f64 = |(s, ns)| (s as f64) + (ns as f64) / (NANOS_PER_SEC as f64);
        let to_ns = |(s, ns)| s as i128 * NANOS_PER_SEC as i128 + ns as i128;
        StatResult {
            st_mode: vm.ctx.new_pyref(stat.st_mode),
            st_ino: vm.ctx.new_pyref(stat.st_ino),
            st_dev: vm.ctx.new_pyref(stat.st_dev),
            st_nlink: vm.ctx.new_pyref(stat.st_nlink),
            st_uid: vm.ctx.new_pyref(stat.st_uid),
            st_gid: vm.ctx.new_pyref(stat.st_gid),
            st_size: vm.ctx.new_pyref(stat.st_size),
            __st_atime_int: atime.0,
            __st_mtime_int: mtime.0,
            __st_ctime_int: ctime.0,
            st_atime: to_f64(atime),
            st_mtime: to_f64(mtime),
            st_ctime: to_f64(ctime),
            st_atime_ns: to_ns(atime),
            st_mtime_ns: to_ns(mtime),
            st_ctime_ns: to_ns(ctime),
        }
    }
    /// Constructor slot: accepts either the individual fields or a single
    /// (possibly nested) tuple of them, which is flattened first.
    #[pyslot]
    fn slot_new(_cls: PyTypeRef, args: FuncArgs, vm: &VirtualMachine) -> PyResult {
        let flatten_args = |r: &[PyObjectRef]| {
            let mut vec_args = Vec::from(r);
            loop {
                // A single tuple argument is unwrapped, repeatedly.
                if let Ok(obj) = vec_args.iter().exactly_one() {
                    match obj.payload::<PyTuple>() {
                        Some(t) => {
                            vec_args = Vec::from(t.as_slice());
                        }
                        None => {
                            return vec_args;
                        }
                    }
                } else {
                    return vec_args;
                }
            }
        };
        let args: FuncArgs = flatten_args(&args.args).into();
        let stat: StatResult = args.bind(vm)?;
        Ok(stat.to_pyobject(vm))
    }
}
// On unix-likes, reuse libc's stat struct directly.
#[cfg(not(windows))]
use libc::stat as StatStruct;
// Windows has no usable libc stat; define a minimal equivalent, populated
// by `meta_to_stat` below.
#[cfg(windows)]
struct StatStruct {
    st_dev: libc::c_ulong,
    st_ino: u64,
    st_mode: libc::c_ushort,
    st_nlink: i32,
    st_uid: i32,
    st_gid: i32,
    st_size: u64,
    st_atime: libc::time_t,
    st_atime_nsec: i32,
    st_mtime: libc::time_t,
    st_mtime_nsec: i32,
    st_ctime: libc::time_t,
    st_ctime_nsec: i32,
}
/// Windows: synthesize a StatStruct from std's `fs::Metadata`.
/// dev/ino/nlink/uid/gid are not available here and are reported as 0.
#[cfg(windows)]
fn meta_to_stat(meta: &fs::Metadata) -> io::Result<StatStruct> {
    let st_mode = {
        // Based on CPython fileutils.c' attributes_to_mode
        let mut m = 0;
        if meta.is_dir() {
            m |= libc::S_IFDIR | 0o111; /* IFEXEC for user,group,other */
        } else {
            m |= libc::S_IFREG;
        }
        if meta.permissions().readonly() {
            m |= 0o444;
        } else {
            m |= 0o666;
        }
        m as _
    };
    let (atime, mtime, ctime) = (meta.accessed()?, meta.modified()?, meta.created()?);
    // Timestamps before the Unix epoch come back as negative values.
    let sec = |systime: SystemTime| match systime.duration_since(SystemTime::UNIX_EPOCH) {
        Ok(d) => d.as_secs() as libc::time_t,
        Err(e) => -(e.duration().as_secs() as libc::time_t),
    };
    let nsec = |systime: SystemTime| match systime.duration_since(SystemTime::UNIX_EPOCH) {
        Ok(d) => d.subsec_nanos() as i32,
        Err(e) => -(e.duration().subsec_nanos() as i32),
    };
    Ok(StatStruct {
        st_dev: 0,
        st_ino: 0,
        st_mode,
        st_nlink: 0,
        st_uid: 0,
        st_gid: 0,
        st_size: meta.len(),
        st_atime: sec(atime),
        st_mtime: sec(mtime),
        st_ctime: sec(ctime),
        st_atime_nsec: nsec(atime),
        st_mtime_nsec: nsec(mtime),
        st_ctime_nsec: nsec(ctime),
    })
}
/// Windows stat: uses fs metadata (by path, or via a borrowed file handle
/// for fd input). Always returns `Some` on success.
#[cfg(windows)]
fn stat_inner(
    file: OsPathOrFd,
    dir_fd: DirFd<{ STAT_DIR_FD as usize }>,
    follow_symlinks: FollowSymlinks,
) -> io::Result<Option<StatStruct>> {
    // TODO: replicate CPython's win32_xstat
    let [] = dir_fd.0;
    let meta = match file {
        OsPathOrFd::Path(path) => super::fs_metadata(path, follow_symlinks.0)?,
        OsPathOrFd::Fd(fno) => {
            use std::os::windows::io::FromRawHandle;
            let handle = Fd(fno).to_raw_handle()?;
            // ManuallyDrop: the temporary File must not close the caller's handle.
            let file =
                std::mem::ManuallyDrop::new(unsafe { std::fs::File::from_raw_handle(handle) });
            file.metadata()?
        }
    };
    meta_to_stat(&meta).map(Some)
}
/// Unix stat: dispatches to fstatat/stat/lstat/fstat as appropriate.
/// Returns `Ok(None)` when the path contains an interior NUL byte.
#[cfg(not(windows))]
fn stat_inner(
    file: OsPathOrFd,
    dir_fd: DirFd<{ STAT_DIR_FD as usize }>,
    follow_symlinks: FollowSymlinks,
) -> io::Result<Option<StatStruct>> {
    let mut stat = std::mem::MaybeUninit::uninit();
    let ret = match file {
        OsPathOrFd::Path(path) => {
            use rustpython_common::os::ffi::OsStrExt;
            let path = path.as_ref().as_os_str().as_bytes();
            let path = match ffi::CString::new(path) {
                Ok(x) => x,
                // Interior NUL: signal via None so the caller raises.
                Err(_) => return Ok(None),
            };
            #[cfg(not(target_os = "redox"))]
            let fstatat_ret = dir_fd.get_opt().map(|dir_fd| {
                let flags = if follow_symlinks.0 {
                    0
                } else {
                    libc::AT_SYMLINK_NOFOLLOW
                };
                // SAFETY: path is a valid CString; stat points at a
                // properly-sized MaybeUninit<stat>.
                unsafe { libc::fstatat(dir_fd, path.as_ptr(), stat.as_mut_ptr(), flags) }
            });
            #[cfg(target_os = "redox")]
            let ([], fstatat_ret) = (dir_fd.0, None);
            fstatat_ret.unwrap_or_else(|| {
                if follow_symlinks.0 {
                    unsafe { libc::stat(path.as_ptr(), stat.as_mut_ptr()) }
                } else {
                    unsafe { libc::lstat(path.as_ptr(), stat.as_mut_ptr()) }
                }
            })
        }
        OsPathOrFd::Fd(fd) => unsafe { libc::fstat(fd, stat.as_mut_ptr()) },
    };
    if ret < 0 {
        return Err(io::Error::last_os_error());
    }
    // SAFETY: a successful (ret >= 0) call fully initializes the struct.
    Ok(Some(unsafe { stat.assume_init() }))
}
/// `os.stat` / `os.fstat`: stat by path or fd.
#[pyfunction]
#[pyfunction(name = "fstat")]
fn stat(
    file: OsPathOrFd,
    dir_fd: DirFd<{ STAT_DIR_FD as usize }>,
    follow_symlinks: FollowSymlinks,
    vm: &VirtualMachine,
) -> PyResult {
    // `None` from stat_inner means the path held an interior NUL (unix path).
    let stat = stat_inner(file.clone(), dir_fd, follow_symlinks)
        .map_err(|e| IOErrorBuilder::new(e).filename(file).into_pyexception(vm))?
        .ok_or_else(|| crate::exceptions::cstring_error(vm))?;
    Ok(StatResult::from_stat(&stat, vm).to_pyobject(vm))
}
/// `os.lstat`: stat that never follows a final symlink.
#[pyfunction]
fn lstat(
    file: OsPathOrFd,
    dir_fd: DirFd<{ STAT_DIR_FD as usize }>,
    vm: &VirtualMachine,
) -> PyResult {
    stat(file, dir_fd, FollowSymlinks(false), vm)
}
/// Current working directory as a PathBuf.
fn curdir_inner(vm: &VirtualMachine) -> PyResult<PathBuf> {
    env::current_dir().map_err(|err| err.into_pyexception(vm))
}
/// `os.getcwd`: current working directory as str.
#[pyfunction]
fn getcwd(vm: &VirtualMachine) -> PyResult {
    OutputMode::String.process_path(curdir_inner(vm)?, vm)
}
/// `os.getcwdb`: current working directory as bytes.
#[pyfunction]
fn getcwdb(vm: &VirtualMachine) -> PyResult {
    OutputMode::Bytes.process_path(curdir_inner(vm)?, vm)
}
/// `os.chdir`: change the current working directory.
#[pyfunction]
fn chdir(path: OsPath, vm: &VirtualMachine) -> PyResult<()> {
    env::set_current_dir(&path.path)
        .map_err(|err| IOErrorBuilder::new(err).filename(path).into_pyexception(vm))
}
/// `os.fspath`: filesystem representation of a path-like object.
#[pyfunction]
fn fspath(path: PyObjectRef, vm: &VirtualMachine) -> PyResult<FsPath> {
    FsPath::try_from(path, false, vm)
}
/// `os.rename` / `os.replace`: move a file, reporting both paths on failure.
#[pyfunction]
#[pyfunction(name = "replace")]
fn rename(src: OsPath, dst: OsPath, vm: &VirtualMachine) -> PyResult<()> {
    fs::rename(&src.path, &dst.path).map_err(|err| {
        IOErrorBuilder::new(err)
            .filename(src)
            .filename2(dst)
            .into_pyexception(vm)
    })
}
/// `os.getpid`: id of the current process.
#[pyfunction]
fn getpid(vm: &VirtualMachine) -> PyObjectRef {
    vm.ctx.new_int(std::process::id()).into()
}
/// `os.cpu_count`: number of CPUs as reported by num_cpus.
#[pyfunction]
fn cpu_count(vm: &VirtualMachine) -> PyObjectRef {
    vm.ctx.new_int(num_cpus::get()).into()
}
/// `os._exit`: terminate the process with the given code via
/// `std::process::exit` (no unwinding of the Rust stack).
#[pyfunction]
fn _exit(code: i32) {
    std::process::exit(code)
}
/// `os.abort`: call the C library's abort(), terminating abnormally.
#[pyfunction]
fn abort() {
    extern "C" {
        fn abort();
    }
    // SAFETY: abort() takes no arguments and never returns.
    unsafe { abort() }
}
/// `os.urandom`: `size` cryptographically random bytes from the OS source.
#[pyfunction]
fn urandom(size: isize, vm: &VirtualMachine) -> PyResult<Vec<u8>> {
    // Negative sizes are rejected; try_from fails exactly for size < 0.
    let size = usize::try_from(size)
        .map_err(|_| vm.new_value_error("negative argument not allowed".to_owned()))?;
    let mut buf = vec![0u8; size];
    getrandom::getrandom(&mut buf).map_err(|e| match e.raw_os_error() {
        Some(errno) => io::Error::from_raw_os_error(errno).into_pyexception(vm),
        None => vm.new_os_error("Getting random failed".to_owned()),
    })?;
    Ok(buf)
}
/// `os.isatty`: true if the fd refers to a terminal device.
#[pyfunction]
pub fn isatty(fd: i32) -> bool {
    // suppress_iph!: project macro wrapping the CRT call — presumably guards
    // against MSVC's invalid-parameter handler on bad fds (see
    // common::suppress_iph; confirm there).
    unsafe { suppress_iph!(libc::isatty(fd)) != 0 }
}
/// `os.lseek`: reposition the fd's file offset; returns the new offset.
#[pyfunction]
pub fn lseek(fd: i32, position: Offset, how: i32, vm: &VirtualMachine) -> PyResult<Offset> {
    #[cfg(not(windows))]
    let res = unsafe { suppress_iph!(libc::lseek(fd, position, how)) };
    #[cfg(windows)]
    let res = unsafe {
        use winapi::um::{fileapi, winnt};
        let handle = Fd(fd).to_raw_handle().map_err(|e| e.into_pyexception(vm))?;
        // SetFilePointer takes the 64-bit offset split into 32-bit halves.
        let mut li = winnt::LARGE_INTEGER::default();
        *li.QuadPart_mut() = position;
        let ret = fileapi::SetFilePointer(
            handle,
            li.u().LowPart as _,
            &mut li.u_mut().HighPart,
            how as _,
        );
        // NOTE(review): INVALID_SET_FILE_POINTER can also be the valid low
        // dword of a real offset; CPython disambiguates via GetLastError —
        // confirm whether that matters here.
        if ret == fileapi::INVALID_SET_FILE_POINTER {
            -1
        } else {
            // Recombine the returned low half with the updated high half.
            li.u_mut().LowPart = ret;
            *li.QuadPart()
        }
    };
    if res < 0 {
        Err(errno_err(vm))
    } else {
        Ok(res)
    }
}
/// `os.link`: create a hard link, reporting both paths on failure.
#[pyfunction]
fn link(src: OsPath, dst: OsPath, vm: &VirtualMachine) -> PyResult<()> {
    match fs::hard_link(&src.path, &dst.path) {
        Ok(()) => Ok(()),
        Err(err) => Err(IOErrorBuilder::new(err)
            .filename(src)
            .filename2(dst)
            .into_pyexception(vm)),
    }
}
/// Arguments for `os.utime`: `times` (seconds) and `ns` (nanoseconds) are
/// mutually-exclusive two-tuples of (atime, mtime).
#[derive(FromArgs)]
struct UtimeArgs {
    path: OsPath,
    #[pyarg(any, default)]
    times: Option<PyTupleRef>,
    #[pyarg(named, default)]
    ns: Option<PyTupleRef>,
    #[pyarg(flatten)]
    dir_fd: DirFd<{ UTIME_DIR_FD as usize }>,
    #[pyarg(flatten)]
    follow_symlinks: FollowSymlinks,
}
/// `os.utime`: set access/modification times from either `times` (seconds)
/// or `ns` (nanoseconds); with neither, use the current time.
#[pyfunction]
fn utime(args: UtimeArgs, vm: &VirtualMachine) -> PyResult<()> {
    // Accept only exactly-two-element tuples.
    let parse_tup = |tup: &PyTuple| -> Option<(PyObjectRef, PyObjectRef)> {
        if tup.len() != 2 {
            None
        } else {
            Some((tup[0].clone(), tup[1].clone()))
        }
    };
    let (acc, modif) = match (args.times, args.ns) {
        (Some(t), None) => {
            let (a, m) = parse_tup(&t).ok_or_else(|| {
                vm.new_type_error(
                    "utime: 'times' must be either a tuple of two ints or None".to_owned(),
                )
            })?;
            (a.try_into_value(vm)?, m.try_into_value(vm)?)
        }
        (None, Some(ns)) => {
            let (a, m) = parse_tup(&ns).ok_or_else(|| {
                vm.new_type_error("utime: 'ns' must be a tuple of two ints".to_owned())
            })?;
            // Split integer nanoseconds into (secs, nsecs) with Python-level
            // divmod, so arbitrary-precision ints are handled.
            let ns_in_sec: PyObjectRef = vm.ctx.new_int(1_000_000_000).into();
            let ns_to_dur = |obj: PyObjectRef| {
                let divmod = vm._divmod(&obj, &ns_in_sec)?;
                let (div, rem) =
                    divmod
                        .payload::<PyTuple>()
                        .and_then(parse_tup)
                        .ok_or_else(|| {
                            vm.new_type_error(format!(
                                "{}.__divmod__() must return a 2-tuple, not {}",
                                obj.class().name(),
                                divmod.class().name()
                            ))
                        })?;
                let secs = div.try_index(vm)?.try_to_primitive(vm)?;
                let ns = rem.try_index(vm)?.try_to_primitive(vm)?;
                Ok(Duration::new(secs, ns))
            };
            // TODO: do validation to make sure this doesn't.. underflow?
            (ns_to_dur(a)?, ns_to_dur(m)?)
        }
        (None, None) => {
            let now = SystemTime::now();
            let now = now.duration_since(SystemTime::UNIX_EPOCH).unwrap();
            (now, now)
        }
        (Some(_), Some(_)) => {
            return Err(vm.new_value_error(
                "utime: you may specify either 'times' or 'ns' but not both".to_owned(),
            ))
        }
    };
    utime_impl(args.path, acc, modif, args.dir_fd, args.follow_symlinks, vm)
}
/// Platform implementation of utime.
/// unix: utimensat (nanosecond precision, dir_fd- and symlink-aware);
/// redox: nix utimes (microsecond precision); windows: SetFileTime.
fn utime_impl(
    path: OsPath,
    acc: Duration,
    modif: Duration,
    dir_fd: DirFd<{ UTIME_DIR_FD as usize }>,
    _follow_symlinks: FollowSymlinks,
    vm: &VirtualMachine,
) -> PyResult<()> {
    #[cfg(any(target_os = "wasi", unix))]
    {
        #[cfg(not(target_os = "redox"))]
        {
            let path = path.into_cstring(vm)?;
            let ts = |d: Duration| libc::timespec {
                tv_sec: d.as_secs() as _,
                tv_nsec: d.subsec_nanos() as _,
            };
            let times = [ts(acc), ts(modif)];
            let ret = unsafe {
                libc::utimensat(
                    dir_fd.fd().0,
                    path.as_ptr(),
                    times.as_ptr(),
                    if _follow_symlinks.0 {
                        0
                    } else {
                        libc::AT_SYMLINK_NOFOLLOW
                    },
                )
            };
            if ret < 0 {
                Err(errno_err(vm))
            } else {
                Ok(())
            }
        }
        #[cfg(target_os = "redox")]
        {
            let [] = dir_fd.0;
            // Redox only offers microsecond resolution via utimes.
            let tv = |d: Duration| libc::timeval {
                tv_sec: d.as_secs() as _,
                tv_usec: d.as_micros() as _,
            };
            nix::sys::stat::utimes(path.as_ref(), &tv(acc).into(), &tv(modif).into())
                .map_err(|err| err.into_pyexception(vm))
        }
    }
    #[cfg(windows)]
    {
        use std::{fs::OpenOptions, os::windows::prelude::*};
        use winapi::{
            shared::minwindef::{DWORD, FILETIME},
            um::fileapi::SetFileTime,
        };
        let [] = dir_fd.0;
        // FILETIME counts 100-ns intervals since 1601-01-01; 11644473600 is
        // the number of seconds between the 1601 and 1970 epochs.
        //
        // BUGFIX: the sub-second part must use `subsec_nanos()`, not
        // `as_nanos()` — `as_nanos()` is the TOTAL nanoseconds including
        // whole seconds, which the `as_secs()` term already accounts for,
        // so seconds were being double-counted in the written timestamp.
        let ft = |d: Duration| {
            let intervals = ((d.as_secs() as i64 + 11644473600) * 10_000_000)
                + (d.subsec_nanos() as i64 / 100);
            FILETIME {
                dwLowDateTime: intervals as DWORD,
                dwHighDateTime: (intervals >> 32) as DWORD,
            }
        };
        let acc = ft(acc);
        let modif = ft(modif);
        // FILE_FLAG_BACKUP_SEMANTICS is required to open directory handles.
        let f = OpenOptions::new()
            .write(true)
            .custom_flags(winapi::um::winbase::FILE_FLAG_BACKUP_SEMANTICS)
            .open(path)
            .map_err(|err| err.into_pyexception(vm))?;
        // Passing null for the creation time leaves it unchanged.
        let ret =
            unsafe { SetFileTime(f.as_raw_handle() as _, std::ptr::null(), &acc, &modif) };
        if ret == 0 {
            Err(io::Error::last_os_error().into_pyexception(vm))
        } else {
            Ok(())
        }
    }
}
/// `os.times_result` struct-sequence; all fields are seconds as floats.
#[cfg(all(any(unix, windows), not(target_os = "redox")))]
#[pyattr]
#[pyclass(module = "os", name = "times_result")]
#[derive(Debug, PyStructSequence)]
struct TimesResult {
    pub user: f64,
    pub system: f64,
    pub children_user: f64,
    pub children_system: f64,
    pub elapsed: f64,
}
#[cfg(all(any(unix, windows), not(target_os = "redox")))]
#[pyclass(with(PyStructSequence))]
impl TimesResult {}
/// `os.times`: CPU times for the current process.
/// windows: GetProcessTimes (children/elapsed unavailable, reported as 0);
/// unix: times(3), scaled by the _SC_CLK_TCK clock-tick rate.
#[cfg(all(any(unix, windows), not(target_os = "redox")))]
#[pyfunction]
fn times(vm: &VirtualMachine) -> PyResult {
    #[cfg(windows)]
    {
        use winapi::shared::minwindef::FILETIME;
        use winapi::um::processthreadsapi::{GetCurrentProcess, GetProcessTimes};
        let mut _create = FILETIME::default();
        let mut _exit = FILETIME::default();
        let mut kernel = FILETIME::default();
        let mut user = FILETIME::default();
        unsafe {
            let h_proc = GetCurrentProcess();
            GetProcessTimes(h_proc, &mut _create, &mut _exit, &mut kernel, &mut user);
        }
        // FILETIME is in 100-ns units: the high dword is worth
        // 2^32 * 1e-7 = 429.4967296 seconds, the low dword 1e-7 each.
        let times_result = TimesResult {
            user: user.dwHighDateTime as f64 * 429.4967296 + user.dwLowDateTime as f64 * 1e-7,
            system: kernel.dwHighDateTime as f64 * 429.4967296
                + kernel.dwLowDateTime as f64 * 1e-7,
            children_user: 0.0,
            children_system: 0.0,
            elapsed: 0.0,
        };
        Ok(times_result.to_pyobject(vm))
    }
    #[cfg(unix)]
    {
        let mut t = libc::tms {
            tms_utime: 0,
            tms_stime: 0,
            tms_cutime: 0,
            tms_cstime: 0,
        };
        let tick_for_second = unsafe { libc::sysconf(libc::_SC_CLK_TCK) } as f64;
        let c = unsafe { libc::times(&mut t as *mut _) };
        // XXX: The signedness of `clock_t` varies from platform to platform.
        if c == (-1i8) as libc::clock_t {
            return Err(vm.new_os_error("Fail to get times".to_string()));
        }
        let times_result = TimesResult {
            user: t.tms_utime as f64 / tick_for_second,
            system: t.tms_stime as f64 / tick_for_second,
            children_user: t.tms_cutime as f64 / tick_for_second,
            children_system: t.tms_cstime as f64 / tick_for_second,
            elapsed: c as f64 / tick_for_second,
        };
        Ok(times_result.to_pyobject(vm))
    }
}
/// Arguments for `os.copy_file_range` (Linux only).
#[cfg(target_os = "linux")]
#[derive(FromArgs)]
struct CopyFileRangeArgs {
    #[pyarg(positional)]
    src: i32,
    #[pyarg(positional)]
    dst: i32,
    #[pyarg(positional)]
    count: i64,
    // Optional explicit starting offsets into each file; None means use
    // (and advance) the fd's own file offset.
    #[pyarg(any, default)]
    offset_src: Option<Offset>,
    #[pyarg(any, default)]
    offset_dst: Option<Offset>,
}
/// `os.copy_file_range` (Linux): kernel-side copy of up to `count` bytes
/// between two fds; returns the number of bytes actually copied.
#[cfg(target_os = "linux")]
#[pyfunction]
fn copy_file_range(args: CopyFileRangeArgs, vm: &VirtualMachine) -> PyResult<usize> {
    // A null offset pointer tells the kernel to use the fd's current offset.
    let p_offset_src = args.offset_src.as_ref().map_or_else(std::ptr::null, |x| x);
    let p_offset_dst = args.offset_dst.as_ref().map_or_else(std::ptr::null, |x| x);
    let count: usize = args
        .count
        .try_into()
        .map_err(|_| vm.new_value_error("count should >= 0".to_string()))?;
    // The flags argument is provided to allow
    // for future extensions and currently must be to 0.
    let flags = 0u32;
    // Safety: p_offset_src and p_offset_dst is a unique pointer for offset_src and offset_dst respectively,
    // and will only be freed after this function ends.
    //
    // Why not use `libc::copy_file_range`: On `musl-libc`, `libc::copy_file_range` is not provided. Therefore
    // we use syscalls directly instead.
    let ret = unsafe {
        libc::syscall(
            libc::SYS_copy_file_range,
            args.src,
            p_offset_src as *mut i64,
            args.dst,
            p_offset_dst as *mut i64,
            count,
            flags,
        )
    };
    // A negative syscall return signals an error captured in errno.
    usize::try_from(ret).map_err(|_| errno_err(vm))
}
/// `os.strerror`: message text for an errno value.
#[pyfunction]
fn strerror(e: i32) -> String {
    // SAFETY: strerror returns a pointer to a NUL-terminated C string; it is
    // copied out immediately via to_string_lossy.
    unsafe { ffi::CStr::from_ptr(libc::strerror(e)) }
        .to_string_lossy()
        .into_owned()
}
/// `os.ftruncate`: truncate the file open as `fd` to `length` bytes.
#[pyfunction]
pub fn ftruncate(fd: i32, length: Offset, vm: &VirtualMachine) -> PyResult<()> {
    Fd(fd).ftruncate(length).map_err(|e| e.into_pyexception(vm))
}
/// `os.truncate`: truncate by path or — when the argument converts to an
/// integer — by file descriptor.
#[pyfunction]
fn truncate(path: PyObjectRef, length: Offset, vm: &VirtualMachine) -> PyResult<()> {
    // Integer-convertible arguments are treated as fds.
    if let Ok(fd) = path.try_to_value(vm) {
        return ftruncate(fd, length, vm);
    }
    let path = OsPath::try_from_object(vm, path)?;
    // TODO: just call libc::truncate() on POSIX
    let file = OpenOptions::new()
        .write(true)
        .open(path)
        .map_err(|e| e.into_pyexception(vm))?;
    file.set_len(length as u64)
        .map_err(|e| e.into_pyexception(vm))?;
    Ok(())
}
/// `os.getloadavg`: 1-, 5- and 15-minute system load averages.
#[cfg(all(unix, not(any(target_os = "redox", target_os = "android"))))]
#[pyfunction]
fn getloadavg(vm: &VirtualMachine) -> PyResult<(f64, f64, f64)> {
    let mut loadavg = [0f64; 3];
    // Safety: loadavg is on stack and only write by `getloadavg` and are freed
    // after this function ends.
    unsafe {
        // getloadavg returns the number of samples retrieved; anything other
        // than the 3 requested is treated as failure here.
        if libc::getloadavg(&mut loadavg[0] as *mut f64, 3) != 3 {
            return Err(vm.new_os_error("Load averages are unobtainable".to_string()));
        }
    }
    Ok((loadavg[0], loadavg[1], loadavg[2]))
}
/// `os.waitstatus_to_exitcode`: convert a raw wait() status.
/// unix: the exit status for a normal exit, or the negated signal number if
/// killed by a signal; windows: the status rotated right by 8 bits.
#[cfg(any(unix, windows))]
#[pyfunction]
fn waitstatus_to_exitcode(status: i32, vm: &VirtualMachine) -> PyResult<i32> {
    // Negative inputs are rejected up front.
    let status = u32::try_from(status)
        .map_err(|_| vm.new_value_error(format!("invalid WEXITSTATUS: {status}")))?;
    cfg_if::cfg_if! {
        if #[cfg(not(windows))] {
            let status = status as libc::c_int;
            if libc::WIFEXITED(status) {
                return Ok(libc::WEXITSTATUS(status));
            }
            if libc::WIFSIGNALED(status) {
                return Ok(-libc::WTERMSIG(status));
            }
            // Statuses that are neither exits nor signals are errors here.
            Err(vm.new_value_error(format!("Invalid wait status: {status}")))
        } else {
            i32::try_from(status.rotate_right(8))
                .map_err(|_| vm.new_value_error(format!("invalid wait status: {status}")))
        }
    }
}
/// `os.device_encoding`: the encoding used by a terminal fd, or None when
/// the fd is not a tty.
#[pyfunction]
fn device_encoding(fd: i32, _vm: &VirtualMachine) -> PyResult<Option<String>> {
    if !isatty(fd) {
        return Ok(None);
    }
    cfg_if::cfg_if! {
        if #[cfg(any(target_os = "android", target_os = "redox"))] {
            Ok(Some("UTF-8".to_owned()))
        } else if #[cfg(windows)] {
            // stdin uses the console input code page; stdout/stderr the
            // output code page; other fds fall through to cp0.
            let cp = match fd {
                0 => unsafe { winapi::um::consoleapi::GetConsoleCP() },
                1 | 2 => unsafe { winapi::um::consoleapi::GetConsoleOutputCP() },
                _ => 0,
            };
            Ok(Some(format!("cp{cp}")))
        } else {
            // nl_langinfo(CODESET) for the active locale; a null or empty
            // result falls back to UTF-8.
            let encoding = unsafe {
                let encoding = libc::nl_langinfo(libc::CODESET);
                if encoding.is_null() || encoding.read() == '\0' as libc::c_char {
                    "UTF-8".to_owned()
                } else {
                    ffi::CStr::from_ptr(encoding).to_string_lossy().into_owned()
                }
            };
            Ok(Some(encoding))
        }
    }
}
#[pyattr]
#[pyclass(module = "os", name = "terminal_size")]
#[derive(PyStructSequence)]
#[allow(dead_code)]
/// Result type of `os.get_terminal_size()`: a struct sequence with
/// `columns` and `lines` fields.
pub(crate) struct PyTerminalSize {
    pub columns: usize,
    pub lines: usize,
}
#[pyclass(with(PyStructSequence))]
impl PyTerminalSize {}
#[pyattr]
#[pyclass(module = "os", name = "uname_result")]
#[derive(Debug, PyStructSequence)]
/// Result type of `os.uname()`: system identification strings, exposed to
/// Python as a struct sequence.
pub(crate) struct UnameResult {
    pub sysname: String,
    pub nodename: String,
    pub release: String,
    pub version: String,
    pub machine: String,
}
#[pyclass(with(PyStructSequence))]
impl UnameResult {}
/// Collects the capability table for this build: platform-specific entries
/// first, then the common `os` functions. Each entry states whether the
/// function accepts fds, `dir_fd`, and `follow_symlinks` (see `SupportFunc`).
pub(super) fn support_funcs() -> Vec<SupportFunc> {
    let mut supports = super::platform::module::support_funcs();
    supports.extend(vec![
        SupportFunc::new("open", Some(false), Some(OPEN_DIR_FD), Some(false)),
        SupportFunc::new("access", Some(false), Some(false), None),
        SupportFunc::new("chdir", None, Some(false), Some(false)),
        // chflags Some, None Some
        SupportFunc::new("listdir", Some(LISTDIR_FD), Some(false), Some(false)),
        SupportFunc::new("mkdir", Some(false), Some(MKDIR_DIR_FD), Some(false)),
        // mkfifo Some Some None
        // mknod Some Some None
        SupportFunc::new("readlink", Some(false), None, Some(false)),
        SupportFunc::new("remove", Some(false), None, Some(false)),
        SupportFunc::new("unlink", Some(false), None, Some(false)),
        SupportFunc::new("rename", Some(false), None, Some(false)),
        SupportFunc::new("replace", Some(false), None, Some(false)), // TODO: Fix replace
        SupportFunc::new("rmdir", Some(false), None, Some(false)),
        SupportFunc::new("scandir", None, Some(false), Some(false)),
        SupportFunc::new("stat", Some(true), Some(STAT_DIR_FD), Some(true)),
        SupportFunc::new("fstat", Some(true), Some(STAT_DIR_FD), Some(true)),
        SupportFunc::new("symlink", Some(false), Some(SYMLINK_DIR_FD), Some(false)),
        SupportFunc::new("truncate", Some(true), Some(false), Some(false)),
        SupportFunc::new(
            "utime",
            Some(false),
            Some(UTIME_DIR_FD),
            Some(cfg!(all(unix, not(target_os = "redox")))),
        ),
    ]);
    supports
}
}
pub(crate) use _os::{ftruncate, isatty, lseek};
/// Per-function record of which optional `os` capabilities (fd arguments,
/// `dir_fd`, `follow_symlinks`) an implementation supports; used to build
/// the module-level `supports_*` sets.
pub(crate) struct SupportFunc {
    name: &'static str,
    // realistically, each of these is just a bool of "is this function in the supports_* set".
    // However, None marks that the function maybe _should_ support fd/dir_fd/follow_symlinks, but
    // we haven't implemented it yet.
    fd: Option<bool>,
    dir_fd: Option<bool>,
    follow_symlinks: Option<bool>,
}
impl SupportFunc {
    /// Builds a capability record for `name`. `Some(true)` = supported,
    /// `Some(false)` = not supported, `None` = should maybe be supported
    /// but not implemented yet (treated as unsupported by `extend_module`).
    pub(crate) fn new(
        name: &'static str,
        fd: Option<bool>,
        dir_fd: Option<bool>,
        follow_symlinks: Option<bool>,
    ) -> Self {
        Self {
            name,
            fd,
            dir_fd,
            follow_symlinks,
        }
    }
}
/// Populates `module` with the `supports_fd` / `supports_dir_fd` /
/// `supports_follow_symlinks` sets (mirroring CPython's `os` module) and an
/// `error` alias for `OSError`.
pub fn extend_module(vm: &VirtualMachine, module: &Py<PyModule>) {
    let support_funcs = _os::support_funcs();
    let supports_fd = PySet::default().into_ref(&vm.ctx);
    let supports_dir_fd = PySet::default().into_ref(&vm.ctx);
    let supports_follow_symlinks = PySet::default().into_ref(&vm.ctx);
    for support in support_funcs {
        // Every listed function must already be an attribute of the module.
        let func_obj = module.get_attr(support.name, vm).unwrap();
        // `None` ("not implemented yet") counts as unsupported here.
        if support.fd.unwrap_or(false) {
            // NOTE(review): the `.clone()` of the set ref before `add` looks
            // unnecessary (a cheap refcount bump at best) — confirm `add`'s
            // receiver before removing.
            supports_fd.clone().add(func_obj.clone(), vm).unwrap();
        }
        if support.dir_fd.unwrap_or(false) {
            supports_dir_fd.clone().add(func_obj.clone(), vm).unwrap();
        }
        if support.follow_symlinks.unwrap_or(false) {
            supports_follow_symlinks.clone().add(func_obj, vm).unwrap();
        }
    }
    extend_module!(vm, module, {
        "supports_fd" => supports_fd,
        "supports_dir_fd" => supports_dir_fd,
        "supports_follow_symlinks" => supports_follow_symlinks,
        "error" => vm.ctx.exceptions.os_error.to_owned(),
    });
}
pub(crate) use _os::os_open as open;
#[cfg(not(windows))]
use super::posix as platform;
#[cfg(windows)]
use super::nt as platform;
pub(crate) use platform::module::MODULE_NAME;
|
/**
* @lc app=leetcode.cn id=60 lang=rust
*
* [60] 第k个排列
*
* https://leetcode-cn.com/problems/permutation-sequence/description/
*
* algorithms
* Medium (45.34%)
* Total Accepted: 6.3K
* Total Submissions: 13.9K
* Testcase Example: '3\n3'
*
* 给出集合 [1,2,3,…,n],其所有元素共有 n! 种排列。
*
* 按大小顺序列出所有排列情况,并一一标记,当 n = 3 时, 所有排列如下:
*
*
* "123"
* "132"
* "213"
* "231"
* "312"
* "321"
*
*
* 给定 n 和 k,返回第 k 个排列。
*
* 说明:
*
*
* 给定 n 的范围是 [1, 9]。
* 给定 k 的范围是[1, n!]。
*
*
* 示例 1:
*
* 输入: n = 3, k = 3
* 输出: "213"
*
*
* 示例 2:
*
* 输入: n = 4, k = 9
* 输出: "2314"
*
*
*/
impl Solution {
    /// Returns the k-th (1-based) permutation of the sequence 1..=n in
    /// lexicographic order, without generating all n! permutations.
    ///
    /// With m candidates remaining, each choice of leading element accounts
    /// for (m-1)! permutations, so the next digit is selected by integer
    /// division and the rank reduced by the corresponding modulus.
    ///
    /// Constraints per the problem statement: 1 <= n <= 9, 1 <= k <= n!.
    pub fn get_permutation(n: i32, mut k: i32) -> String {
        // (m-1)!: permutations of the remaining elements once one of the
        // m candidates has been fixed in front. Empty range => 1 for m <= 1.
        fn block_size(m: i32) -> i32 {
            (1..m).product()
        }
        let mut candidates: Vec<i32> = (1..=n).collect();
        let mut result = String::new();
        k -= 1; // switch to a 0-based rank
        while !candidates.is_empty() {
            let size = block_size(candidates.len() as i32);
            let idx = k / size;
            // n <= 9, so each element is a single digit.
            result.push_str(&candidates.remove(idx as usize).to_string());
            k %= size;
        }
        // Note: the original also drained the (already empty) candidate
        // vector afterwards and joined an intermediate Vec<String>; both
        // steps were dead weight and are removed here.
        result
    }
}
struct Solution {}
fn main() {
    // Print the k-th permutation for a handful of sample inputs.
    fn check(m: i32, k: i32) {
        let res = Solution::get_permutation(m, k);
        println!("{:?}", res);
    }
    let samples = [(3, 1), (3, 2), (3, 3), (3, 4), (3, 5), (3, 6), (9, 3)];
    for &(m, k) in samples.iter() {
        check(m, k);
    }
}
|
/// CSV dialect settings supplied by the client. The JSON field names use
/// camelCase, hence the serde renames; `None` means "use the default".
#[derive(Debug, Serialize, Deserialize)]
pub struct CsvParams {
    // Character used to quote fields.
    #[serde(rename = "quoteChar")]
    pub quote_char: Option<String>,
    // Character used to escape special characters inside fields.
    #[serde(rename = "escapeChar")]
    pub escape_char: Option<String>,
    // Field delimiter.
    #[serde(rename = "separatorChar")]
    pub separator_char: Option<String>,
    // Record terminator.
    #[serde(rename = "endOfLine")]
    pub end_of_line: Option<String>,
}
|
use crate::backend::c;
use crate::ugid::{Gid, Uid};
#[cfg(not(target_os = "wasi"))]
#[inline]
#[must_use]
/// Returns the real user ID of the calling process.
pub(crate) fn getuid() -> Uid {
    // SAFETY: getuid(2) has no preconditions and is always successful.
    // The unsafe block is narrowed to the FFI call only; `Uid::from_raw`
    // is a safe constructor.
    let uid = unsafe { c::getuid() };
    Uid::from_raw(uid)
}
#[cfg(not(target_os = "wasi"))]
#[inline]
#[must_use]
/// Returns the effective user ID of the calling process.
pub(crate) fn geteuid() -> Uid {
    // SAFETY: geteuid(2) has no preconditions and is always successful.
    let uid = unsafe { c::geteuid() };
    Uid::from_raw(uid)
}
#[cfg(not(target_os = "wasi"))]
#[inline]
#[must_use]
/// Returns the real group ID of the calling process.
pub(crate) fn getgid() -> Gid {
    // SAFETY: getgid(2) has no preconditions and is always successful.
    let gid = unsafe { c::getgid() };
    Gid::from_raw(gid)
}
#[cfg(not(target_os = "wasi"))]
#[inline]
#[must_use]
/// Returns the effective group ID of the calling process.
pub(crate) fn getegid() -> Gid {
    // SAFETY: getegid(2) has no preconditions and is always successful.
    let gid = unsafe { c::getegid() };
    Gid::from_raw(gid)
}
|
use crate::math::vec3::Vec3;
/// Indexed triangle mesh with separate index streams for positions, UVs and
/// normals (OBJ-style: each attribute is indexed independently).
pub struct Mesh {
    pub vertices: Vec<Vec3>,
    pub indices: Vec<u32>,
    // UVs stored as Vec3; presumably only x/y are meaningful — TODO confirm.
    pub uvs: Vec<Vec3>,
    pub uv_indices: Vec<u32>,
    pub normals: Vec<Vec3>,
    pub normal_indices: Vec<u32>
}
impl Mesh {
pub fn new(new_vertices: Vec<Vec3>, new_indices: Vec<u32>, new_uvs: Vec<Vec3>, new_uv_indices: Vec<u32>, new_normals: Vec<Vec3>, new_normal_indices: Vec<u32>) -> Mesh {
Mesh {
vertices: new_vertices,
indices: new_indices,
uvs: new_uvs,
uv_indices: new_uv_indices,
normals: new_normals,
normal_indices: new_normal_indices
}
}
} |
use pest::iterators::Pair;
use std::collections::HashMap;
use super::Constant;
use super::Rule;
/// Converts a numeric literal pair into a `Constant::Float` or
/// `Constant::Integer`. Panics (via `unwrap`) on out-of-range literals and
/// on any rule other than `float`/`integer`.
pub fn parse_number(pair: Pair<Rule>) -> Constant {
    match pair.as_rule() {
        Rule::float => Constant::Float(pair.into_span().as_str().parse::<f32>().unwrap()),
        Rule::integer => Constant::Integer(pair.into_span().as_str().parse::<i32>().unwrap()),
        _ => unreachable!()
    }
}
/// Converts a `k_true`/`k_false` keyword pair into a `Constant::Boolean`.
pub fn parse_boolean(pair: Pair<Rule>) -> Constant {
    match pair.as_rule() {
        Rule::k_true => Constant::Boolean(true),
        Rule::k_false => Constant::Boolean(false),
        _ => unreachable!()
    }
}
/// Extracts the inner (unquoted) text of a string literal pair; the first
/// inner pair is assumed to be the string's content — TODO confirm grammar.
pub fn parse_string(pair: Pair<Rule>) -> Constant {
    Constant::String(String::from(pair.into_inner().nth(0).unwrap().into_span().as_str()))
}
pub fn parse_constant(pair: Pair<Rule>, memory: &HashMap<&str, Constant>) -> Constant {
let const_name = pair.into_span().as_str();
let value = memory.get(const_name);
if value.is_none() {
eprintln!("Constant \"{}\" was not defined!", const_name);
panic!()
}
value.unwrap().clone()
} |
/// Computes the total energy of a signal: the sum of squared sample values.
///
/// Returns `0.0` for an empty signal. Takes `&[f64]` instead of the former
/// `&Vec<f64>` — existing `compute(&vec)` call sites still work via deref
/// coercion. `abs()` before squaring was redundant for real samples
/// (x² == |x|²) and has been dropped.
pub fn compute(signal: &[f64]) -> f64 {
    signal.iter().map(|&sample| sample * sample).sum()
}
#[cfg(test)]
mod tests {
    use super::compute;
    use crate::utils::test;
    use std::f64;
    // Allowed relative error when comparing against reference feature values.
    const FLOAT_PRECISION: f64 = 0.000_000_010;
    /// Asserts that `compute` reproduces the reference energy of one dataset.
    /// (The redundant `-> ()` return type annotation has been removed.)
    fn test_against(dataset: &test::data::TestDataSet) {
        let energy = compute(&dataset.signal);
        assert_relative_eq!(
            energy,
            dataset.features.energy,
            epsilon = f64::EPSILON,
            max_relative = FLOAT_PRECISION
        );
    }
    #[test]
    fn test_energy() {
        // Run the energy check against every bundled reference dataset.
        let datasets = test::data::get_all();
        for dataset in &datasets {
            test_against(dataset);
        }
    }
}
|
use crate::prelude::*;
use crate::textures::{Textures, UVCoords};
use crate::sounds::Sounds;
/// Owns the GPU-facing resources of the game: compiled shader programs,
/// the texture atlas, loaded sounds and a handle to the display.
pub struct Graphics {
    pub world_texture_program: Program,
    pub background_program: Program,
    pub textures: Textures,
    pub sounds: Sounds,
    pub display: Display,
}
impl Graphics {
    /// Compiles the shader programs, loads the texture atlas and clones the
    /// display handle. Panics if a shader fails to compile or the atlas
    /// cannot be loaded — both are unrecoverable at startup.
    pub fn new(display: &Display, sounds: Sounds) -> Self {
        println!("Compiling texture shader...");
        let world_texture_program = Program::from_source(display, TEXTURE_VERTEX_SHADER, TEXTURE_FRAGMENT_SHADER, None).unwrap();
        println!("Compiling background shader...");
        let background_program = Program::from_source(display, BACKGROUND_VERTEX_SHADER, BACKGROUND_FRAGMENT_SHADER, None).unwrap();
        // @Cleanup: Don't unwrap here, silly!
        let textures = Textures::load("assets.txt", &display).unwrap();
        Graphics {
            sounds,
            world_texture_program,
            background_program,
            textures,
            display: display.clone(),
        }
    }
    // pub fn draw_texture_immediate(&self, surface: &mut impl Surface, aspect: f32, rect: [f32; 4], texture: TextureId) {
    //     let uv = self.textures.get_uv(texture);
    //     let vertices = VertexBuffer::new(&self.display,
    //         &[TextureVertex {
    //             position: [rect[0], rect[1], 1.0],
    //             uv: [uv.left, uv.bottom],
    //         },
    //         TextureVertex {
    //             position: [rect[0], rect[3], 1.0],
    //             uv: [uv.left, uv.top],
    //         },
    //         TextureVertex {
    //             position: [rect[2], rect[3], 1.0],
    //             uv: [uv.right, uv.top],
    //         },
    //         TextureVertex {
    //             position: [rect[2], rect[1], 1.0],
    //             uv: [uv.right, uv.bottom],
    //         }]
    //     ).unwrap();
    //     let indices = IndexBuffer::new(&self.display,
    //         index::PrimitiveType::TrianglesList,
    //         &[0, 1, 2, 0, 2, 3u32],
    //     ).unwrap();
    //     surface.draw(
    //         &vertices,
    //         &indices,
    //         &self.world_texture_program,
    //         &uniform! {
    //             model_transform: [
    //                 [1.0, 0.0, 0.0f32],
    //                 [0.0, 1.0, 0.0f32],
    //                 [0.0, 0.0, 1.0f32],
    //             ],
    //             camera_transform: [
    //                 [1.0 / aspect, 0.0, 0.0f32],
    //                 [0.0, 1.0, 0.0f32],
    //                 [0.0, 0.0, 1.0f32],
    //             ],
    //             atlas: self.textures.atlas.sampled().magnify_filter(uniforms::MagnifySamplerFilter::Nearest),
    //         },
    //         &DrawParameters {
    //             blend: Blend {
    //                 color: BlendingFunction::Addition {
    //                     source: LinearBlendingFactor::One,
    //                     destination: LinearBlendingFactor::OneMinusSourceAlpha,
    //                 },
    //                 ..Default::default()
    //             },
    //             ..Default::default()
    //         }
    //     ).unwrap();
    // }
    /// Draws a full quad with the animated background shader. `rect` is
    /// `[x0, y0, x1, y1]` in clip space; `time` drives the shader animation.
    /// Allocates fresh vertex/index buffers each call ("immediate" mode).
    pub fn draw_background_immediate(&self, surface: &mut impl Surface, rect: [f32; 4], uv: UVCoords, time: f32) {
        let vertices = VertexBuffer::new(&self.display,
            &[BackgroundVertex {
                position: [rect[0], rect[1], 1.0],
                uv: [uv.left, uv.bottom],
            },
            BackgroundVertex {
                position: [rect[0], rect[3], 1.0],
                uv: [uv.left, uv.top],
            },
            BackgroundVertex {
                position: [rect[2], rect[3], 1.0],
                uv: [uv.right, uv.top],
            },
            BackgroundVertex {
                position: [rect[2], rect[1], 1.0],
                uv: [uv.right, uv.bottom],
            }]
        ).unwrap();
        let indices = IndexBuffer::new(&self.display,
            index::PrimitiveType::TrianglesList,
            &[0, 1, 2, 0, 2, 3u32],
        ).unwrap();
        surface.draw(
            &vertices,
            &indices,
            &self.background_program,
            &uniform! {
                time: time,
            },
            &DrawParameters {
                ..Default::default()
            }
        ).unwrap();
    }
    /// Appends the four vertices and six indices of an axis-aligned textured
    /// quad to the given CPU-side buffers.
    ///
    /// `pos` is `[x, y, width, height]`; `uv` supplies the atlas coordinates
    /// and texture layer. (The original sprinkled no-op `as f32` casts on
    /// components that are already `f32`; they are removed here.)
    pub fn push_texture_quad(&self,
        vertices: &mut Vec<TextureVertex>,
        indices: &mut Vec<u32>,
        pos: [f32; 4],
        uv: UVCoords,
    ) {
        let vert_index = vertices.len() as u32;
        vertices.push(TextureVertex {
            position: [pos[0], pos[1], 1.0],
            uv: [uv.left, uv.bottom, uv.texture],
        });
        vertices.push(TextureVertex {
            position: [pos[0], pos[1] + pos[3], 1.0],
            uv: [uv.left, uv.top, uv.texture],
        });
        vertices.push(TextureVertex {
            position: [pos[0] + pos[2], pos[1] + pos[3], 1.0],
            uv: [uv.right, uv.top, uv.texture],
        });
        vertices.push(TextureVertex {
            position: [pos[0] + pos[2], pos[1], 1.0],
            uv: [uv.right, uv.bottom, uv.texture],
        });
        // Two triangles: (0,1,2) and (0,2,3), counter-clockwise.
        indices.push(vert_index);
        indices.push(vert_index + 1);
        indices.push(vert_index + 2);
        indices.push(vert_index);
        indices.push(vert_index + 2);
        indices.push(vert_index + 3);
    }
}
/// Vertex format for the background quad: clip-space position plus 2D UV.
#[derive(Clone, Copy)]
pub struct BackgroundVertex {
    pub position: [f32; 3],
    pub uv: [f32; 2],
}
implement_vertex!(BackgroundVertex, position, uv);
// Passes position/UV through and forwards the `time` uniform to the
// fragment stage. (GLSL source is runtime data — do not edit casually.)
const BACKGROUND_VERTEX_SHADER: &str = r##"
    #version 130
    uniform float time;
    in vec3 position;
    in vec2 uv;
    out vec2 out_uv;
    out float out_time;
    void main() {
        out_uv = uv;
        out_time = time;
        gl_Position = vec4(position, 1.0);
    }
"##;
// Procedural animated dither pattern driven by `time`.
const BACKGROUND_FRAGMENT_SHADER: &str = r##"
    #version 130
    in float out_time;
    in vec2 out_uv;
    void main() {
        float x = floor(out_uv.x);
        float y = floor(out_uv.y);
        float real_c = (x + y + 10.0 * sin(out_time * 0.1)) / 32.0;
        float colors = 3.0;
        float floor_c = floor(real_c * colors) / colors;
        float error_c = floor((real_c - floor_c) * 50.0) / 50.0;
        float c = floor_c + floor(mod(x * 13.0 - y * 11.0, 1.0 + error_c) * colors) / colors;
        gl_FragColor = vec4(c / 70.0 + 0.15, c / 60.0 + 0.18, c / 90.0 + 0.20, 1.0);
    }
"##;
/// Vertex format for textured world geometry: position plus 3D UV, where
/// the third UV component selects the atlas array layer.
#[derive(Clone, Copy)]
pub struct TextureVertex {
    pub position: [f32; 3],
    pub uv: [f32; 3],
}
implement_vertex!(TextureVertex, position, uv);
// Applies model and camera transforms in 2D homogeneous coordinates.
const TEXTURE_VERTEX_SHADER: &str = r##"
    #version 130
    uniform mat3 model_transform;
    uniform mat3 camera_transform;
    in vec3 position;
    in vec3 uv;
    out vec3 out_uv;
    void main() {
        out_uv = uv;
        gl_Position = vec4(camera_transform * model_transform * position, 1.0);
    }
"##;
// Samples the texture array atlas at the interpolated 3D UV.
const TEXTURE_FRAGMENT_SHADER: &str = r##"
    #version 130
    uniform sampler2DArray atlas;
    in vec3 out_uv;
    void main() {
        gl_FragColor = texture(atlas, out_uv);
    }
"##;
|
mod vertex;
use ecs::{Entity, ECS};
use renderer::{RendererDevice, MeshFlags};
use std::path::Path;
use vertex::Vertex;
const GRID_SIZE: i32 = 10;
const GRID_STEP: f32 = 4.0;
const GRID_HEIGHT: f32 = -20.0;
/// Builds a debug reference grid (GRID_SIZE x GRID_SIZE cells at heights
/// ±GRID_HEIGHT) as colored line-mode geometry and registers it as an
/// entity. Panics if the `RendererDevice` resource is missing.
pub fn load(ecs: &mut ECS) {
    let context = ecs.resources.get_mut::<RendererDevice>().unwrap();
    let mut vertex_data: Vec<Vertex> = Vec::new();
    // Half extent of the grid; lines run from -h_size to +h_size.
    let h_size = (GRID_SIZE as f32 * GRID_STEP) / 2.0;
    let mut cursor = -h_size;
    // GRID_SIZE cells need GRID_SIZE + 1 grid lines per axis
    // (inclusive range instead of the former `0..(GRID_SIZE + 1)`).
    for _ in 0..=GRID_SIZE {
        // Line endpoints on the lower plane (colors encode the axis)...
        vertex_data.push(Vertex::new(cursor, GRID_HEIGHT, -h_size, 1.0, 0.0, 0.0));
        vertex_data.push(Vertex::new(cursor, GRID_HEIGHT, h_size, 0.0, 1.0, 0.0));
        vertex_data.push(Vertex::new(-h_size, GRID_HEIGHT, cursor, 0.0, 0.0, 1.0));
        vertex_data.push(Vertex::new(h_size, GRID_HEIGHT, cursor, 1.0, 1.0, 0.0));
        // ...and mirrored on the upper plane.
        vertex_data.push(Vertex::new(cursor, -GRID_HEIGHT, -h_size, 1.0, 0.0, 0.0));
        vertex_data.push(Vertex::new(cursor, -GRID_HEIGHT, h_size, 0.0, 1.0, 0.0));
        vertex_data.push(Vertex::new(-h_size, -GRID_HEIGHT, cursor, 0.0, 0.0, 1.0));
        vertex_data.push(Vertex::new(h_size, -GRID_HEIGHT, cursor, 1.0, 1.0, 0.0));
        cursor += GRID_STEP;
    }
    ecs.add_entity(Entity::new().with(context.new_mesh(
        &Path::new("shaders/color.glsl"),
        vertex_data,
        None,
        vec![],
        MeshFlags::new().lines_mode().opt(),
    )));
}
|
use js_sys::WebAssembly;
use wasm_bindgen::JsCast;
use web_sys::WebGl2RenderingContext as GL;
use web_sys::*;
use crate::render::traits::*;
/// A WebGL quad: dimensions, typed-array views into wasm linear memory for
/// its index/vertex data, and the VAO that binds them.
pub struct Rectangle {
    width_: f32,
    height_: f32,
    indices: js_sys::Uint32Array,
    vertices: js_sys::Float32Array,
    vao: WebGlVertexArrayObject,
}
impl Rectangle {
    /// Builds an axis-aligned `width` x `height` rectangle anchored at the
    /// origin, uploads its vertex/index data to the GL context and records
    /// the associated VAO. Panics if the VAO or buffers cannot be created.
    pub fn new(gl: &GL, width: f32, height: f32) -> Self {
        // Two triangles covering the quad.
        let indices : [u32; 6] = [
            0, 1, 3,
            3, 1, 2,
        ];
        let vertices : [f32; 12] = [
            0.0, height, 0.0,
            0.0, 0.0, 0.0,
            width, 0.0, 0.0,
            width, height, 0.0,
        ];
        // One view of the wasm linear memory is enough for both typed-array
        // windows. (The original fetched the memory twice and left the
        // second handle, `i_mem`, unused.)
        let mem = wasm_bindgen::memory().dyn_into::<WebAssembly::Memory>().unwrap().buffer();
        // Both element types are 4 bytes wide, hence the /4 element offsets.
        let indices_location = indices.as_ptr() as u32 / 4;
        let vertices_location = vertices.as_ptr() as u32 / 4;
        let indices = js_sys::Uint32Array::new(&mem)
            .subarray(indices_location, indices_location + indices.len() as u32);
        let vertices = js_sys::Float32Array::new(&mem)
            .subarray(vertices_location, vertices_location + vertices.len() as u32);
        debug!("New Rectangle!");
        let rect = Rectangle { width_: width, height_: height,
                               indices,
                               vertices,
                               vao: gl.create_vertex_array().unwrap() };
        rect.bind(gl);
        rect.buffer_indices_u32(gl);
        rect.buffer_data_f32(gl);
        rect
    }
    /// Rectangle width (kept as `&f32` for API compatibility with callers).
    pub fn width(&self) -> &f32 {
        &self.width_
    }
    /// Rectangle height (kept as `&f32` for API compatibility with callers).
    pub fn height(&self) -> &f32 {
        &self.height_
    }
}
impl Draw for Rectangle {
    /// Issues the indexed draw call; assumes the rectangle's VAO and
    /// buffers are already bound to `gl`.
    fn draw(&self, gl: &GL) {
        gl.draw_elements_with_i32(GL::TRIANGLES,
                                  self.indices.length() as i32,
                                  GL::UNSIGNED_INT,
                                  0);
    }
}
impl Buffer for Rectangle {
    /// Makes this rectangle's VAO current.
    fn bind(&self, gl: &GL) {
        gl.bind_vertex_array(Some(&self.vao));
    }
    /// Creates an element buffer and uploads the index view into it.
    fn buffer_indices_u32(&self, gl: &GL) {
        let id = gl.create_buffer().unwrap();
        gl.bind_buffer(GL::ELEMENT_ARRAY_BUFFER, Some(&id));
        gl.buffer_data_with_array_buffer_view(
            GL::ELEMENT_ARRAY_BUFFER,
            &self.indices,
            GL::STATIC_DRAW);
    }
    /// Creates a vertex buffer, uploads the vertex view and describes
    /// attribute 0 as three floats per vertex.
    /// NOTE(review): attribute 0 is never `enable_vertex_attrib_array`'d
    /// here — presumably done elsewhere; confirm.
    fn buffer_data_f32(&self, gl: &GL) {
        let id = gl.create_buffer().unwrap();
        gl.bind_buffer(GL::ARRAY_BUFFER, Some(&id));
        gl.buffer_data_with_array_buffer_view(
            GL::ARRAY_BUFFER,
            &self.vertices,
            GL::STATIC_DRAW,
        );
        gl.vertex_attrib_pointer_with_i32(0, 3, GL::FLOAT, false, 0, 0);
    }
}
use serde::{Deserialize, Serialize};
/// A mission record as stored/transferred; see `Default` for the
/// placeholder used when no data is available.
#[derive(Deserialize, Serialize, Debug, Clone)]
pub struct Mission {
    pub id: i32,
    pub name: String,
    pub description: String,
}
impl Default for Mission {
fn default() -> Self {
Mission {
id: 0,
name: "Unknown".to_string(),
description: "No description found".to_string(),
}
}
}
|
// NOTE we intentionally avoid using the `quote` crate here because it doesn't work with the
// `x86_64-unknown-linux-musl` target.
// NOTE usually the only thing you need to do to test a new math function is to add it to one of the
// macro invocations found in the bottom of this file.
#[macro_use]
extern crate itertools;
extern crate rand;
use std::error::Error;
use std::fmt::Write as _0;
use std::fs::{self, File};
use std::io::Write as _1;
use std::{f32, f64, i16, u16, u32, u64, u8};
use rand::{Rng, SeedableRng, XorShiftRng};
// Number of test cases to generate
const NTESTS: usize = 10_000;
// TODO tweak these functions to generate edge cases (zero, infinity, NaN) more often
/// Draws a random `f32` by assembling sign, exponent and mantissa bit
/// fields independently, so extreme exponents are as likely as ordinary
/// ones (unlike a uniform-in-value distribution).
fn f32(rng: &mut XorShiftRng) -> f32 {
    let sign = if rng.gen_bool(0.5) { 1 << 31 } else { 0 };
    // NOTE(review): `gen_range(0, u8::MAX)` excludes 255, so the all-ones
    // exponent (infinities/NaNs) is never produced here — presumably
    // deliberate since EDGE_CASES32 covers those; confirm.
    let exponent = (rng.gen_range(0, u8::MAX) as u32) << 23;
    let mantissa = rng.gen_range(0, u32::MAX) & ((1 << 23) - 1);
    f32::from_bits(sign + exponent + mantissa)
}
/// Draws a random `f64` by assembling sign, exponent (11 bits) and
/// mantissa (52 bits) independently; see `f32` above for rationale.
fn f64(rng: &mut XorShiftRng) -> f64 {
    let sign = if rng.gen_bool(0.5) { 1 << 63 } else { 0 };
    let exponent = (rng.gen_range(0, u16::MAX) as u64 & ((1 << 11) - 1)) << 52;
    let mantissa = rng.gen_range(0, u64::MAX) & ((1 << 52) - 1);
    f64::from_bits(sign + exponent + mantissa)
}
// Deterministic f32 inputs that every generated test always includes.
const EDGE_CASES32: &[f32] = &[
    -0.,
    0.,
    f32::EPSILON,
    f32::INFINITY,
    f32::MAX,
    f32::MIN,
    f32::MIN_POSITIVE,
    f32::NAN,
    f32::NEG_INFINITY,
];
// Deterministic f64 inputs that every generated test always includes.
const EDGE_CASES64: &[f64] = &[
    -0.,
    0.,
    f64::EPSILON,
    f64::INFINITY,
    f64::MAX,
    f64::MIN,
    f64::MIN_POSITIVE,
    f64::NAN,
    f64::NEG_INFINITY,
];
// fn(f32) -> f32
// For each listed intrinsic: evaluate the MUSL C implementation on the edge
// cases plus NTESTS random bit patterns, then emit tests/<intr>.rs asserting
// that libm's Rust port reproduces the exact output bit pattern (any NaN
// matches any NaN).
macro_rules! f32_f32 {
    ($($intr:ident,)*) => {
        fn f32_f32(rng: &mut XorShiftRng) -> Result<(), Box<Error>> {
            // MUSL C implementation of the function to test
            extern "C" {
                $(fn $intr(_: f32) -> f32;)*
            }
            $(
                let mut cases = String::new();
                // random inputs
                for inp in EDGE_CASES32.iter().cloned().chain((0..NTESTS).map(|_| f32(rng))) {
                    let out = unsafe { $intr(inp) };
                    let inp = inp.to_bits();
                    let out = out.to_bits();
                    write!(cases, "({}, {})", inp, out).unwrap();
                    cases.push(',');
                }
                let mut f = File::create(concat!("tests/", stringify!($intr), ".rs"))?;
                write!(f, "
#![deny(warnings)]
extern crate libm;
use std::panic;
#[test]
fn {0}() {{
    const CASES: &[(u32, u32)] = &[
        {1}
    ];
    for case in CASES {{
        let (inp, expected) = *case;
        if let Ok(outf) =
            panic::catch_unwind(|| libm::{0}(f32::from_bits(inp)))
        {{
            let outi = outf.to_bits();
            if !((outf.is_nan() && f32::from_bits(expected).is_nan())
                || libm::_eqf(outi, expected))
            {{
                panic!(
                    \"input: {{}}, output: {{}}, expected: {{}}\",
                    inp, outi, expected,
                );
            }}
        }} else {{
            panic!(
                \"input: {{}}, output: PANIC, expected: {{}}\",
                inp, expected,
            );
        }}
    }}
}}
",
                       stringify!($intr),
                       cases)?;
            )*
                Ok(())
        }
    }
}
// fn(f32, f32) -> f32
// Binary f32 variant: inputs are the cross product of the edge cases, each
// edge case paired with a random value (in both argument positions), plus
// NTESTS fully random pairs. Separate RNG clones keep the streams disjoint.
macro_rules! f32f32_f32 {
    ($($intr:ident,)*) => {
        fn f32f32_f32(rng: &mut XorShiftRng) -> Result<(), Box<Error>> {
            extern "C" {
                $(fn $intr(_: f32, _: f32) -> f32;)*
            }
            let mut rng2 = rng.clone();
            let mut rng3 = rng.clone();
            $(
                let mut cases = String::new();
                for (i1, i2) in iproduct!(
                    EDGE_CASES32.iter().cloned(),
                    EDGE_CASES32.iter().cloned()
                ).chain(EDGE_CASES32.iter().map(|i1| (*i1, f32(rng))))
                    .chain(EDGE_CASES32.iter().map(|i2| (f32(&mut rng2), *i2)))
                    .chain((0..NTESTS).map(|_| (f32(&mut rng3), f32(&mut rng3))))
                {
                    let out = unsafe { $intr(i1, i2) };
                    let i1 = i1.to_bits();
                    let i2 = i2.to_bits();
                    let out = out.to_bits();
                    write!(cases, "(({}, {}), {})", i1, i2, out).unwrap();
                    cases.push(',');
                }
                let mut f = File::create(concat!("tests/", stringify!($intr), ".rs"))?;
                write!(f, "
#![deny(warnings)]
extern crate libm;
use std::panic;
#[test]
fn {0}() {{
    const CASES: &[((u32, u32), u32)] = &[
        {1}
    ];
    for case in CASES {{
        let ((i1, i2), expected) = *case;
        if let Ok(outf) = panic::catch_unwind(|| {{
            libm::{0}(f32::from_bits(i1), f32::from_bits(i2))
        }}) {{
            let outi = outf.to_bits();
            if !((outf.is_nan() && f32::from_bits(expected).is_nan())
                || libm::_eqf(outi, expected))
            {{
                panic!(
                    \"input: {{:?}}, output: {{}}, expected: {{}}\",
                    (i1, i2),
                    outi,
                    expected,
                );
            }}
        }} else {{
            panic!(
                \"input: {{:?}}, output: PANIC, expected: {{}}\",
                (i1, i2),
                expected,
            );
        }}
    }}
}}
",
                       stringify!($intr),
                       cases)?;
            )*
                Ok(())
        }
    };
}
// fn(f32, f32, f32) -> f32
// Ternary f32 variant (e.g. fmaf): edge-case triples, edge case plus two
// random values, plus NTESTS fully random triples.
macro_rules! f32f32f32_f32 {
    ($($intr:ident,)*) => {
        fn f32f32f32_f32(rng: &mut XorShiftRng) -> Result<(), Box<Error>> {
            extern "C" {
                $(fn $intr(_: f32, _: f32, _: f32) -> f32;)*
            }
            let mut rng2 = rng.clone();
            $(
                let mut cases = String::new();
                for (i1, i2, i3) in iproduct!(
                    EDGE_CASES32.iter().cloned(),
                    EDGE_CASES32.iter().cloned(),
                    EDGE_CASES32.iter().cloned()
                ).chain(EDGE_CASES32.iter().map(|i1| (*i1, f32(rng), f32(rng))))
                    .chain((0..NTESTS).map(|_| (f32(&mut rng2), f32(&mut rng2), f32(&mut rng2))))
                {
                    let out = unsafe { $intr(i1, i2, i3) };
                    let i1 = i1.to_bits();
                    let i2 = i2.to_bits();
                    let i3 = i3.to_bits();
                    let out = out.to_bits();
                    write!(cases, "(({}, {}, {}), {})", i1, i2, i3, out).unwrap();
                    cases.push(',');
                }
                let mut f = File::create(concat!("tests/", stringify!($intr), ".rs"))?;
                write!(f, "
#![deny(warnings)]
extern crate libm;
use std::panic;
#[test]
fn {0}() {{
    const CASES: &[((u32, u32, u32), u32)] = &[
        {1}
    ];
    for case in CASES {{
        let ((i1, i2, i3), expected) = *case;
        if let Ok(outf) = panic::catch_unwind(|| {{
            libm::{0}(
                f32::from_bits(i1),
                f32::from_bits(i2),
                f32::from_bits(i3),
            )
        }}) {{
            let outi = outf.to_bits();
            if !((outf.is_nan() && f32::from_bits(expected).is_nan())
                || libm::_eqf(outi, expected))
            {{
                panic!(
                    \"input: {{:?}}, output: {{}}, expected: {{}}\",
                    (i1, i2, i3),
                    outi,
                    expected,
                );
            }}
        }} else {{
            panic!(
                \"input: {{:?}}, output: PANIC, expected: {{}}\",
                (i1, i2, i3),
                expected,
            );
        }}
    }}
}}
",
                       stringify!($intr),
                       cases)?;
            )*
                Ok(())
        }
    };
}
// fn(f32, i32) -> f32
// Mixed variant (e.g. scalbnf): f32 inputs as usual; the integer argument
// is drawn from the i16 range (stored as i16 in the generated table, then
// widened to i32 at the call site).
macro_rules! f32i32_f32 {
    ($($intr:ident,)*) => {
        fn f32i32_f32(rng: &mut XorShiftRng) -> Result<(), Box<Error>> {
            extern "C" {
                $(fn $intr(_: f32, _: i32) -> f32;)*
            }
            let mut rng2 = rng.clone();
            $(
                let mut cases = String::new();
                for i1 in EDGE_CASES32.iter().cloned().chain((0..NTESTS).map(|_| f32(&mut rng2))) {
                    let i2 = rng.gen_range(i16::MIN, i16::MAX);
                    let out = unsafe { $intr(i1, i2 as i32) };
                    let i1 = i1.to_bits();
                    let out = out.to_bits();
                    write!(cases, "(({}, {}), {})", i1, i2, out).unwrap();
                    cases.push(',');
                }
                let mut f = File::create(concat!("tests/", stringify!($intr), ".rs"))?;
                write!(f, "
#![deny(warnings)]
extern crate libm;
use std::panic;
#[test]
fn {0}() {{
    const CASES: &[((u32, i16), u32)] = &[
        {1}
    ];
    for case in CASES {{
        let ((i1, i2), expected) = *case;
        if let Ok(outf) = panic::catch_unwind(|| {{
            libm::{0}(f32::from_bits(i1), i2 as i32)
        }}) {{
            let outi = outf.to_bits();
            if !((outf.is_nan() && f32::from_bits(expected).is_nan())
                || libm::_eqf(outi, expected))
            {{
                panic!(
                    \"input: {{:?}}, output: {{}}, expected: {{}}\",
                    (i1, i2),
                    outi,
                    expected,
                );
            }}
        }} else {{
            panic!(
                \"input: {{:?}}, output: PANIC, expected: {{}}\",
                (i1, i2),
                expected,
            );
        }}
    }}
}}
",
                       stringify!($intr),
                       cases)?;
            )*
                Ok(())
        }
    };
}
// fn(f64) -> f64
// Double-precision counterpart of f32_f32; compares via libm::_eq on u64
// bit patterns.
macro_rules! f64_f64 {
    ($($intr:ident,)*) => {
        fn f64_f64(rng: &mut XorShiftRng) -> Result<(), Box<Error>> {
            // MUSL C implementation of the function to test
            extern "C" {
                $(fn $intr(_: f64) -> f64;)*
            }
            $(
                let mut cases = String::new();
                for inp in EDGE_CASES64.iter().cloned().chain((0..NTESTS).map(|_| f64(rng))) {
                    let out = unsafe { $intr(inp) };
                    let inp = inp.to_bits();
                    let out = out.to_bits();
                    write!(cases, "({}, {})", inp, out).unwrap();
                    cases.push(',');
                }
                let mut f = File::create(concat!("tests/", stringify!($intr), ".rs"))?;
                write!(f, "
#![deny(warnings)]
extern crate libm;
use std::panic;
#[test]
fn {0}() {{
    const CASES: &[(u64, u64)] = &[
        {1}
    ];
    for case in CASES {{
        let (inp, expected) = *case;
        if let Ok(outf) = panic::catch_unwind(|| {{
            libm::{0}(f64::from_bits(inp))
        }}) {{
            let outi = outf.to_bits();
            if !((outf.is_nan() && f64::from_bits(expected).is_nan())
                || libm::_eq(outi, expected))
            {{
                panic!(
                    \"input: {{}}, output: {{}}, expected: {{}}\",
                    inp,
                    outi,
                    expected,
                );
            }}
        }} else {{
            panic!(
                \"input: {{}}, output: PANIC, expected: {{}}\",
                inp,
                expected,
            );
        }}
    }}
}}
",
                       stringify!($intr),
                       cases)?;
            )*
                Ok(())
        }
    }
}
// fn(f64, f64) -> f64
// Double-precision counterpart of f32f32_f32.
macro_rules! f64f64_f64 {
    ($($intr:ident,)*) => {
        fn f64f64_f64(rng: &mut XorShiftRng) -> Result<(), Box<Error>> {
            extern "C" {
                $(fn $intr(_: f64, _: f64) -> f64;)*
            }
            let mut rng2 = rng.clone();
            let mut rng3 = rng.clone();
            $(
                let mut cases = String::new();
                for (i1, i2) in iproduct!(
                    EDGE_CASES64.iter().cloned(),
                    EDGE_CASES64.iter().cloned()
                ).chain(EDGE_CASES64.iter().map(|i1| (*i1, f64(rng))))
                    .chain(EDGE_CASES64.iter().map(|i2| (f64(&mut rng2), *i2)))
                    .chain((0..NTESTS).map(|_| (f64(&mut rng3), f64(&mut rng3))))
                {
                    let out = unsafe { $intr(i1, i2) };
                    let i1 = i1.to_bits();
                    let i2 = i2.to_bits();
                    let out = out.to_bits();
                    write!(cases, "(({}, {}), {})", i1, i2, out).unwrap();
                    cases.push(',');
                }
                let mut f = File::create(concat!("tests/", stringify!($intr), ".rs"))?;
                write!(f, "
#![deny(warnings)]
extern crate libm;
use std::panic;
#[test]
fn {0}() {{
    const CASES: &[((u64, u64), u64)] = &[
        {1}
    ];
    for case in CASES {{
        let ((i1, i2), expected) = *case;
        if let Ok(outf) = panic::catch_unwind(|| {{
            libm::{0}(f64::from_bits(i1), f64::from_bits(i2))
        }}) {{
            let outi = outf.to_bits();
            if !((outf.is_nan() && f64::from_bits(expected).is_nan()) ||
                 libm::_eq(outi, expected)) {{
                panic!(
                    \"input: {{:?}}, output: {{}}, expected: {{}}\",
                    (i1, i2),
                    outi,
                    expected,
                );
            }}
        }} else {{
            panic!(
                \"input: {{:?}}, output: PANIC, expected: {{}}\",
                (i1, i2),
                expected,
            );
        }}
    }}
}}
",
                       stringify!($intr),
                       cases)?;
            )*
                Ok(())
        }
    };
}
// fn(f64, f64, f64) -> f64
// Double-precision counterpart of f32f32f32_f32 (e.g. fma).
macro_rules! f64f64f64_f64 {
    ($($intr:ident,)*) => {
        fn f64f64f64_f64(rng: &mut XorShiftRng) -> Result<(), Box<Error>> {
            extern "C" {
                $(fn $intr(_: f64, _: f64, _: f64) -> f64;)*
            }
            let mut rng2 = rng.clone();
            $(
                let mut cases = String::new();
                for (i1, i2, i3) in iproduct!(
                    EDGE_CASES64.iter().cloned(),
                    EDGE_CASES64.iter().cloned(),
                    EDGE_CASES64.iter().cloned()
                ).chain(EDGE_CASES64.iter().map(|i1| (*i1, f64(rng), f64(rng))))
                    .chain((0..NTESTS).map(|_| (f64(&mut rng2), f64(&mut rng2), f64(&mut rng2))))
                {
                    let out = unsafe { $intr(i1, i2, i3) };
                    let i1 = i1.to_bits();
                    let i2 = i2.to_bits();
                    let i3 = i3.to_bits();
                    let out = out.to_bits();
                    write!(cases, "(({}, {}, {}), {})", i1, i2, i3, out).unwrap();
                    cases.push(',');
                }
                let mut f = File::create(concat!("tests/", stringify!($intr), ".rs"))?;
                write!(f, "
#![deny(warnings)]
extern crate libm;
use std::panic;
#[test]
fn {0}() {{
    const CASES: &[((u64, u64, u64), u64)] = &[
        {1}
    ];
    for case in CASES {{
        let ((i1, i2, i3), expected) = *case;
        if let Ok(outf) = panic::catch_unwind(|| {{
            libm::{0}(
                f64::from_bits(i1),
                f64::from_bits(i2),
                f64::from_bits(i3),
            )
        }}) {{
            let outi = outf.to_bits();
            if !((outf.is_nan() && f64::from_bits(expected).is_nan())
                || libm::_eq(outi, expected))
            {{
                panic!(
                    \"input: {{:?}}, output: {{}}, expected: {{}}\",
                    (i1, i2, i3),
                    outi,
                    expected,
                );
            }}
        }} else {{
            panic!(
                \"input: {{:?}}, output: PANIC, expected: {{}}\",
                (i1, i2, i3),
                expected,
            );
        }}
    }}
}}
",
                       stringify!($intr),
                       cases)?;
            )*
                Ok(())
        }
    };
}
// fn(f64, i32) -> f64
// Double-precision counterpart of f32i32_f32 (e.g. scalbn); the integer
// argument is drawn from the i16 range and widened to i32 at the call site.
macro_rules! f64i32_f64 {
    ($($intr:ident,)*) => {
        fn f64i32_f64(rng: &mut XorShiftRng) -> Result<(), Box<Error>> {
            extern "C" {
                $(fn $intr(_: f64, _: i32) -> f64;)*
            }
            let mut rng2 = rng.clone();
            $(
                let mut cases = String::new();
                for i1 in EDGE_CASES64.iter().cloned().chain((0..NTESTS).map(|_| f64(&mut rng2))) {
                    let i2 = rng.gen_range(i16::MIN, i16::MAX);
                    let out = unsafe { $intr(i1, i2 as i32) };
                    let i1 = i1.to_bits();
                    let out = out.to_bits();
                    write!(cases, "(({}, {}), {})", i1, i2, out).unwrap();
                    cases.push(',');
                }
                let mut f = File::create(concat!("tests/", stringify!($intr), ".rs"))?;
                write!(f, "
#![deny(warnings)]
extern crate libm;
use std::panic;
#[test]
fn {0}() {{
    const CASES: &[((u64, i16), u64)] = &[
        {1}
    ];
    for case in CASES {{
        let ((i1, i2), expected) = *case;
        if let Ok(outf) = panic::catch_unwind(|| {{
            libm::{0}(f64::from_bits(i1), i2 as i32)
        }}) {{
            let outi = outf.to_bits();
            if !((outf.is_nan() && f64::from_bits(expected).is_nan()) ||
                 libm::_eq(outi, expected)) {{
                panic!(
                    \"input: {{:?}}, output: {{}}, expected: {{}}\",
                    (i1, i2),
                    outi,
                    expected,
                );
            }}
        }} else {{
            panic!(
                \"input: {{:?}}, output: PANIC, expected: {{}}\",
                (i1, i2),
                expected,
            );
        }}
    }}
}}
",
                       stringify!($intr),
                       cases)?;
            )*
                Ok(())
        }
    };
}
/// Regenerates the `tests/` directory: one bit-exact test file per math
/// intrinsic, driven by the generator functions the macros above expand to.
fn main() -> Result<(), Box<Error>> {
    // Start from a clean slate; ignore the error if `tests` doesn't exist yet.
    fs::remove_dir_all("tests").ok();
    fs::create_dir("tests")?;
    let mut rng = XorShiftRng::from_rng(&mut rand::thread_rng())?;
    f32_f32(&mut rng)?;
    f32f32_f32(&mut rng)?;
    f32f32f32_f32(&mut rng)?;
    f32i32_f32(&mut rng)?;
    f64_f64(&mut rng)?;
    f64f64_f64(&mut rng)?;
    f64f64f64_f64(&mut rng)?;
    f64i32_f64(&mut rng)?;
    Ok(())
}
/* Functions to test */
/* Each invocation below expands into a generator that writes tests/<name>.rs; */
/* adding a new intrinsic is normally just appending its name to the right list. */
// With signature `fn(f32) -> f32`
f32_f32! {
    acosf,
    floorf,
    truncf,
    asinf,
    atanf,
    cbrtf,
    cosf,
    ceilf,
    coshf,
    exp2f,
    expf,
    expm1f,
    log10f,
    log1pf,
    log2f,
    logf,
    roundf,
    sinf,
    sinhf,
    tanf,
    tanhf,
    fabsf,
    sqrtf,
}
// With signature `fn(f32, f32) -> f32`
f32f32_f32! {
    atan2f,
    fdimf,
    hypotf,
    fmodf,
    powf,
}
// With signature `fn(f32, f32, f32) -> f32`
f32f32f32_f32! {
    fmaf,
}
// With signature `fn(f32, i32) -> f32`
f32i32_f32! {
    scalbnf,
}
// With signature `fn(f64) -> f64`
f64_f64! {
    acos,
    asin,
    atan,
    cbrt,
    ceil,
    cos,
    cosh,
    exp,
    exp2,
    expm1,
    floor,
    log,
    log10,
    log1p,
    log2,
    round,
    sin,
    sinh,
    sqrt,
    tan,
    tanh,
    trunc,
    fabs,
}
// With signature `fn(f64, f64) -> f64`
f64f64_f64! {
    atan2,
    fdim,
    fmod,
    hypot,
    pow,
}
// With signature `fn(f64, f64, f64) -> f64`
f64f64f64_f64! {
    fma,
}
// With signature `fn(f64, i32) -> f64`
f64i32_f64! {
    scalbn,
}
|
use circuit::{CircuitDesc, Output};
/// A protocol backend capable of executing circuit descriptions on behalf
/// of one party.
pub trait ProtocolDesc {
    /// Protocol-specific representation of a circuit variable's value.
    type VarType;
    /// The circuit description type this protocol can execute.
    type CDescType: CircuitDesc;
    /// Creates a protocol instance for the given party number.
    fn new(party: u32)->Self;
    /// Returns the party number this instance was created for.
    fn get_party(&self)->u32;
    /// Evaluates `circuit`, returning each output paired with its value.
    fn exec_circuit(&self, circuit: &Self::CDescType)->
        Vec<(Output, Self::VarType)>;
}
|
pub mod error;
pub mod tokens; |
use holo_hash::{DnaHash};
use holochain_zome_types::capability::CapSecret;
use holochain_serialized_bytes::prelude::*;
/// Payload to send to remote DNAs that the local DNA wants to authenticate with.
/// Made unauthenticated, to allow subsequent requests to be authed against a CapClaim.
///
#[derive(Debug, Serialize, Deserialize, SerializedBytes)]
pub struct DnaRegistration {
    // Hash identifying the remote DNA to authenticate against.
    pub remote_dna: DnaHash,
    // Identifier of the permission being requested.
    pub permission_id: String,
    // Capability secret to be recorded as a CapClaim for later requests.
    pub secret: CapSecret,
}
|
use crate::protocol::parts::option_part::{OptionId, OptionPart};
use crate::protocol::parts::option_value::OptionValue;
const VERSION: &str = env!("CARGO_PKG_VERSION");
// An Options part that is used by the client to specify the client version, client
// type, and application name.
pub type ClientContext = OptionPart<ClientContextId>;
impl ClientContext {
    /// Builds the client-context part sent on connect: driver version,
    /// client type (sync vs async flavor, chosen at compile time via the
    /// `async` cargo feature) and the running executable's name.
    pub fn new() -> Self {
        let mut cc: Self = Self::default();
        cc.insert(
            ClientContextId::ClientVersion,
            OptionValue::STRING(VERSION.to_string()),
        );
        let client_type = if cfg!(feature = "async") {
            "hdbconnect_async (rust native HANA driver, https://crates.io/crates/hdbconnect_async)"
        } else {
            "hdbconnect (rust native HANA driver, https://crates.io/crates/hdbconnect)"
        }
        .to_string();
        cc.insert(
            ClientContextId::ClientType,
            OptionValue::STRING(client_type),
        );
        cc.insert(
            ClientContextId::ClientApplicationProgramm,
            OptionValue::STRING(
                // argv[0]; "<unknown>" when the program name is unavailable.
                std::env::args()
                    .next()
                    .unwrap_or_else(|| "<unknown>".to_string()),
            ),
        );
        cc
    }
}
/// Option ids used within the `ClientContext` part.
///
/// The trailing comments give each option's wire value and value type.
#[derive(Debug, Eq, PartialEq, Hash)]
pub enum ClientContextId {
    ClientVersion,             // 1 // STRING //
    ClientType,                // 2 // STRING //
    ClientApplicationProgramm, // 3 // STRING //
    /// Catch-all for ids unknown to this client; carries the raw wire value.
    __Unexpected__(u8),
}
impl OptionId<ClientContextId> for ClientContextId {
    /// Wire representation: ClientVersion=1, ClientType=2,
    /// ClientApplicationProgramm=3; unknown ids round-trip their raw value.
    fn to_u8(&self) -> u8 {
        match self {
            Self::ClientVersion => 1,
            Self::ClientType => 2,
            Self::ClientApplicationProgramm => 3,
            Self::__Unexpected__(val) => *val,
        }
    }
    /// Inverse of `to_u8`; unknown values are logged and preserved.
    fn from_u8(val: u8) -> Self {
        match val {
            1 => Self::ClientVersion,
            2 => Self::ClientType,
            3 => Self::ClientApplicationProgramm,
            other => {
                warn!("Unsupported value for ClientContextId received: {}", other);
                Self::__Unexpected__(other)
            }
        }
    }
    fn part_type(&self) -> &'static str {
        "ClientContext"
    }
}
impl std::fmt::Display for ClientContextId {
    /// Writes a human-readable name for the id.
    ///
    /// Note the intentional single-`m` spelling "ClientApplicationProgram" in
    /// the display text, kept from the original output.
    fn fmt(&self, w: &mut std::fmt::Formatter) -> std::fmt::Result {
        match *self {
            Self::ClientVersion => write!(w, "ClientVersion"),
            Self::ClientType => write!(w, "ClientType"),
            Self::ClientApplicationProgramm => write!(w, "ClientApplicationProgram"),
            // `__Unexpected__` values are produced by `from_u8` for unknown
            // wire ids, so this arm IS reachable; render the value instead of
            // panicking via `unreachable!` as before.
            Self::__Unexpected__(val) => write!(w, "__Unexpected__({})", val),
        }
    }
}
#[cfg(test)]
mod test {
    // Smoke test: constructing the context and Display-formatting it must not
    // panic (exercises `ClientContext::new` and the id `Display` impls).
    #[test]
    fn test_to_string() {
        println!("{}", super::ClientContext::new());
    }
}
|
extern crate ethereum_types;
extern crate keccak_hash;
use ethereum_types::H256;
use keccak_hash::keccak;
use std::collections::HashMap;
use std::vec::Vec;
/// Raw byte buffer alias, used for serialized proof data.
pub type Bytes = std::vec::Vec<u8>;
/// Fixed-depth sparse merkle tree keyed by 64-bit leaf slots.
pub struct SparseMerkleTree {
    /// the ith element holds all non-default nodes in the tree with height i.
    nodes: Vec<HashMap<u64, H256>>,
    /// number of edges from root to leaf, where a leaf is either the hash of
    /// a LeafItem or H(0).
    depth: usize,
    /// a hash for each level of the tree, where all leaves = H(0).
    default_hashes: Vec<H256>,
}
impl SparseMerkleTree {
    /// Builds a tree whose leaves are the hashes of `leaf_items`, keyed by
    /// their slots.
    pub fn new(leaf_items: Vec<impl LeafItem>) -> Self {
        let leaves: HashMap<u64, H256> = leaf_items
            .into_iter()
            .map(|item| (item.slot(), item.hash()))
            .collect();
        SmtBuilder::tree(leaves)
    }
    /// Builds an inclusion proof for `leaf_item`'s slot.
    pub fn proof(&self, leaf_item: impl LeafItem) -> SmtProof {
        SmtProver::proof(leaf_item.slot(), &self.nodes)
    }
    /// Returns true if the given leaf item is part of this merkle tree,
    /// using proof to reconstruct the root.
    pub fn verify(&self, leaf_item: impl LeafItem, proof: SmtProof) -> bool {
        let reconstructed_root =
            SmtProver::proof_root(leaf_item.slot(), &self.nodes, &self.default_hashes, proof);
        reconstructed_root == self.root()
    }
    /// Root hash of the tree; falls back to the top-level default hash for an
    /// empty tree.
    pub fn root(&self) -> H256 {
        self.nodes[self.depth]
            .get(&0)
            .unwrap_or(&self.default_hashes[self.depth])
            .clone()
    }
}
/// LeafItem is a type from which a SparseMerkleTree can be constructed out
/// of, e.g., a set of Transactions for a plasma block.
pub trait LeafItem {
    /// Position of this item's leaf in the tree (64-bit key).
    fn slot(&self) -> u64;
    /// Hash of the item; used as the leaf value.
    fn hash(&self) -> H256;
}
/// Internal constructor for `SparseMerkleTree` (stateless namespace).
struct SmtBuilder;
impl SmtBuilder {
    /// Builds a fixed-depth (64) sparse tree from the given leaves, computing
    /// one level of parents at a time and storing only non-default nodes.
    fn tree(leaves: HashMap<u64, H256>) -> SparseMerkleTree {
        let depth = 64;
        let default_hashes = Self::default_hashes(depth);
        let mut nodes = vec![leaves.clone()];
        let mut curr_nodes = leaves;
        for curr_level in 0..depth {
            let mut parent_nodes = HashMap::new();
            for (slot, node) in curr_nodes.iter() {
                let parent = Self::parent_node(slot, node, &curr_nodes, default_hashes[curr_level]);
                if let Some(parent_hash) = parent {
                    // Parent slot is the child slot halved.
                    parent_nodes.insert(slot / 2, parent_hash);
                }
            }
            nodes.push(parent_nodes.clone());
            curr_nodes = parent_nodes;
        }
        SparseMerkleTree {
            nodes,
            depth,
            default_hashes,
        }
    }
    /// Returns a vector of `depth + 1` hashes, where hashes[0] = keccak(H(0))
    /// and hashes[i] = keccak(hashes[i-1] || hashes[i-1]).
    fn default_hashes(depth: usize) -> Vec<H256> {
        let mut hashes = vec![keccak(H256::from(0))];
        for k in 1..depth + 1 {
            let mut input = vec![];
            let last = hashes[k - 1];
            input.extend(&last[..]);
            input.extend(&last[..]);
            hashes.push(keccak(input));
        }
        hashes
    }
    /// Returns the parent node of the given node in the merkle tree, or `None`
    /// when the parent will instead be produced while visiting the non-default
    /// left sibling.
    /// `slot` is the position of the given node at its level (i.e. is it
    /// the first node, second node, etc).
    /// `level_nodes` are all the non-default nodes at the same level as the
    /// given node.
    /// `level_default_hash` is the default hash to use at this level.
    fn parent_node(
        slot: &u64,
        node: &H256,
        level_nodes: &HashMap<u64, H256>,
        level_default_hash: H256,
    ) -> Option<H256> {
        if slot % 2 == 0 {
            // Left child: hash with the right sibling, defaulting if absent.
            let sibling_slot = slot + 1;
            let parent_hash = match level_nodes.get(&sibling_slot) {
                Some(sibling_hash) => {
                    let mut input = vec![];
                    input.extend(&node[..]);
                    input.extend(&sibling_hash[..]);
                    keccak(input)
                }
                None => {
                    let mut input = vec![];
                    input.extend(&node[..]);
                    input.extend(&level_default_hash[..]);
                    keccak(input)
                }
            };
            Some(parent_hash)
        } else {
            let sibling_slot = slot - 1;
            // Idiomatic presence check (was `if let None = ...`).
            if !level_nodes.contains_key(&sibling_slot) {
                // Right child with a default left sibling.
                let mut input = vec![];
                input.extend(&level_default_hash[..]);
                input.extend(&node[..]);
                let parent_hash = keccak(input);
                Some(parent_hash)
            } else {
                // the left sibling is not default and so we'll calculate the
                // parent in the first case
                None
            }
        }
    }
}
/// Internal proof generator/verifier for `SparseMerkleTree` (stateless
/// namespace).
struct SmtProver;
impl SmtProver {
    /// Builds the inclusion proof for the leaf at `slot`.
    ///
    /// NOTE: despite its name, a SET bit in `is_default_bits` at level i means
    /// the sibling at that level is NON-default (its hash is appended to
    /// `non_default_hashes`); a clear bit means the level's default hash
    /// applies. `proof_root` relies on this polarity.
    fn proof(slot: u64, nodes: &[HashMap<u64, H256>]) -> SmtProof {
        let depth = 64;
        let mut is_default_bits = 0;
        let mut non_default_hashes = vec![];
        let mut slot = slot;
        for curr_level in 0..depth {
            let sibling_slot = if slot % 2 == 0 { slot + 1 } else { slot - 1 };
            if let Some(sibling) = nodes[curr_level].get(&sibling_slot) {
                non_default_hashes.extend(&sibling[..]);
                is_default_bits |= 1 << curr_level;
            }
            slot /= 2;
        }
        SmtProof {
            is_default_bits,
            non_default_hashes,
        }
    }
    /// Recomputes the root implied by `proof` for the leaf at `slot`.
    /// Returns H(0) when the slot has no leaf in the tree.
    fn proof_root(
        slot: u64,
        nodes: &[HashMap<u64, H256>],
        default_hashes: &[H256],
        proof: SmtProof,
    ) -> H256 {
        if nodes[0].get(&slot).is_none() {
            return H256::from(0);
        }
        let depth = 64;
        let mut proof_bits = proof.is_default_bits;
        let non_default_hashes = proof.non_default_hashes;
        let mut proof_index = 0;
        let mut curr_slot = slot;
        let mut root_builder = *nodes[0].get(&slot).unwrap();
        for curr_level in 0..depth {
            // Low bit of proof_bits tells whether this level's sibling hash
            // was carried in the proof or is the level default.
            let sibling_hash = if proof_bits % 2 == 0 {
                &default_hashes[curr_level][..]
            } else {
                let non_default_sibling = &non_default_hashes[proof_index..proof_index + 32];
                proof_index += 32;
                non_default_sibling
            };
            // hash order changes if the curr node is a left or right child
            if curr_slot % 2 == 0 {
                let mut input = vec![];
                input.extend(&root_builder[..]);
                input.extend(&sibling_hash[..]);
                root_builder = keccak(input);
            } else {
                let mut input = vec![];
                input.extend(&sibling_hash[..]);
                input.extend(&root_builder[..]);
                root_builder = keccak(input);
            }
            proof_bits /= 2;
            curr_slot /= 2;
        }
        root_builder
    }
}
/// Inclusion proof for a single leaf.
pub struct SmtProof {
    /// Bitmask over levels: bit i is SET when the sibling at level i is
    /// non-default (its hash is carried in `non_default_hashes`); see
    /// `SmtProver::proof`. NOTE(review): the field name suggests the opposite
    /// polarity — confirm before renaming anything.
    pub is_default_bits: u64,
    /// Concatenated 32-byte sibling hashes, ordered leaf-to-root.
    pub non_default_hashes: Bytes,
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::str::FromStr;
    // Fixture roots below match an external plasma implementation
    // (presumably loom, per the test names — TODO confirm fixture origin).
    #[test]
    fn loom_build_tests() {
        let cases = [
            (
                // empty tree: root is the depth-64 default hash
                vec![],
                "6f35419d1da1260bc0f33d52e8f6d73fc5d672c0dca13bb960b4ae1adec17937",
            ),
            (
                vec![(
                    // slot
                    14414645988802088183 as u64,
                    // leaf_item hash
                    "4b114962ecf0d681fa416dc1a6f0255d52d701ab53433297e8962065c9d439bd",
                )],
                // expected merkle root
                "0ed6599c03641e5a20d9688f892278dbb48bbcf8b1ff2c9a0e2b7423af831a83",
            ),
            (
                vec![(
                    14414645988802088183 as u64,
                    "510a183d5457e0d22951440a273f0d8e28e01d15f750d79fd1b27442299f7220",
                )],
                "8d0ae4c94eaad54df5489e5f9d62eeb4bf06ff774a00b925e8a52776256e910f",
            ),
        ];
        for (leaves, expected_root) in cases.iter() {
            run_loom_test(leaves, expected_root);
        }
    }
    // Builds a tree from (slot, leaf-hash) pairs and checks the root.
    fn run_loom_test(leaves: &[(u64, &str)], expected_root: &str) {
        let tree = SmtBuilder::tree(build_leaves(leaves));
        let expected_root = H256::from_str(expected_root).unwrap();
        assert_eq!(tree.root(), expected_root);
    }
    // Parses hex leaf fixtures into the map form `SmtBuilder::tree` expects.
    fn build_leaves(leaves: &[(u64, &str)]) -> HashMap<u64, H256> {
        let mut leaf_hashes = HashMap::new();
        for (slot, leaf) in leaves {
            let leaf_hash = H256::from_str(leaf).unwrap();
            leaf_hashes.insert(*slot, leaf_hash);
        }
        leaf_hashes
    }
    #[test]
    fn loom_proof_tests() {
        let cases = [
            (
                // leaves to build tree with
                vec![(
                    14414645988802088183 as u64,
                    "510a183d5457e0d22951440a273f0d8e28e01d15f750d79fd1b27442299f7220",
                )],
                // slots to build proofs of and expected outcome
                vec![
                    (14414645988802088183 as u64, true),
                    (14414645988802088184 as u64, false),
                    (14414645988802088182 as u64, false),
                ],
            ),
            (
                vec![
                    (
                        2 as u64,
                        "cf04ea8bb4ff94066eb84dd932f9e66d1c9f40d84d5491f5a7735200de010d84",
                    ),
                    (
                        600 as u64,
                        "abcabcabacbc94566eb84dd932f9e66d1c9f40d84d5491f5a7735200de010d84",
                    ),
                    (
                        30000 as u64,
                        "abcaaaaaaaaaaaaaaaaaaaaaaaaaaaaa1c9f40d84d5491f5a7735200de010d84",
                    ),
                ],
                vec![(2 as u64, true), (600 as u64, true), (30000 as u64, true)],
            ),
        ];
        for (leaves, slot_verifications) in cases.iter() {
            run_loom_proof_tests(leaves, slot_verifications);
        }
    }
    // For each slot, generates a proof and checks whether reconstructing the
    // root from it matches the tree's real root (absent slots yield H(0)).
    fn run_loom_proof_tests(leaves: &[(u64, &str)], slot_verifications: &[(u64, bool)]) {
        let tree = SmtBuilder::tree(build_leaves(&leaves));
        for (slot, expected_verification) in slot_verifications.iter() {
            let proof = SmtProver::proof(*slot, &tree.nodes);
            let result = SmtProver::proof_root(*slot, &tree.nodes, &tree.default_hashes, proof);
            if *expected_verification {
                assert_eq!(result, tree.root());
            } else {
                assert_eq!(result, H256::from(0));
            }
        }
    }
}
|
/// Demonstrates the two first-word helpers plus array slicing.
fn main() {
    let sentence = String::from("tejas bubane bangalore");
    // Index-based version: returns a byte position into the string.
    println!("First word position is {}", first_word(&sentence));
    // Slice-based version: returns a borrowed sub-string instead.
    println!("First word is {}", first_word_slice(&sentence));
    // Slicing works on arrays too.
    let numbers = [1, 2, 3, 4, 5];
    let first_three: &[i32] = &numbers[0..3];
    println!("First 3 are {:?}", first_three);
}
/// Returns the byte index of the first space in `s`, or `s.len()` when the
/// string contains no space (i.e. it is a single word).
///
/// Takes `&str` instead of `&String` so it works with both `String` (via
/// deref coercion at the call site) and string literals.
fn first_word(s: &str) -> usize {
    // `b' '` is byte literal syntax for the ASCII space character.
    s.bytes().position(|byte| byte == b' ').unwrap_or(s.len())
} // but this index is in no way tied to the string: if the string mutates, the index may become invalid
// slice is reference to portion of string
/// Returns the first space-separated word of `s` as a borrowed slice; the
/// whole string when it contains no space.
fn first_word_slice(s: &str) -> &str {
    match s.bytes().position(|byte| byte == b' ') {
        Some(space_at) => &s[..space_at],
        None => s,
    }
}
|
use std::thread;
use std::collections::HashMap;
use std::intrinsics::type_name;
use std::mem;
use std::ptr::{self, Unique};
use std::sync::{Arc, Barrier, Mutex};
use std::time::Duration;
use bootstrap::input::ScanCode;
use bootstrap::window::Window;
use bootstrap::window::Message::*;
use bootstrap::time::Timer;
use bs_audio;
use polygon::{Renderer, RendererBuilder};
use singleton::Singleton;
use stopwatch::{Collector, Stopwatch};
use scene::*;
use resource::ResourceManager;
use ecs::*;
use component::*;
use debug_draw::DebugDraw;
/// Target duration of one frame at 60 fps, in seconds.
pub const TARGET_FRAME_TIME_SECONDS: f32 = 1.0 / 60.0;
/// Target duration of one frame, in milliseconds.
pub const TARGET_FRAME_TIME_MS: f32 = TARGET_FRAME_TIME_SECONDS * 1000.0;
// Global engine singleton; lifecycle is managed through the `Singleton` impl.
static mut INSTANCE: *mut Engine = ptr::null_mut();
/// The game engine singleton: owns the window, renderer, resources, and all
/// registered systems, and drives the fixed-timestep main loop.
pub struct Engine {
    /// Renderer behind a mutex so callers can borrow it via `Engine::renderer`.
    renderer: Mutex<Box<Renderer>>,
    window: Window,
    resource_manager: Box<ResourceManager>,
    /// Gameplay systems, updated each unpaused frame.
    systems: HashMap<SystemId, Box<System>>,
    /// Debug systems, updated every frame even while paused.
    debug_systems: HashMap<SystemId, Box<System>>,
    // TODO: Replace explicit update ordering with something more automatic (e.g. dependency hierarchy).
    audio_update: Box<System>,
    alarm_update: Box<System>,
    collision_update: Box<System>,
    scene: Scene,
    debug_draw: DebugDraw,
    /// Set when the window requests close; ends the main loop.
    close: bool,
    /// When true, gameplay systems are paused (F9 toggles, F11 single-steps).
    debug_pause: bool,
}
impl Engine {
    /// Starts the engine's main loop, blocking until the game shuts down.
    ///
    /// This function starts the engine's internal update loop which handles the details of
    /// processing input from the OS, invoking game code, and rendering each frame. This function
    /// blocks until the engine receives a message to begin the shutdown process, at which point it
    /// will end the update loop and perform any necessary shutdown and cleanup procedures. Once
    /// those have completed this function will return.
    ///
    /// Panics if the engine hasn't been created yet.
    pub fn start() {
        let instance = unsafe {
            debug_assert!(!INSTANCE.is_null(), "Cannot retrieve Engine instance because none exists");
            &mut *INSTANCE
        };
        // Run main loop.
        instance.main_loop();
        // Perform cleanup.
        unsafe { Engine::destroy_instance(); }
    }
    /// Retrieves a reference to current scene.
    ///
    /// Panics if the engine hasn't been created yet.
    pub fn scene<'a>() -> &'a Scene {
        let instance = Engine::instance();
        &instance.scene
    }
    /// Retrieves a reference to the resource manager.
    ///
    /// TODO: The resource manager should probably be a singleton too since it's already setup to
    /// be used through shared references.
    pub fn resource_manager<'a>() -> &'a ResourceManager {
        let instance = Engine::instance();
        &*instance.resource_manager
    }
    /// Runs `func` with exclusive access to the renderer and returns its
    /// result. Panics if the renderer mutex is poisoned.
    pub fn renderer<F, T>(func: F) -> T
        where F: FnOnce(&mut Renderer) -> T,
    {
        let instance = Engine::instance();
        let mut renderer = instance.renderer.lock().expect("Could not acquire lock on renderer mutex");
        func(&mut **renderer)
    }
    /// Retrieves a reference to the engine's window.
    pub fn window() -> &'static Window {
        &Engine::instance().window
    }
    /// Fixed-timestep update/draw loop; exits when `close` is set, then
    /// flushes the stopwatch telemetry to disk.
    fn main_loop(&mut self) {
        let timer = Timer::new();
        let mut collector = Collector::new().unwrap();
        loop {
            let _stopwatch = Stopwatch::new("loop");
            let start_time = timer.now();
            self.update();
            self.draw();
            if self.close {
                println!("shutting down engine");
                break;
            }
            // Warn when a frame overran its budget (suppressed in timing builds).
            if !cfg!(feature="timing") && timer.elapsed_ms(start_time) > TARGET_FRAME_TIME_MS {
                println!(
                    "WARNING: Missed frame time. Frame time: {}ms, target frame time: {}ms",
                    timer.elapsed_ms(start_time),
                    TARGET_FRAME_TIME_MS);
            }
            // Wait for target frame time.
            let mut remaining_time_ms = TARGET_FRAME_TIME_MS - timer.elapsed_ms(start_time);
            while remaining_time_ms > 1.0 {
                thread::sleep(Duration::from_millis(remaining_time_ms as u64));
                remaining_time_ms = TARGET_FRAME_TIME_MS - timer.elapsed_ms(start_time);
            }
            // Busy-wait out the final sub-millisecond remainder, since sleep
            // granularity is too coarse for it.
            while remaining_time_ms > 0.0 {
                remaining_time_ms = TARGET_FRAME_TIME_MS - timer.elapsed_ms(start_time);
            }
            // TODO: Don't flip buffers until end of frame time?
        };
        collector.flush_to_file("stopwatch.csv");
    }
    /// Pumps window messages, routes input into the scene, then runs alarm,
    /// gameplay, debug, collision, and audio systems for one frame.
    fn update(&mut self) {
        let _stopwatch = Stopwatch::new("update");
        let scene = &mut self.scene;
        scene.input.clear();
        // TODO: Make this an iterator to simplify this loop.
        while let Some(message) = self.window.next_message() {
            match message {
                Activate => (),
                Close => self.close = true,
                Destroy => (),
                Paint => (),
                // Handle inputs.
                KeyDown(_)
                | KeyUp(_)
                | MouseMove(_, _)
                | MousePos(_, _)
                | MouseButtonPressed(_)
                | MouseButtonReleased(_)
                | MouseWheel(_) => scene.input.push_input(message),
            }
        }
        // TODO: More efficient handling of debug pause (i.e. something that doesn't have any
        // overhead when doing a release build).
        // F11 single-steps one frame even while paused.
        if !self.debug_pause || scene.input.key_pressed(ScanCode::F11) {
            self.debug_draw.clear_buffer();
            self.alarm_update.update(scene, TARGET_FRAME_TIME_SECONDS);
            // Update systems.
            for (_, system) in self.systems.iter_mut() {
                system.update(scene, TARGET_FRAME_TIME_SECONDS);
            }
            // Update component managers.
            scene.update_managers();
        }
        // Update debug systems always forever.
        for (_, system) in self.debug_systems.iter_mut() {
            system.update(scene, TARGET_FRAME_TIME_SECONDS);
        }
        // NOTE: Transform update used to go here.
        if !self.debug_pause || scene.input.key_pressed(ScanCode::F11) {
            self.collision_update.update(scene, TARGET_FRAME_TIME_SECONDS);
            self.audio_update.update(scene, TARGET_FRAME_TIME_SECONDS);
        }
        // F9 toggles the debug pause; F11 leaves the engine paused after its
        // single-stepped frame.
        if scene.input.key_pressed(ScanCode::F9) {
            self.debug_pause = !self.debug_pause;
        }
        if scene.input.key_pressed(ScanCode::F11) {
            self.debug_pause = true;
        }
    }
    /// Renders the current frame (compiled out under the `no-draw` feature).
    #[cfg(not(feature="no-draw"))]
    fn draw(&mut self) {
        let _stopwatch = Stopwatch::new("draw");
        self.renderer
            .lock()
            .expect("Unable to acquire lock on renderer mutex for drawing")
            .draw();
    }
    /// No-op draw used when the `no-draw` feature is enabled.
    #[cfg(feature="no-draw")]
    fn draw(&mut self) {}
}
unsafe impl Singleton for Engine {
    /// Creates the instance of the singleton.
    ///
    /// Boxes the engine and leaks it into the global `INSTANCE` pointer;
    /// panics if an instance already exists.
    fn set_instance(engine: Engine) {
        assert!(unsafe { INSTANCE.is_null() }, "Cannot create more than one Engine instance");
        let boxed_engine = Box::new(engine);
        unsafe {
            INSTANCE = Box::into_raw(boxed_engine);
        }
    }
    /// Retrieves an immutable reference to the singleton instance.
    ///
    /// This function is unsafe because there is no way of knowing, at compile
    /// time, that the instance is alive: callers must ensure the engine has
    /// been created (`set_instance`) and not yet destroyed.
    fn instance() -> &'static Self {
        unsafe {
            debug_assert!(!INSTANCE.is_null(), "Cannot retrieve Engine instance because none exists");
            &*INSTANCE
        }
    }
    /// Destroys the instance of the singleton.
    unsafe fn destroy_instance() {
        let ptr = mem::replace(&mut INSTANCE, ptr::null_mut());
        // Re-boxing the raw pointer drops the engine when the box goes out of
        // scope.
        Box::from_raw(ptr);
    }
}
/// Accumulates systems, debug systems, and component managers before the
/// engine singleton is instantiated.
pub struct EngineBuilder {
    systems: HashMap<SystemId, Box<System>>,
    debug_systems: HashMap<SystemId, Box<System>>,
    managers: ManagerMap,
    /// Number of async worker threads to start (must be > 0).
    max_workers: usize,
}
/// A builder for configuring the components and systems registered with the game engine.
///
/// Component managers and systems cannot be changed once the engine has been instantiated so they
/// must be provided all together when the instance is created. `EngineBuilder` provides an
/// interface for gathering all managers and systems to be provided to the engine.
impl EngineBuilder {
    /// Creates a new `EngineBuilder` object.
    pub fn new() -> EngineBuilder {
        let mut builder = EngineBuilder {
            systems: HashMap::new(),
            debug_systems: HashMap::new(),
            managers: ManagerMap::new(),
            max_workers: 1,
        };
        // Register internal component managers.
        builder.register_component::<Transform>();
        builder.register_component::<Camera>();
        builder.register_component::<Light>();
        builder.register_component::<Mesh>();
        builder.register_component::<AudioSource>();
        builder.register_component::<AlarmId>();
        builder.register_component::<Collider>();
        builder
    }
    /// Consumes the builder and creates the `Engine` instance.
    ///
    /// No `Engine` object is returned because this method instantiates the engine singleton.
    pub fn build(self) {
        let engine = {
            let window = {
                // NOTE(review): `mem::uninitialized` + cross-thread `ptr::write`
                // is UB-prone by modern Rust rules (deprecated in favor of
                // `MaybeUninit`); the barrier below is what prevents the main
                // thread from reading `window` before the spawned thread has
                // written it.
                let mut window = unsafe { mem::uninitialized() };
                let mut out = unsafe { Unique::new(&mut window as *mut _) };
                let barrier = Arc::new(Barrier::new(2));
                let barrier_clone = barrier.clone();
                thread::spawn(move || {
                    let mut window = Window::new("gunship game").unwrap();
                    let mut message_pump = window.message_pump();
                    // write data out to `window` without dropping the old (uninitialized) value.
                    unsafe { ptr::write(out.get_mut(), window); }
                    // Sync with the main thread before entering the message pump.
                    barrier_clone.wait();
                    message_pump.run();
                });
                // Wait until window thread finishes creating the window.
                barrier.wait();
                window
            };
            let mut renderer = RendererBuilder::new(&window).build();
            let debug_draw = DebugDraw::new(&mut *renderer);
            let resource_manager = Box::new(ResourceManager::new());
            let audio_source = match bs_audio::init() {
                Ok(audio_source) => audio_source,
                Err(error) => {
                    // TODO: Rather than panicking, create a null audio system and keep running.
                    panic!("Error while initialzing audio subsystem: {}", error)
                },
            };
            Engine {
                window: window,
                renderer: Mutex::new(renderer),
                resource_manager: resource_manager,
                systems: self.systems,
                debug_systems: self.debug_systems,
                audio_update: Box::new(AudioSystem),
                alarm_update: Box::new(alarm_update),
                collision_update: Box::new(CollisionSystem::new()),
                scene: Scene::new(audio_source, self.managers),
                debug_draw: debug_draw,
                close: false,
                debug_pause: false,
            }
        };
        // Init async subsystem.
        ::async::init();
        ::async::start_workers(self.max_workers);
        Engine::set_instance(engine);
        run!(Engine::start());
    }
    /// Sets the number of async worker threads; panics when `workers` is 0.
    pub fn max_workers(&mut self, workers: usize) -> &mut EngineBuilder {
        assert!(workers > 0, "There must be at least one worker for the engine to run");
        self.max_workers = workers;
        self
    }
    /// Registers the manager for the specified component type.
    ///
    /// Defers internally to `register_manager()`.
    pub fn register_component<T: Component>(&mut self) -> &mut EngineBuilder {
        T::Manager::register(self);
        self
    }
    /// Registers the specified manager with the engine.
    ///
    /// Defers internally to `ComponentManager::register()`.
    pub fn register_manager<T: ComponentManager>(&mut self, manager: T) -> &mut EngineBuilder {
        let manager_id = ManagerId::of::<T>();
        assert!(
            !self.managers.contains_key(&manager_id),
            "Manager {} with ID {:?} already registered", unsafe { type_name::<T>() }, &manager_id);
        // Box the manager as a trait object to construct the data and vtable pointers.
        let boxed_manager = Box::new(manager);
        // Add the manager to the type map and the component id to the component map.
        self.managers.insert(manager_id, boxed_manager);
        self
    }
    /// Registers the system with the engine.
    pub fn register_system<T: System>(&mut self, system: T) -> &mut EngineBuilder {
        let system_id = SystemId::of::<T>();
        assert!(
            !self.systems.contains_key(&system_id),
            "System {} with ID {:?} already registered", unsafe { type_name::<T>() }, &system_id);
        self.systems.insert(system_id, Box::new(system));
        self
    }
    /// Registers the debug system with the engine.
    pub fn register_debug_system<T: System>(&mut self, system: T) -> &mut EngineBuilder {
        let system_id = SystemId::of::<T>();
        assert!(
            !self.debug_systems.contains_key(&system_id),
            "System {} with ID {:?} already registered", unsafe { type_name::<T>() }, &system_id);
        self.debug_systems.insert(system_id, Box::new(system));
        self
    }
}
|
mod options;
use std::collections::VecDeque;
use std::convert::TryInto;
use std::mem;
use std::sync::Arc;
use num_bigint::{BigInt, Sign};
use liblumen_alloc::erts::process::Process;
use liblumen_alloc::erts::term::closure::{Creator, Definition};
use liblumen_alloc::erts::term::prelude::*;
use liblumen_alloc::erts::Node;
use crate::runtime::distribution::nodes::node::{self, arc_node};
use crate::runtime::distribution::external_term_format::{version, Tag};
use options::*;
/// Encodes `term` in Erlang external term format and returns the result as a
/// new binary allocated on `process`'s heap.
///
/// NOTE(review): `options` is threaded into the encoder unchanged — confirm
/// which options `term_to_byte_vec` actually honors.
pub fn term_to_binary(process: &Process, term: Term, options: Options) -> Term {
    let byte_vec = term_to_byte_vec(process, &options, term);
    process.binary_from_bytes(&byte_vec)
}
// Private
// TODO implement creation rotation
// > A 32-bit big endian unsigned integer. All identifiers originating from the same node
// > incarnation must have identical Creation values. This makes it possible to separate identifiers
// > from old (crashed) nodes from a new one. The value zero should be avoided for normal operations
// > as it is used as a wild card for debug purpose (like a pid returned by erlang:list_to_pid/1).
const CREATION: u8 = 0;
// NEWER_REFERENCE_EXT's length field counts 32-bit words and must not exceed 3.
const NEWER_REFERENCE_EXT_MAX_U32_LEN: usize = 3;
// Inclusive value ranges for the fixed-size integer encodings.
const SMALL_INTEGER_EXT_MIN: isize = std::u8::MIN as isize;
const SMALL_INTEGER_EXT_MAX: isize = std::u8::MAX as isize;
const INTEGER_EXT_MIN: isize = std::i32::MIN as isize;
const INTEGER_EXT_MAX: isize = std::i32::MAX as isize;
// Maximum element/byte counts for the short length-prefixed encodings.
const SMALL_TUPLE_EXT_MAX_LEN: usize = std::u8::MAX as usize;
const STRING_EXT_MAX_LEN: usize = std::u16::MAX as usize;
const SMALL_BIG_EXT_MAX_LEN: usize = std::u8::MAX as usize;
const SMALL_ATOM_UTF8_EXT_MAX_LEN: usize = std::u8::MAX as usize;
/// Appends `big_int` as SMALL_BIG_EXT or LARGE_BIG_EXT: tag, length,
/// sign byte (1 = negative), then little-endian magnitude bytes.
fn append_big_int(byte_vec: &mut Vec<u8>, big_int: &BigInt) {
    let (sign, mut magnitude_le) = big_int.to_bytes_le();
    let len = magnitude_le.len();
    // The small variant carries a one-byte length; the large one, four bytes.
    if len <= SMALL_BIG_EXT_MAX_LEN {
        push_tag(byte_vec, Tag::SmallBig);
        byte_vec.push(len as u8);
    } else {
        push_tag(byte_vec, Tag::LargeBig);
        append_usize_as_u32(byte_vec, len);
    }
    byte_vec.push(match sign {
        Sign::Minus => 1,
        _ => 0,
    });
    byte_vec.append(&mut magnitude_le);
}
/// Appends the raw bytes of a binary to the output buffer.
fn append_binary_bytes(byte_vec: &mut Vec<u8>, binary_bytes: &[u8]) {
    byte_vec.extend(binary_bytes.iter().copied())
}
/// Appends the pid encoding of a closure's creator; local creators use this
/// node's identity, external ones carry their own node.
fn append_creator(byte_vec: &mut Vec<u8>, creator: &Creator) {
    let (creator_node, number, serial) = match creator {
        Creator::Local(pid) => (node::arc_node(), pid.number() as u32, pid.serial() as u32),
        Creator::External(external_pid) => (
            external_pid.arc_node(),
            external_pid.number() as u32,
            external_pid.serial() as u32,
        ),
    };
    append_pid(byte_vec, creator_node, number, serial);
}
/// Appends a pid as PID_EXT (one-byte creation) or NEW_PID_EXT (four-byte
/// creation): tag, node-name atom, 32-bit id, 32-bit serial, creation.
fn append_pid(byte_vec: &mut Vec<u8>, arc_node: Arc<Node>, id: u32, serial: u32) {
    let creation = arc_node.creation();
    // Decide the layout once; previously this condition was evaluated twice
    // (for the tag and again for the trailer), which could drift apart.
    let creation_fits_u8 = creation <= (std::u8::MAX as u32);
    let tag = if creation_fits_u8 {
        Tag::PID
    } else {
        Tag::NewPID
    };
    push_tag(byte_vec, tag);
    byte_vec.extend_from_slice(&atom_to_byte_vec(arc_node.name()));
    byte_vec.extend_from_slice(&id.to_be_bytes());
    byte_vec.extend_from_slice(&serial.to_be_bytes());
    if creation_fits_u8 {
        byte_vec.push(creation as u8);
    } else {
        byte_vec.extend_from_slice(&creation.to_be_bytes());
    };
}
/// Appends `len_usize` as a big-endian u16; panics if it does not fit.
fn append_usize_as_u16(byte_vec: &mut Vec<u8>, len_usize: usize) {
    assert!(len_usize <= (std::u16::MAX as usize));
    byte_vec.extend_from_slice(&(len_usize as u16).to_be_bytes());
}
/// Appends `len_usize` as a big-endian u32; panics if it does not fit.
fn append_usize_as_u32(byte_vec: &mut Vec<u8>, len_usize: usize) {
    assert!(len_usize <= (std::u32::MAX as usize));
    byte_vec.extend_from_slice(&(len_usize as u32).to_be_bytes());
}
/// Encodes an atom's name: pure-ASCII names use the legacy ATOM_EXT layout
/// (16-bit length); other names use SMALL_ATOM_UTF8_EXT when the UTF-8 byte
/// length fits in one byte, else ATOM_UTF8_EXT.
fn atom_to_byte_vec(atom: Atom) -> Vec<u8> {
    let name_bytes = atom.name().as_bytes();
    let len = name_bytes.len();
    let mut encoded: Vec<u8> = Vec::new();
    if name_bytes.iter().all(u8::is_ascii) {
        push_tag(&mut encoded, Tag::Atom);
        append_usize_as_u16(&mut encoded, len);
    } else if len <= SMALL_ATOM_UTF8_EXT_MAX_LEN {
        push_tag(&mut encoded, Tag::SmallAtomUTF8);
        encoded.push(len as u8);
    } else {
        push_tag(&mut encoded, Tag::AtomUTF8);
        append_usize_as_u16(&mut encoded, len);
    }
    encoded.extend_from_slice(name_bytes);
    encoded
}
// Tail is the final tail of the list; it is NIL_EXT for a proper list, but can be any type if the
// list is improper (for example, [a|b]).
// -- http://erlang.org/doc/apps/erts/erl_ext_dist.html#list_ext
/// Splits `cons` into its proper elements and its tail (NIL for proper lists).
fn cons_to_element_vec_tail(cons: &Cons) -> (Vec<Term>, Term) {
    let mut element_vec: Vec<Term> = Vec::new();
    let mut tail = Term::NIL;
    for result in cons.into_iter() {
        match result {
            Ok(element) => element_vec.push(element),
            // The cons iterator signals an improper list by yielding its
            // non-NIL tail as an error at the end.
            Err(ImproperList {
                tail: improper_list_tail,
            }) => tail = improper_list_tail,
        }
    }
    (element_vec, tail)
}
/// Appends the single wire byte identifying `tag`.
fn push_tag(byte_vec: &mut Vec<u8>, tag: Tag) {
    let tag_byte: u8 = tag.into();
    byte_vec.push(tag_byte);
}
/// Serializes `term` to external term format bytes, starting with the format
/// version byte.
///
/// Uses an explicit work stack (depth-first, children pushed in reverse so
/// they are emitted in order) instead of recursion for lists, maps, and
/// tuples; closures recurse via `term_to_byte_vec` for their environment.
fn term_to_byte_vec(process: &Process, options: &Options, term: Term) -> Vec<u8> {
    let mut stack = VecDeque::new();
    stack.push_front(term);
    let mut byte_vec: Vec<u8> = vec![version::NUMBER];
    while let Some(front_term) = stack.pop_front() {
        match front_term.decode().unwrap() {
            TypedTerm::Atom(atom) => {
                byte_vec.extend_from_slice(&atom_to_byte_vec(atom));
            }
            TypedTerm::List(cons) => {
                // Prefer the compact STRING_EXT form when every element is a
                // byte-sized integer; otherwise fall back to LIST_EXT.
                match try_cons_to_string_ext_byte_vec(&cons) {
                    Ok(mut string_ext_byte_vec) => byte_vec.append(&mut string_ext_byte_vec),
                    Err(_) => {
                        push_tag(&mut byte_vec, Tag::List);
                        let (element_vec, tail) = cons_to_element_vec_tail(&cons);
                        let len_usize = element_vec.len();
                        append_usize_as_u32(&mut byte_vec, len_usize);
                        // Tail is pushed first so it is encoded after all
                        // elements, as LIST_EXT requires.
                        stack.push_front(tail);
                        for element in element_vec.into_iter().rev() {
                            stack.push_front(element)
                        }
                    }
                };
            }
            TypedTerm::Nil => {
                push_tag(&mut byte_vec, Tag::Nil);
            }
            TypedTerm::Pid(pid) => {
                // Local pids are encoded against this node's identity.
                append_pid(
                    &mut byte_vec,
                    arc_node(),
                    pid.number() as u32,
                    pid.serial() as u32,
                );
            }
            TypedTerm::SmallInteger(small_integer) => {
                let small_integer_isize: isize = small_integer.into();
                match try_append_isize_as_small_integer_or_integer(
                    &mut byte_vec,
                    small_integer_isize,
                ) {
                    Ok(()) => (),
                    Err(_) => {
                        let small_integer_i64 = small_integer_isize as i64;
                        // convert to big int, so that the number of bytes is minimum instead of
                        // jumping to 8 to hold i64.
                        let small_integer_big_int: BigInt = small_integer_i64.into();
                        append_big_int(&mut byte_vec, &small_integer_big_int);
                    }
                }
            }
            TypedTerm::BigInteger(big_integer) => {
                let big_int: &BigInt = big_integer.as_ref().into();
                append_big_int(&mut byte_vec, big_int);
            }
            TypedTerm::Float(float) => {
                // NEW_FLOAT_EXT: 8 bytes, IEEE 754 big-endian.
                let float_f64: f64 = float.into();
                push_tag(&mut byte_vec, Tag::NewFloat);
                byte_vec.extend_from_slice(&float_f64.to_be_bytes());
            }
            TypedTerm::Closure(closure) => {
                match closure.definition() {
                    // EXPORT_EXT: module, function, arity.
                    Definition::Export { function } => {
                        push_tag(&mut byte_vec, Tag::Export);
                        byte_vec.append(&mut atom_to_byte_vec(closure.module()));
                        byte_vec.append(&mut atom_to_byte_vec(*function));
                        try_append_isize_as_small_integer_or_integer(
                            &mut byte_vec,
                            closure.arity() as isize,
                        )
                        .unwrap();
                    }
                    // NEW_FUN_EXT: size-prefixed record of arity, uniq, index,
                    // free-var count, module, old index/uniq, creator pid, and
                    // the encoded environment terms.
                    Definition::Anonymous {
                        index,
                        old_unique,
                        unique,
                        //creator,
                    } => {
                        // The closure's own creator is not carried here; a
                        // default local pid is used instead (see commented-out
                        // `creator` binding above) — TODO confirm intent.
                        let default_creator = Creator::Local(Pid::default());
                        let mut sized_byte_vec: Vec<u8> = Vec::new();
                        let module_function_arity = closure.module_function_arity();
                        sized_byte_vec.push(module_function_arity.arity);
                        sized_byte_vec.extend_from_slice(unique);
                        sized_byte_vec.extend_from_slice(&index.to_be_bytes());
                        let env_len_u32: u32 = closure.env_len().try_into().unwrap();
                        sized_byte_vec.extend_from_slice(&env_len_u32.to_be_bytes());
                        sized_byte_vec.append(&mut atom_to_byte_vec(module_function_arity.module));
                        // > [index] encoded using SMALL_INTEGER_EXT or INTEGER_EXT.
                        try_append_isize_as_small_integer_or_integer(
                            &mut sized_byte_vec,
                            (*index).try_into().unwrap(),
                        )
                        .unwrap();
                        // > An integer encoded using SMALL_INTEGER_EXT or INTEGER_EXT
                        // But this means OldUniq can't be the same a Uniq with a different
                        // encoding,
                        try_append_isize_as_small_integer_or_integer(
                            &mut sized_byte_vec,
                            (*old_unique).try_into().unwrap(),
                        )
                        .unwrap();
                        append_creator(&mut sized_byte_vec, &default_creator);
                        for term in closure.env_slice() {
                            sized_byte_vec.append(&mut term_to_byte_vec(process, options, *term));
                        }
                        // Size field covers itself plus the payload.
                        const SIZE_BYTE_LEN: usize = mem::size_of::<u32>();
                        let size = (SIZE_BYTE_LEN + sized_byte_vec.len()) as u32;
                        push_tag(&mut byte_vec, Tag::NewFunction);
                        byte_vec.extend_from_slice(&size.to_be_bytes());
                        byte_vec.append(&mut sized_byte_vec);
                    }
                }
            }
            TypedTerm::ExternalPid(external_pid) => {
                append_pid(
                    &mut byte_vec,
                    external_pid.arc_node(),
                    external_pid.number() as u32,
                    external_pid.serial() as u32,
                );
            }
            TypedTerm::Map(map) => {
                // MAP_EXT: arity then alternating key/value terms; pairs are
                // pushed so each key is encoded before its value.
                push_tag(&mut byte_vec, Tag::Map);
                let len_usize = map.len();
                append_usize_as_u32(&mut byte_vec, len_usize);
                for (key, value) in map.iter() {
                    stack.push_front(*value);
                    stack.push_front(*key);
                }
            }
            TypedTerm::HeapBinary(heap_bin) => {
                push_tag(&mut byte_vec, Tag::Binary);
                let len_usize = heap_bin.full_byte_len();
                append_usize_as_u32(&mut byte_vec, len_usize);
                byte_vec.extend_from_slice(heap_bin.as_bytes());
            }
            TypedTerm::MatchContext(match_context) => {
                // Only aligned, byte-multiple match contexts are supported.
                if match_context.is_binary() {
                    if match_context.is_aligned() {
                        append_binary_bytes(&mut byte_vec, unsafe {
                            match_context.as_bytes_unchecked()
                        });
                    } else {
                        unimplemented!()
                    }
                } else {
                    unimplemented!()
                }
            }
            TypedTerm::ProcBin(proc_bin) => {
                push_tag(&mut byte_vec, Tag::Binary);
                let len_usize = proc_bin.full_byte_len();
                append_usize_as_u32(&mut byte_vec, len_usize);
                byte_vec.extend_from_slice(proc_bin.as_bytes());
            }
            TypedTerm::Reference(reference) => {
                // NEWER_REFERENCE_EXT: the scheduler id and 64-bit number are
                // packed as 32-bit words (3 words here).
                let scheduler_id_u32: u32 = reference.scheduler_id().into();
                let number: u64 = reference.number().into();
                push_tag(&mut byte_vec, Tag::NewerReference);
                let u32_byte_len = mem::size_of::<u32>();
                let len_usize = (mem::size_of::<u32>() + mem::size_of::<u64>()) / u32_byte_len;
                // > Len - A 16-bit big endian unsigned integer not larger than 3.
                assert!(len_usize <= NEWER_REFERENCE_EXT_MAX_U32_LEN);
                append_usize_as_u16(&mut byte_vec, len_usize);
                byte_vec.extend_from_slice(&atom_to_byte_vec(node::atom()));
                let creation_u32 = CREATION as u32;
                byte_vec.extend_from_slice(&creation_u32.to_be_bytes());
                byte_vec.extend_from_slice(&scheduler_id_u32.to_be_bytes());
                byte_vec.extend_from_slice(&number.to_be_bytes());
            }
            TypedTerm::SubBinary(subbinary) => {
                if subbinary.is_binary() {
                    // Whole-byte subbinary: plain BINARY_EXT.
                    push_tag(&mut byte_vec, Tag::Binary);
                    let len_usize = subbinary.full_byte_len();
                    append_usize_as_u32(&mut byte_vec, len_usize);
                    if subbinary.is_aligned() {
                        byte_vec.extend_from_slice(unsafe { subbinary.as_bytes_unchecked() });
                    } else {
                        byte_vec.extend(subbinary.full_byte_iter());
                    }
                } else {
                    // Bitstring with a trailing partial byte: BIT_BINARY_EXT,
                    // which records how many bits of the final byte are used.
                    push_tag(&mut byte_vec, Tag::BitBinary);
                    let len_usize = subbinary.total_byte_len();
                    append_usize_as_u32(&mut byte_vec, len_usize);
                    let bits_u8 = subbinary.partial_byte_bit_len();
                    byte_vec.push(bits_u8);
                    if subbinary.is_aligned() {
                        byte_vec.extend_from_slice(unsafe { subbinary.as_bytes_unchecked() });
                    } else {
                        byte_vec.extend(subbinary.full_byte_iter());
                    }
                    // Pack the trailing bits into the high end of one byte.
                    let mut last_byte: u8 = 0;
                    for (index, bit) in subbinary.partial_byte_bit_iter().enumerate() {
                        last_byte |= bit << (7 - index);
                    }
                    byte_vec.push(last_byte);
                }
            }
            TypedTerm::Tuple(tuple) => {
                let len_usize = tuple.len();
                if len_usize <= SMALL_TUPLE_EXT_MAX_LEN {
                    push_tag(&mut byte_vec, Tag::SmallTuple);
                    byte_vec.push(len_usize as u8);
                } else {
                    push_tag(&mut byte_vec, Tag::LargeTuple);
                    append_usize_as_u32(&mut byte_vec, len_usize);
                }
                for element in tuple.iter().rev() {
                    stack.push_front(*element);
                }
            }
            _ => unimplemented!("term_to_binary({:?})", front_term),
        };
    }
    byte_vec
}
/// Append `integer` to `byte_vec` in the most compact external-term-format
/// integer encoding that can represent it:
///
/// * `SMALL_INTEGER_EXT` (tag + one unsigned byte) when it lies in the
///   small-integer range.
/// * `INTEGER_EXT` (tag + big-endian signed 32-bit payload) when it lies in
///   the 32-bit integer range.
///
/// Returns `Err(TypeError)` when `integer` fits neither range; `byte_vec` is
/// left untouched in that case.
fn try_append_isize_as_small_integer_or_integer(
    mut byte_vec: &mut Vec<u8>,
    integer: isize,
) -> Result<(), TypeError> {
    if (SMALL_INTEGER_EXT_MIN..=SMALL_INTEGER_EXT_MAX).contains(&integer) {
        // One unsigned byte of payload.
        push_tag(&mut byte_vec, Tag::SmallInteger);
        byte_vec.extend_from_slice(&(integer as u8).to_be_bytes());
        Ok(())
    } else if (INTEGER_EXT_MIN..=INTEGER_EXT_MAX).contains(&integer) {
        // Big-endian signed 32-bit payload.
        push_tag(&mut byte_vec, Tag::Integer);
        byte_vec.extend_from_slice(&(integer as i32).to_be_bytes());
        Ok(())
    } else {
        Err(TypeError)
    }
}
/// Attempt to encode the list `cons` as a `STRING_EXT` byte vector
/// (tag, 16-bit big-endian length, then the raw bytes).
///
/// Returns `Err(TypeError)` when the list is longer than `STRING_EXT_MAX_LEN`,
/// when iteration yields an error, or when an element does not convert to `u8`.
fn try_cons_to_string_ext_byte_vec(cons: &Cons) -> Result<Vec<u8>, TypeError> {
    // STRING_EXT is used (https://github.com/erlang/otp/blob/e6a69b021bc2aee6aca42bd72583a96d06f4ba9d/erts/emulator/beam/external.c#L2893)
    // only after checking `is_external_string` (https://github.com/erlang/otp/blob/e6a69b021bc2aee6aca42bd72583a96d06f4ba9d/erts/emulator/beam/external.c#L2892).
    // `is_external_string` only checks if the element is an integer between 0 and 255. It does not
    // care about printability. (https://github.com/erlang/otp/blob/e6a69b021bc2aee6aca42bd72583a96d06f4ba9d/erts/emulator/beam/external.c#L3164-L3191)
    let mut character_byte_vec: Vec<u8> = Vec::new();
    for (index, result) in cons.into_iter().enumerate() {
        // Lists longer than the STRING_EXT limit cannot use this encoding.
        if index >= STRING_EXT_MAX_LEN {
            return Err(TypeError);
        }
        // An `Err` from the iterator or a non-`u8` element disqualifies the list.
        let element = result.map_err(|_| TypeError)?;
        let character_byte: u8 = element.try_into().map_err(|_| TypeError)?;
        character_byte_vec.push(character_byte);
    }
    let mut byte_vec = vec![Tag::String.into()];
    append_usize_as_u16(&mut byte_vec, character_byte_vec.len());
    byte_vec.extend_from_slice(&character_byte_vec);
    Ok(byte_vec)
}
|
use crate::types::SimulationEnvironment;
use crate::utilities::zeros1d;
// NOTE(review): this function appears truncated — `time` and `ret` are unused,
// the `m11` initializer is not terminated with `;`, and no `Vec<f64>` is built
// or returned. Recover the missing body before use; it will not compile as-is.
pub fn phase_1(time: f64, y: &Vec<f64>, simulation_environment: SimulationEnvironment) -> Vec<f64> {
    // Output vector sized to match the state vector `y` — currently unused.
    let ret = zeros1d(y.len() as u32);
    // NOTE(review): statement is incomplete (missing `;` and any following terms).
    let m11 = -1.0 * simulation_environment.projectile.proj_mass * simulation_environment.trebuchet.l_arm_lo.powi(2)
}
pub struct Solution;

impl Solution {
    /// LeetCode 397 "Integer Replacement": minimum number of operations to
    /// reduce `n` to 1, where an even value may be halved and an odd value may
    /// be incremented or decremented by 1.
    ///
    /// Works in `i64` internally so `n + 1` cannot overflow when
    /// `n == i32::MAX`, which removes the previous hard-coded `32` special
    /// case that papered over that overflow.
    pub fn integer_replacement(n: i32) -> i32 {
        let mut n = n as i64;
        let mut steps = 0;
        while n > 1 {
            if n % 2 == 0 {
                // Even: halving is always optimal.
                n /= 2;
            } else if n == 3 || n % 4 == 1 {
                // Odd with n % 4 == 1 (or the special case 3): decrementing
                // reaches a multiple of 4 (or 2), saving a step later.
                n -= 1;
            } else {
                // Odd with n % 4 == 3: incrementing reaches a multiple of 4.
                n += 1;
            }
            steps += 1;
        }
        steps
    }
}
use std::collections::HashMap;
/// Memoized recursive solver for the integer-replacement problem.
#[derive(Default)]
struct Solver {
    // Cache mapping a value to the minimum number of replacements for it.
    memo: HashMap<i32, i32>,
}

impl Solver {
    /// Create a solver with an empty memo table.
    fn new() -> Self {
        Self::default()
    }

    /// Minimum number of operations to reduce `n` to 1 (halve when even;
    /// increment or decrement when odd), memoized across recursive calls.
    fn solve(&mut self, n: i32) -> i32 {
        if n == 1 {
            return 0;
        }
        if let Some(&cached) = self.memo.get(&n) {
            return cached;
        }
        let steps = 1 + if n % 2 == 0 {
            self.solve(n / 2)
        } else {
            // Odd: try both neighbors and keep the cheaper path.
            self.solve(n - 1).min(self.solve(n + 1))
        };
        self.memo.insert(n, steps);
        steps
    }
}
#[test]
fn test0397() {
    // (input, expected minimum number of replacements)
    let cases = [(8, 3), (7, 4), (2147483647, 32)];
    for &(n, want) in cases.iter() {
        assert_eq!(Solution::integer_replacement(n), want);
    }
}
|
// #[macro_use]
// extern crate serde_derive;
// pub mod algorithm;
pub mod clustering;
pub mod drawing;
pub mod edge_bundling;
pub mod graph;
// pub mod grouping;
pub mod layout;
pub mod quality_metrics;
pub mod rng;
|
// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! This module contains the core algorithm for `WorkScheduler`, a component manager subsytem for
//! dispatching batches of work.
//!
//! The subsystem's interface consists of three FIDL prototocols::
//!
//! * `fuchsia.sys2.WorkScheduler`: A framework service for scheduling and canceling work.
//! * `fuchsia.sys2.Worker`: A service that `WorkScheduler` clients expose to the framework to be
//! notified when work units are dispatched.
//! * `fuchsia.sys2.WorkSchedulerControl`: A built-in service for controlling the period between
//! wakeup, batch, and dispatch cycles.
use {
crate::{
capability::*,
model::{error::ModelError, hooks::*, Realm},
work_scheduler::{dispatcher::Dispatcher, work_item::WorkItem},
},
cm_rust::{CapabilityPath, ExposeDecl, ExposeTarget},
failure::{format_err, Error},
fidl::endpoints::ServerEnd,
fidl_fuchsia_sys2 as fsys,
fuchsia_async::{self as fasync, Time, Timer},
fuchsia_zircon as zx,
futures::{
future::{AbortHandle, Abortable, BoxFuture},
lock::Mutex,
TryStreamExt,
},
lazy_static::lazy_static,
log::warn,
std::{convert::TryInto, sync::Arc},
};
// Well-known capability paths for the three FIDL protocols that make up the
// WorkScheduler subsystem (see the module docs above).
lazy_static! {
    // Path at which clients must expose their `fuchsia.sys2.Worker` implementation.
    pub static ref WORKER_CAPABILITY_PATH: CapabilityPath =
        "/svc/fuchsia.sys2.Worker".try_into().unwrap();
    // Path of the framework-provided `fuchsia.sys2.WorkScheduler` protocol.
    pub static ref WORK_SCHEDULER_CAPABILITY_PATH: CapabilityPath =
        "/svc/fuchsia.sys2.WorkScheduler".try_into().unwrap();
    // Path of the built-in `fuchsia.sys2.WorkSchedulerControl` protocol.
    pub static ref WORK_SCHEDULER_CONTROL_CAPABILITY_PATH: CapabilityPath =
        "/svc/fuchsia.sys2.WorkSchedulerControl".try_into().unwrap();
}
/// A self-managed timer instantiated by `WorkScheduler` to implement the "wakeup" part of its
/// wakeup, batch, and dispatch cycles.
///
/// Dropping a `WorkSchedulerTimer` aborts its pending wakeup (see the `Drop` impl below).
struct WorkSchedulerTimer {
    /// Next absolute monotonic time when a timeout should be triggered to wakeup, batch, and
    /// dispatch work.
    next_timeout_monotonic: i64,
    /// The handle used to abort the next wakeup, batch, and dispatch cycle if it needs to be
    /// replaced by a different timer to be woken up at a different time.
    abort_handle: AbortHandle,
}
impl WorkSchedulerTimer {
    /// Construct a new timer that will fire at monotonic time `next_timeout_monotonic`. When the
    /// timer fires, if it was not aborted, it invokes `work_scheduler.dispatch_work()`.
    fn new(next_timeout_monotonic: i64, work_scheduler: WorkScheduler) -> Self {
        let (abort_handle, abort_registration) = AbortHandle::new_pair();
        let timeout = Abortable::new(
            Timer::new(Time::from_nanos(next_timeout_monotonic)),
            abort_registration,
        );
        // Run the (abortable) timer on the executor; dropping `Self` aborts it
        // via `abort_handle` (see the `Drop` impl).
        fasync::spawn(async move {
            match timeout.await {
                // Timer fired without being aborted: run a dispatch cycle.
                Ok(_) => work_scheduler.dispatch_work().await,
                // Aborted: a different timer superseded this one; do nothing.
                Err(_) => {}
            }
        });
        WorkSchedulerTimer { next_timeout_monotonic, abort_handle }
    }
}
/// Automatically cancel a timer that is dropped by `WorkScheduler`. This allows `WorkScheduler` to
/// use patterns like:
///
/// WorkScheduler.timer = Some(WorkSchedulerTimer::new(deadline, self.clone()))
///
/// and expect any timer previously stored in `WorkScheduler.timer` to be aborted as a part of the
/// operation.
impl Drop for WorkSchedulerTimer {
    fn drop(&mut self) {
        // Aborting the abortable future prevents the pending `dispatch_work()` from running.
        self.abort_handle.abort();
    }
}
/// State maintained by a `WorkScheduler`, kept consistent via a single `Mutex`.
struct WorkSchedulerState {
    /// Scheduled work items that have not been dispatched. Kept sorted by
    /// `WorkItem::deadline_order` (earliest deadline first).
    work_items: Vec<WorkItem>,
    /// Period between wakeup, batch, dispatch cycles. Set to `None` when dispatching work is
    /// disabled. Presumably in nanoseconds of monotonic time (timers are built with
    /// `Time::from_nanos`) — TODO confirm.
    batch_period: Option<i64>,
    /// Current timer for next wakeup, batch, dispatch cycle, if any. Replacing or clearing it
    /// aborts the previous timer (see `WorkSchedulerTimer`'s `Drop` impl).
    timer: Option<WorkSchedulerTimer>,
}
impl WorkSchedulerState {
    /// Construct empty state: no work items, dispatching disabled, no pending timer.
    pub fn new() -> Self {
        WorkSchedulerState {
            work_items: Vec::new(),
            batch_period: None,
            timer: None,
        }
    }

    /// Install a timer that fires at `next_monotonic_deadline`. Any previously installed timer
    /// is dropped (and thereby aborted).
    fn set_timer(&mut self, next_monotonic_deadline: i64, work_scheduler: WorkScheduler) {
        let replacement = WorkSchedulerTimer::new(next_monotonic_deadline, work_scheduler);
        self.timer = Some(replacement);
    }
}
/// Provides a common facility for scheduling and canceling work. Each component instance manages
/// its work items in isolation from each other, but the `WorkScheduler` maintains a collection of
/// all items to make global scheduling decisions.
///
/// Cloning a `WorkScheduler` is cheap: clones share the same inner state.
#[derive(Clone)]
pub struct WorkScheduler {
    // Shared, mutex-guarded state; see `WorkSchedulerState`.
    inner: Arc<WorkSchedulerInner>,
}
impl WorkScheduler {
    /// Construct a `WorkScheduler` with empty state.
    pub fn new() -> Self {
        Self { inner: Arc::new(WorkSchedulerInner::new()) }
    }
    /// Hook registrations through which this scheduler provides its built-in and framework
    /// capabilities when the model routes them.
    pub fn hooks(&self) -> Vec<HookRegistration> {
        vec![
            HookRegistration {
                event_type: EventType::RouteBuiltinCapability,
                callback: self.inner.clone(),
            },
            HookRegistration {
                event_type: EventType::RouteFrameworkCapability,
                callback: self.inner.clone(),
            },
        ]
    }
    /// Schedule a new unit of work on behalf of `realm`. Takes the state lock; see
    /// `schedule_work_request` for error semantics.
    pub async fn schedule_work(
        &self,
        realm: Arc<Realm>,
        work_id: &str,
        work_request: &fsys::WorkRequest,
    ) -> Result<(), fsys::Error> {
        let mut state = self.inner.state.lock().await;
        self.schedule_work_request(&mut *state, realm, work_id, work_request)
    }
    /// Add a `WorkItem` for (`dispatcher`, `work_id`) to the already-locked `state`.
    ///
    /// Errors:
    /// * propagates `WorkItem::try_new` errors for an invalid `work_request`;
    /// * `InstanceAlreadyExists` when an equal item is already scheduled.
    fn schedule_work_request(
        &self,
        state: &mut WorkSchedulerState,
        dispatcher: Arc<dyn Dispatcher>,
        work_id: &str,
        work_request: &fsys::WorkRequest,
    ) -> Result<(), fsys::Error> {
        let work_items = &mut state.work_items;
        let work_item = WorkItem::try_new(dispatcher, work_id, work_request)?;
        if work_items.contains(&work_item) {
            return Err(fsys::Error::InstanceAlreadyExists);
        }
        work_items.push(work_item);
        // Keep items ordered by deadline so `update_timeout` can read the earliest at index 0.
        work_items.sort_by(WorkItem::deadline_order);
        self.update_timeout(&mut *state);
        Ok(())
    }
    /// Cancel the work unit identified by (`realm`, `work_id`). Takes the state lock; see
    /// `cancel_work_item` for error semantics.
    pub async fn cancel_work(&self, realm: Arc<Realm>, work_id: &str) -> Result<(), fsys::Error> {
        let mut state = self.inner.state.lock().await;
        self.cancel_work_item(&mut *state, realm, work_id)
    }
    /// Remove the `WorkItem` matching (`dispatcher`, `work_id`) from the already-locked `state`.
    /// Returns `InstanceNotFound` when no matching item exists.
    fn cancel_work_item(
        &self,
        state: &mut WorkSchedulerState,
        dispatcher: Arc<dyn Dispatcher>,
        work_id: &str,
    ) -> Result<(), fsys::Error> {
        let work_items = &mut state.work_items;
        let work_item = WorkItem::new_by_identity(dispatcher, work_id);
        // TODO(markdittmer): Use `work_items.remove_item(work_item)` if/when it becomes stable.
        let mut found = false;
        work_items.retain(|item| {
            let matches = &work_item == item;
            found = found || matches;
            !matches
        });
        if !found {
            return Err(fsys::Error::InstanceNotFound);
        }
        self.update_timeout(&mut *state);
        Ok(())
    }
    /// Current batch period, or `i64::MAX` when dispatching work is disabled.
    pub async fn get_batch_period(&self) -> Result<i64, fsys::Error> {
        let state = self.inner.state.lock().await;
        match state.batch_period {
            Some(batch_period) => Ok(batch_period),
            // TODO(markdittmer): GetBatchPeriod Ok case should probably return Option<i64> to
            // more directly reflect "dispatching work disabled".
            None => Ok(std::i64::MAX),
        }
    }
    /// Set the batch period. `i64::MAX` disables dispatching; non-positive values are rejected
    /// with `InvalidArguments`. Re-evaluates the wakeup timeout under the new period.
    pub async fn set_batch_period(&self, batch_period: i64) -> Result<(), fsys::Error> {
        if batch_period <= 0 {
            return Err(fsys::Error::InvalidArguments);
        }
        let mut state = self.inner.state.lock().await;
        if batch_period != std::i64::MAX {
            state.batch_period = Some(batch_period);
        } else {
            // TODO(markdittmer): SetBatchPeriod should probably accept Option<i64> to more directly
            // reflect "dispatching work disabled".
            state.batch_period = None;
        }
        self.update_timeout(&mut *state);
        Ok(())
    }
    /// Dispatch expired `work_items`. In the one-shot case expired items are dispatched and dropped
    /// from `work_items`. In the periodic case expired items are retained and given a new deadline.
    /// New deadlines must meet all of the following criteria:
    ///
    ///   now < new_deadline
    ///   and
    ///   now + period <= new_deadline
    ///   and
    ///   new_deadline = first_deadline + n * period
    ///
    /// Example:
    ///
    ///   F = First expected dispatch time for work item
    ///   C = Current expected dispatch time for work item
    ///   N = Now
    ///   * = New expected dispatch time for work item
    ///   | = Period marker for work item (that isn't otherwise labeled)
    ///
    ///   Period markers only:      ...------|----|----|----|----|----|----|----|----|...
    ///   Fully annotated timeline: ...------F----|----C----|----|----|-N--*----|----|...
    ///
    /// Example of edge case:
    ///
    ///   Now lands exactly on a period marker.
    ///
    ///   Period markers only:      ...------|----|----|----|----|----|----|----|----|...
    ///   Fully annotated timeline: ...------F----|----C----|----|----N----*----|----|...
    ///
    /// Example of edge case:
    ///
    ///   Period markers only:      ...------||||||||||||||||||||...
    ///   Fully annotated timeline: ...------F||C||||||||N*||||||...
    ///
    /// Example of edge case:
    ///
    ///   N=C. Denote M = N=C.
    ///
    ///   Period markers only:      ...------|----|----|----|----|----|...
    ///   Fully annotated timeline: ...------F----|----M----*----|----|...
    ///
    /// Note that updating `WorkItem` deadlines is _independent_ of updating `WorkScheduler` batch
    /// period. When either `work_items` (and their deadlines) change or `batch_period` changes, the
    /// next wakeup timeout is re-evaluated, but this involves updating _only_ the wakeup timeout,
    /// not any `WorkItem` deadlines.
    async fn dispatch_work(&self) {
        let mut state = self.inner.state.lock().await;
        let now = Time::now().into_nanos();
        let work_items = &mut state.work_items;
        work_items.retain(|item| {
            // Retain future work items.
            if item.next_deadline_monotonic > now {
                return true;
            }
            // TODO(markdittmer): Dispatch work item.
            // Only dispatched/past items to retain: periodic items that will recur.
            item.period.is_some()
        });
        // Update deadlines on dispatched periodic items.
        for mut item in work_items.iter_mut() {
            // Stop processing items once we reach future items (list is sorted by deadline).
            if item.next_deadline_monotonic > now {
                break;
            }
            // All retained dispatched/past items have a period (hence, safe to unwrap()).
            let period = item.period.unwrap();
            item.next_deadline_monotonic += if now < item.next_deadline_monotonic + period {
                // Normal case: next deadline after adding one period is in the future.
                period
            } else {
                // Skip deadlines in the past by advancing `next_deadline_monotonic` to the first
                // multiple of `period` after now
                period * (((now - item.next_deadline_monotonic) / period) + 1)
            };
        }
        // Restore the earliest-deadline-first invariant before re-evaluating the timeout.
        work_items.sort_by(WorkItem::deadline_order);
        self.update_timeout(&mut *state);
    }
    /// Update the timeout for the next wakeup, batch, and dispatch cycle, if necessary. The timeout
    /// should be disabled if either there are no `work_items` or there is no `batch_period`.
    /// Otherwise, a suitable timeout may already be set. A suitable timeout is one that satisfies:
    ///
    ///   timeout > work_deadline
    ///   and
    ///   timeout - batch_period < work_deadline
    ///   where
    ///     work_deadline is the earliest expected dispatch time of all `work_items`
    ///
    /// That is, a suitable timeout will trigger after there is something to schedule, but before a
    /// full `batch_period` has elapsed since the next schedulable `WorkItem` hit its deadline.
    ///
    /// If the current timeout is not suitable, then the timeout is updated to the unique suitable
    /// timeout rounded to the nearest `batch_deadline` (in absolute monotonic time):
    ///
    ///   timeout > work_deadline
    ///   and
    ///   timeout - batch_period < work_deadline
    ///   and
    ///   (timeout % batch_period) == 0
    ///   where
    ///     work_deadline is the earliest expected dispatch time of all `work_items`
    ///
    /// This scheme avoids updating the timeout whenever possible, while maintaining that all
    /// scheduled `WorkItem` objects will be dispatched no later than
    /// `WorkItem.next_deadline_monotonic + WorkScheduler.batch_period`.
    fn update_timeout(&self, state: &mut WorkSchedulerState) {
        if state.work_items.is_empty() || state.batch_period.is_none() {
            // No work to schedule. Abort any existing timer to wakeup and dispatch work.
            state.timer = None;
            return;
        }
        // `work_items` is kept sorted by deadline, so index 0 holds the earliest deadline.
        let work_deadline = state.work_items[0].next_deadline_monotonic;
        let batch_period = state.batch_period.unwrap();
        if let Some(timer) = &state.timer {
            let timeout = timer.next_timeout_monotonic;
            if timeout > work_deadline && timeout - batch_period < work_deadline {
                // There is an active timeout that will fire after the next deadline but before a
                // full batch period has elapsed after the deadline. Timer needs no update.
                return;
            }
        }
        // Define a deadline, an absolute monotonic time, as the soonest time after `work_deadline`
        // that is aligned with `batch_period`.
        let new_deadline = work_deadline - (work_deadline % batch_period) + batch_period;
        state.set_timer(new_deadline, self.clone());
    }
}
/// Shared interior of `WorkScheduler`: the mutex-guarded scheduler state. Also serves as the
/// `Hook` callback that provides the WorkScheduler capabilities (see the `Hook` impl below).
struct WorkSchedulerInner {
    // Single lock guarding all scheduler state (items, batch period, timer).
    state: Mutex<WorkSchedulerState>,
}
impl WorkSchedulerInner {
    /// Construct inner state with an empty `WorkSchedulerState`.
    pub fn new() -> Self {
        Self { state: Mutex::new(WorkSchedulerState::new()) }
    }
    /// Provide the built-in `WorkSchedulerControl` capability when its path is routed and no
    /// earlier provider claimed it; otherwise pass `capability_provider` through unchanged.
    async fn on_route_builtin_capability_async<'a>(
        self: Arc<Self>,
        capability: &'a ComponentManagerCapability,
        capability_provider: Option<Box<dyn ComponentManagerCapabilityProvider>>,
    ) -> Result<Option<Box<dyn ComponentManagerCapabilityProvider>>, ModelError> {
        match (&capability_provider, capability) {
            (None, ComponentManagerCapability::LegacyService(capability_path))
                if *capability_path == *WORK_SCHEDULER_CONTROL_CAPABILITY_PATH =>
            {
                Ok(Some(Box::new(WorkSchedulerControlCapabilityProvider::new(WorkScheduler {
                    inner: self.clone(),
                })) as Box<dyn ComponentManagerCapabilityProvider>))
            }
            _ => Ok(capability_provider),
        }
    }
    /// Provide the framework `WorkScheduler` capability for `realm` when its path is routed and
    /// no earlier provider claimed it, after verifying the realm exposes `Worker` to the
    /// framework (see `check_for_worker`). Otherwise pass `capability_provider` through.
    async fn on_route_framework_capability_async<'a>(
        self: Arc<Self>,
        realm: Arc<Realm>,
        capability: &'a ComponentManagerCapability,
        capability_provider: Option<Box<dyn ComponentManagerCapabilityProvider>>,
    ) -> Result<Option<Box<dyn ComponentManagerCapabilityProvider>>, ModelError> {
        match (&capability_provider, capability) {
            (None, ComponentManagerCapability::LegacyService(capability_path))
                if *capability_path == *WORK_SCHEDULER_CAPABILITY_PATH =>
            {
                Self::check_for_worker(&*realm).await?;
                Ok(Some(Box::new(WorkSchedulerCapabilityProvider::new(
                    realm.clone(),
                    WorkScheduler { inner: self.clone() },
                )) as Box<dyn ComponentManagerCapabilityProvider>))
            }
            _ => Ok(capability_provider),
        }
    }
    /// Verify that `realm` exposes the `Worker` protocol to the framework as a legacy service;
    /// returns a capability-discovery error otherwise.
    ///
    /// Panics when the realm's state is not resolved — assumes callers only route capabilities
    /// for resolved realms; TODO confirm.
    async fn check_for_worker(realm: &Realm) -> Result<(), ModelError> {
        let realm_state = realm.lock_state().await;
        let realm_state = realm_state.as_ref().expect("check_for_worker: not resolved");
        let decl = realm_state.decl();
        decl.exposes
            .iter()
            .find(|&expose| match expose {
                ExposeDecl::LegacyService(ls) => ls.target_path == *WORKER_CAPABILITY_PATH,
                _ => false,
            })
            .map_or_else(
                // No expose entry for the Worker path at all.
                || {
                    Err(ModelError::capability_discovery_error(format_err!(
                        "component uses WorkScheduler without exposing Worker: {}",
                        realm.abs_moniker
                    )))
                },
                // An entry exists: it must be a legacy service exposed to the framework.
                |expose| match expose {
                    ExposeDecl::LegacyService(ls) => match ls.target {
                        ExposeTarget::Framework => Ok(()),
                        _ => Err(ModelError::capability_discovery_error(format_err!(
                            "component exposes Worker, but not as legacy service to framework: {}",
                            realm.abs_moniker
                        ))),
                    },
                    _ => Err(ModelError::capability_discovery_error(format_err!(
                        "component exposes Worker, but not as legacy service to framework: {}",
                        realm.abs_moniker
                    ))),
                },
            )
    }
}
impl Hook for WorkSchedulerInner {
    /// Route WorkScheduler-provided capabilities in response to model events; all other events
    /// are ignored.
    fn on<'a>(self: Arc<Self>, event: &'a Event) -> BoxFuture<'a, Result<(), ModelError>> {
        Box::pin(async move {
            match event {
                Event::RouteBuiltinCapability { realm: _, capability, capability_provider } => {
                    // Take the current provider (if any) and possibly replace it with ours.
                    let mut capability_provider = capability_provider.lock().await;
                    *capability_provider = self
                        .on_route_builtin_capability_async(capability, capability_provider.take())
                        .await?;
                }
                Event::RouteFrameworkCapability { realm, capability, capability_provider } => {
                    let mut capability_provider = capability_provider.lock().await;
                    *capability_provider = self
                        .on_route_framework_capability_async(
                            realm.clone(),
                            capability,
                            capability_provider.take(),
                        )
                        .await?;
                }
                _ => {}
            };
            Ok(())
        })
    }
}
/// `ComponentManagerCapabilityProvider` to invoke `WorkSchedulerControl` FIDL API bound to a
/// particular `WorkScheduler` object.
struct WorkSchedulerControlCapabilityProvider {
    // Scheduler whose batch period this provider reads and writes.
    work_scheduler: WorkScheduler,
}
impl WorkSchedulerControlCapabilityProvider {
    /// Bind a provider to the given scheduler.
    pub fn new(work_scheduler: WorkScheduler) -> Self {
        Self { work_scheduler }
    }
    /// Service `open` invocation via an event loop that dispatches FIDL operations to
    /// `work_scheduler`.
    async fn open_async(
        work_scheduler: WorkScheduler,
        mut stream: fsys::WorkSchedulerControlRequestStream,
    ) -> Result<(), Error> {
        // Serve requests until the client closes the channel or a stream error occurs.
        while let Some(request) = stream.try_next().await? {
            match request {
                fsys::WorkSchedulerControlRequest::GetBatchPeriod { responder, .. } => {
                    let mut result = work_scheduler.get_batch_period().await;
                    responder.send(&mut result)?;
                }
                fsys::WorkSchedulerControlRequest::SetBatchPeriod { responder, batch_period, .. } => {
                    let mut result = work_scheduler.set_batch_period(batch_period).await;
                    responder.send(&mut result)?;
                }
            }
        }
        Ok(())
    }
}
impl ComponentManagerCapabilityProvider for WorkSchedulerControlCapabilityProvider {
    /// Spawn an event loop to service `WorkSchedulerControl` FIDL operations.
    ///
    /// `_flags`, `_open_mode`, and `_relative_path` are ignored; `server_end` is consumed and
    /// served until the client closes it. Always returns `Ok(())` immediately — serving happens
    /// on the spawned task.
    fn open(
        &self,
        _flags: u32,
        _open_mode: u32,
        _relative_path: String,
        server_end: zx::Channel,
    ) -> BoxFuture<Result<(), ModelError>> {
        let server_end = ServerEnd::<fsys::WorkSchedulerControlMarker>::new(server_end);
        let stream: fsys::WorkSchedulerControlRequestStream = server_end.into_stream().unwrap();
        let work_scheduler = self.work_scheduler.clone();
        fasync::spawn(async move {
            let result = Self::open_async(work_scheduler, stream).await;
            if let Err(e) = result {
                // TODO(markdittmer): Set an epitaph to indicate this was an unexpected error.
                // Bug fix: the message previously misattributed the failure to
                // `WorkSchedulerCapabilityProvider`; this is the Control provider.
                warn!("WorkSchedulerControlCapabilityProvider.open failed: {}", e);
            }
        });
        Box::pin(async { Ok(()) })
    }
}
/// `Capability` to invoke `WorkScheduler` FIDL API bound to a particular `WorkScheduler` object and
/// component instance's `AbsoluteMoniker`. All FIDL operations bound to the same object and moniker
/// observe the same collection of `WorkItem` objects.
struct WorkSchedulerCapabilityProvider {
    // Component instance on whose behalf work is scheduled and canceled.
    realm: Arc<Realm>,
    // Shared scheduler backing all providers.
    work_scheduler: WorkScheduler,
}
impl WorkSchedulerCapabilityProvider {
    /// Bind a provider to the given realm and scheduler.
    pub fn new(realm: Arc<Realm>, work_scheduler: WorkScheduler) -> Self {
        Self { realm, work_scheduler }
    }
    /// Service `open` invocation via an event loop that dispatches FIDL operations to
    /// `work_scheduler`.
    async fn open_async(
        work_scheduler: WorkScheduler,
        realm: Arc<Realm>,
        mut stream: fsys::WorkSchedulerRequestStream,
    ) -> Result<(), Error> {
        // Serve requests until the client closes the channel or a stream error occurs.
        while let Some(request) = stream.try_next().await? {
            match request {
                fsys::WorkSchedulerRequest::ScheduleWork { responder, work_id, work_request, .. } => {
                    let mut result =
                        work_scheduler.schedule_work(realm.clone(), &work_id, &work_request).await;
                    responder.send(&mut result)?;
                }
                fsys::WorkSchedulerRequest::CancelWork { responder, work_id, .. } => {
                    let mut result = work_scheduler.cancel_work(realm.clone(), &work_id).await;
                    responder.send(&mut result)?;
                }
            }
        }
        Ok(())
    }
}
impl ComponentManagerCapabilityProvider for WorkSchedulerCapabilityProvider {
    /// Spawn an event loop to service `WorkScheduler` FIDL operations.
    ///
    /// `_flags`, `_open_mode`, and `_relative_path` are ignored; `server_end` is consumed and
    /// served until the client closes it. Always returns `Ok(())` immediately — serving happens
    /// on the spawned task.
    fn open(
        &self,
        _flags: u32,
        _open_mode: u32,
        _relative_path: String,
        server_end: zx::Channel,
    ) -> BoxFuture<Result<(), ModelError>> {
        let server_end = ServerEnd::<fsys::WorkSchedulerMarker>::new(server_end);
        let stream: fsys::WorkSchedulerRequestStream = server_end.into_stream().unwrap();
        let work_scheduler = self.work_scheduler.clone();
        let realm = self.realm.clone();
        fasync::spawn(async move {
            let result = Self::open_async(work_scheduler, realm, stream).await;
            if let Err(e) = result {
                // TODO(markdittmer): Set an epitaph to indicate this was an unexpected error.
                warn!("WorkSchedulerCapabilityProvider.open failed: {}", e);
            }
        });
        Box::pin(async { Ok(()) })
    }
}
#[cfg(test)]
mod tests {
use {
super::*,
crate::model::{AbsoluteMoniker, ChildMoniker, ResolverRegistry},
fidl::endpoints::ClientEnd,
fidl_fuchsia_sys2::WorkSchedulerControlMarker,
fuchsia_async::{Executor, Time, WaitState},
futures::{future::BoxFuture, Future},
};
/// Time is measured in nanoseconds. This provides a constant symbol for one second.
const SECOND: i64 = 1000000000;
// Use arbitrary start monolithic time. This will surface bugs that, for example, are not
// apparent when "time starts at 0".
const FAKE_MONOTONIC_TIME: i64 = 374789234875;
impl Dispatcher for AbsoluteMoniker {
fn abs_moniker(&self) -> &AbsoluteMoniker {
&self
}
fn dispatch(&self, _work_item: WorkItem) -> BoxFuture<Result<(), fsys::Error>> {
Box::pin(async move { Err(fsys::Error::InvalidArguments) })
}
}
async fn schedule_work_request(
work_scheduler: &WorkScheduler,
abs_moniker: &AbsoluteMoniker,
work_id: &str,
work_request: &fsys::WorkRequest,
) -> Result<(), fsys::Error> {
let mut state = work_scheduler.inner.state.lock().await;
work_scheduler.schedule_work_request(
&mut *state,
Arc::new(abs_moniker.clone()),
work_id,
work_request,
)
}
async fn cancel_work_item(
work_scheduler: &WorkScheduler,
abs_moniker: &AbsoluteMoniker,
work_id: &str,
) -> Result<(), fsys::Error> {
let mut state = work_scheduler.inner.state.lock().await;
work_scheduler.cancel_work_item(&mut *state, Arc::new(abs_moniker.clone()), work_id)
}
async fn get_work_status(
work_scheduler: &WorkScheduler,
abs_moniker: &AbsoluteMoniker,
work_id: &str,
) -> Result<(i64, Option<i64>), fsys::Error> {
let state = work_scheduler.inner.state.lock().await;
let work_items = &state.work_items;
match work_items.iter().find(|work_item| {
work_item.dispatcher.abs_moniker() == abs_moniker && work_item.id == work_id
}) {
Some(work_item) => Ok((work_item.next_deadline_monotonic, work_item.period)),
None => Err(fsys::Error::InstanceNotFound),
}
}
async fn get_all_by_deadline(work_scheduler: &WorkScheduler) -> Vec<WorkItem> {
let state = work_scheduler.inner.state.lock().await;
state.work_items.clone()
}
fn child(parent: &AbsoluteMoniker, name: &str) -> AbsoluteMoniker {
parent.child(ChildMoniker::new(name.to_string(), None, 0))
}
#[fuchsia_async::run_singlethreaded(test)]
async fn work_scheduler_basic() {
let work_scheduler = WorkScheduler::new();
let root = AbsoluteMoniker::root();
let a = child(&root, "a");
let b = child(&a, "b");
let c = child(&b, "c");
let now_once = fsys::WorkRequest {
start: Some(fsys::Start::MonotonicTime(FAKE_MONOTONIC_TIME)),
period: None,
};
let each_second = fsys::WorkRequest {
start: Some(fsys::Start::MonotonicTime(FAKE_MONOTONIC_TIME + SECOND)),
period: Some(SECOND),
};
let in_an_hour = fsys::WorkRequest {
start: Some(fsys::Start::MonotonicTime(FAKE_MONOTONIC_TIME + (SECOND * 60 * 60))),
period: None,
};
// Schedule different 2 out of 3 requests on each component instance.
assert_eq!(Ok(()), schedule_work_request(&work_scheduler, &a, "NOW_ONCE", &now_once).await);
assert_eq!(
Ok(()),
schedule_work_request(&work_scheduler, &a, "EACH_SECOND", &each_second).await
);
assert_eq!(
Ok(()),
schedule_work_request(&work_scheduler, &b, "EACH_SECOND", &each_second).await
);
assert_eq!(
Ok(()),
schedule_work_request(&work_scheduler, &b, "IN_AN_HOUR", &in_an_hour).await
);
assert_eq!(
Ok(()),
schedule_work_request(&work_scheduler, &c, "IN_AN_HOUR", &in_an_hour).await
);
assert_eq!(Ok(()), schedule_work_request(&work_scheduler, &c, "NOW_ONCE", &now_once).await);
assert_eq!(
Ok((FAKE_MONOTONIC_TIME, None)),
get_work_status(&work_scheduler, &a, "NOW_ONCE").await
);
assert_eq!(
Ok((FAKE_MONOTONIC_TIME + SECOND, Some(SECOND))),
get_work_status(&work_scheduler, &a, "EACH_SECOND").await
);
assert_eq!(
Err(fsys::Error::InstanceNotFound),
get_work_status(&work_scheduler, &a, "IN_AN_HOUR").await
);
assert_eq!(
Err(fsys::Error::InstanceNotFound),
get_work_status(&work_scheduler, &b, "NOW_ONCE").await
);
assert_eq!(
Ok((FAKE_MONOTONIC_TIME + SECOND, Some(SECOND))),
get_work_status(&work_scheduler, &b, "EACH_SECOND").await
);
assert_eq!(
Ok((FAKE_MONOTONIC_TIME + (SECOND * 60 * 60), None)),
get_work_status(&work_scheduler, &b, "IN_AN_HOUR").await
);
assert_eq!(
Ok((FAKE_MONOTONIC_TIME, None)),
get_work_status(&work_scheduler, &c, "NOW_ONCE").await
);
assert_eq!(
Err(fsys::Error::InstanceNotFound),
get_work_status(&work_scheduler, &c, "EACH_SECOND").await
);
assert_eq!(
Ok((FAKE_MONOTONIC_TIME + (SECOND * 60 * 60), None)),
get_work_status(&work_scheduler, &c, "IN_AN_HOUR").await
);
// Cancel a's NOW_ONCE. Confirm it only affects a's scheduled work.
assert_eq!(Ok(()), cancel_work_item(&work_scheduler, &a, "NOW_ONCE").await);
assert_eq!(
Err(fsys::Error::InstanceNotFound),
get_work_status(&work_scheduler, &a, "NOW_ONCE").await
);
assert_eq!(
Ok((FAKE_MONOTONIC_TIME + SECOND, Some(SECOND))),
get_work_status(&work_scheduler, &a, "EACH_SECOND").await
);
assert_eq!(
Err(fsys::Error::InstanceNotFound),
get_work_status(&work_scheduler, &a, "IN_AN_HOUR").await
);
assert_eq!(
Err(fsys::Error::InstanceNotFound),
get_work_status(&work_scheduler, &b, "NOW_ONCE").await
);
assert_eq!(
Ok((FAKE_MONOTONIC_TIME + SECOND, Some(SECOND))),
get_work_status(&work_scheduler, &b, "EACH_SECOND").await
);
assert_eq!(
Ok((FAKE_MONOTONIC_TIME + (SECOND * 60 * 60), None)),
get_work_status(&work_scheduler, &b, "IN_AN_HOUR").await
);
assert_eq!(
Ok((FAKE_MONOTONIC_TIME, None)),
get_work_status(&work_scheduler, &c, "NOW_ONCE").await
);
assert_eq!(
Err(fsys::Error::InstanceNotFound),
get_work_status(&work_scheduler, &c, "EACH_SECOND").await
);
assert_eq!(
Ok((FAKE_MONOTONIC_TIME + (SECOND * 60 * 60), None)),
get_work_status(&work_scheduler, &c, "IN_AN_HOUR").await
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn work_scheduler_deadline_order() {
let work_scheduler = WorkScheduler::new();
let root = AbsoluteMoniker::root();
let a = child(&root, "a");
let b = child(&a, "b");
let c = child(&b, "c");
let now_once = fsys::WorkRequest {
start: Some(fsys::Start::MonotonicTime(FAKE_MONOTONIC_TIME)),
period: None,
};
let each_second = fsys::WorkRequest {
start: Some(fsys::Start::MonotonicTime(FAKE_MONOTONIC_TIME + SECOND)),
period: Some(SECOND),
};
let in_an_hour = fsys::WorkRequest {
start: Some(fsys::Start::MonotonicTime(FAKE_MONOTONIC_TIME + (SECOND * 60 * 60))),
period: None,
};
assert_eq!(
Ok(()),
schedule_work_request(&work_scheduler, &a, "EACH_SECOND", &each_second).await
);
assert_eq!(Ok(()), schedule_work_request(&work_scheduler, &c, "NOW_ONCE", &now_once).await);
assert_eq!(
Ok(()),
schedule_work_request(&work_scheduler, &b, "IN_AN_HOUR", &in_an_hour).await
);
// Order should match deadlines, not order of scheduling or component topology.
assert_eq!(
vec![
WorkItem::new(Arc::new(c), "NOW_ONCE", FAKE_MONOTONIC_TIME, None),
WorkItem::new(
Arc::new(a),
"EACH_SECOND",
FAKE_MONOTONIC_TIME + SECOND,
Some(SECOND),
),
WorkItem::new(
Arc::new(b),
"IN_AN_HOUR",
FAKE_MONOTONIC_TIME + (SECOND * 60 * 60),
None,
),
],
get_all_by_deadline(&work_scheduler).await
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn work_scheduler_batch_period() {
let work_scheduler = WorkScheduler::new();
assert_eq!(Ok(std::i64::MAX), work_scheduler.get_batch_period().await);
assert_eq!(Ok(()), work_scheduler.set_batch_period(SECOND).await);
assert_eq!(Ok(SECOND), work_scheduler.get_batch_period().await)
}
#[fuchsia_async::run_singlethreaded(test)]
async fn work_scheduler_batch_period_error() {
let work_scheduler = WorkScheduler::new();
assert_eq!(Err(fsys::Error::InvalidArguments), work_scheduler.set_batch_period(0).await);
assert_eq!(Err(fsys::Error::InvalidArguments), work_scheduler.set_batch_period(-1).await)
}
struct TestWorkUnit {
start: i64,
work_item: WorkItem,
}
impl TestWorkUnit {
fn new(
start: i64,
abs_moniker: &AbsoluteMoniker,
id: &str,
next_deadline_monotonic: i64,
period: Option<i64>,
) -> Self {
TestWorkUnit {
start,
work_item: WorkItem::new(
Arc::new(abs_moniker.clone()),
id,
next_deadline_monotonic,
period,
),
}
}
}
struct TimeTest {
executor: Executor,
}
impl TimeTest {
fn new() -> Self {
let executor = Executor::new_with_fake_time().unwrap();
executor.set_fake_time(Time::from_nanos(0));
TimeTest { executor }
}
fn set_time(&mut self, time: i64) {
self.executor.set_fake_time(Time::from_nanos(time));
}
fn run_and_sync<F>(&mut self, fut: &mut F)
where
F: Future + Unpin,
{
assert!(self.executor.run_until_stalled(fut).is_ready());
while self.executor.is_waiting() == WaitState::Ready {
assert!(self.executor.run_until_stalled(&mut Box::pin(async {})).is_ready());
}
}
fn set_time_and_run_timers(&mut self, time: i64) {
self.set_time(time);
assert!(self.executor.wake_expired_timers());
while self.executor.is_waiting() == WaitState::Ready {
assert!(self.executor.run_until_stalled(&mut Box::pin(async {})).is_ready());
}
}
fn assert_no_timers(&mut self) {
assert_eq!(None, self.executor.wake_next_timer());
}
fn assert_next_timer_at(&mut self, time: i64) {
assert_eq!(WaitState::Waiting(Time::from_nanos(time)), self.executor.is_waiting());
}
/// Asserts that `work_scheduler` holds exactly the work items described by
/// `test_work_units` (in deadline order), then checks timing invariants of
/// every item against the current fake time.
fn assert_work_items(
    &mut self,
    work_scheduler: &WorkScheduler,
    test_work_units: Vec<TestWorkUnit>,
) {
    self.run_and_sync(&mut Box::pin(async {
        // Check collection of work items.
        let work_items: Vec<WorkItem> = test_work_units
            .iter()
            .map(|test_work_unit| test_work_unit.work_item.clone())
            .collect();
        assert_eq!(work_items, get_all_by_deadline(&work_scheduler).await);
        // Check invariants on relationships between `now` and `WorkItem` state.
        let now = Time::now().into_nanos();
        for test_work_unit in test_work_units.iter() {
            let work_item = &test_work_unit.work_item;
            let deadline = work_item.next_deadline_monotonic;
            println!(
                "Now: {}\nDeadline: {}\nStart: {}",
                now, deadline, test_work_unit.start
            );
            // Either:
            // 1. This is a check for initial state, in which case allow now=deadline=0, or
            // 2. All deadlines should be in the future.
            assert!(
                (now == 0 && deadline == now) || now < deadline,
                "Expected either
1. This is a check for initial state, so allow now=deadline=0, or
2. All deadlines should be in the future."
            );
            if let Some(period) = work_item.period {
                println!("Period: {}", period);
                // All periodic deadlines should be either:
                // 1. Waiting to be dispatched for the first time, or
                // 2. At most one period into the future (for otherwise, a period would be
                // skipped).
                assert!(
                    now < test_work_unit.start || now + period >= deadline,
                    "Expected all periodic deadlines should be either:
1. Waiting to be dispatched for the first time, or
2. At most one period into the future (for otherwise, a period would
be skipped"
                );
                // All periodic deadlines should be aligned to:
                // `deadline = start + n*period` for some non-negative integer, `n`.
                assert_eq!(
                    0,
                    (deadline - test_work_unit.start) % period,
                    "Expected all periodic deadlines should be aligned to:
`deadline = start + n*period` for some non-negative integer, `n`."
                );
            }
        }
    }));
}
/// Asserts that `work_scheduler` currently has no queued work items.
fn assert_no_work(&mut self, work_scheduler: &WorkScheduler) {
    self.run_and_sync(&mut Box::pin(async {
        assert_eq!(vec![] as Vec<WorkItem>, get_all_by_deadline(&work_scheduler).await);
    }));
}
}
// Reading the batch period alone must not register any timers.
#[test]
fn work_scheduler_time_get_batch_period_queues_nothing() {
    let mut t = TimeTest::new();
    t.run_and_sync(&mut Box::pin(async {
        let work_scheduler = WorkScheduler::new();
        // The default batch period is "never" (i64::MAX).
        assert_eq!(Ok(std::i64::MAX), work_scheduler.get_batch_period().await);
    }));
    t.assert_no_timers();
}
// Setting a finite batch period with no scheduled work must not register
// any timers.
#[test]
fn work_scheduler_time_set_batch_period_no_work_queues_nothing() {
    let mut t = TimeTest::new();
    t.run_and_sync(&mut Box::pin(async {
        let work_scheduler = WorkScheduler::new();
        assert_eq!(Ok(()), work_scheduler.set_batch_period(1).await);
    }));
    t.assert_no_timers();
}
// Scheduling work while the batch period is still infinite must not
// register timers: nothing would ever be dispatched.
#[test]
fn work_scheduler_time_schedule_inf_batch_period_queues_nothing() {
    let mut t = TimeTest::new();
    t.run_and_sync(&mut Box::pin(async {
        let work_scheduler = WorkScheduler::new();
        let root = AbsoluteMoniker::root();
        let now_once =
            fsys::WorkRequest { start: Some(fsys::Start::MonotonicTime(0)), period: None };
        assert_eq!(
            Ok(()),
            schedule_work_request(&work_scheduler, &root, "NOW_ONCE", &now_once).await
        );
    }));
    t.assert_no_timers();
}
// With a finite batch period, a one-shot work item arms a timer and is
// dispatched (and removed) when that timer fires.
#[test]
fn work_scheduler_time_schedule_finite_batch_period_queues_and_dispatches() {
    let mut t = TimeTest::new();
    let work_scheduler = WorkScheduler::new();
    let root = AbsoluteMoniker::root();
    // Set batch period and queue a unit of work.
    t.run_and_sync(&mut Box::pin(async {
        assert_eq!(Ok(()), work_scheduler.set_batch_period(1).await);
        let now_once =
            fsys::WorkRequest { start: Some(fsys::Start::MonotonicTime(0)), period: None };
        assert_eq!(
            Ok(()),
            schedule_work_request(&work_scheduler, &root, "NOW_ONCE", &now_once).await
        );
    }));
    // Confirm timer and work item.
    t.assert_next_timer_at(1);
    t.assert_work_items(
        &work_scheduler,
        vec![TestWorkUnit::new(0, &root, "NOW_ONCE", 0, None)],
    );
    // Run work stemming from timer and confirm no more work items.
    t.set_time_and_run_timers(1);
    t.assert_no_work(&work_scheduler);
}
// A periodic work item must be re-queued after each dispatch with its
// deadline advanced past `now`, and the timer re-armed.
#[test]
fn work_scheduler_time_periodic_stays_queued() {
    let mut t = TimeTest::new();
    let work_scheduler = WorkScheduler::new();
    let root = AbsoluteMoniker::root();
    // Set batch period and queue a unit of work.
    t.run_and_sync(&mut Box::pin(async {
        assert_eq!(Ok(()), work_scheduler.set_batch_period(1).await);
        let every_moment =
            fsys::WorkRequest { start: Some(fsys::Start::MonotonicTime(0)), period: Some(1) };
        assert_eq!(
            Ok(()),
            schedule_work_request(&work_scheduler, &root, "EVERY_MOMENT", &every_moment).await
        );
    }));
    // Confirm timer and work item.
    t.assert_next_timer_at(1);
    t.assert_work_items(
        &work_scheduler,
        vec![TestWorkUnit::new(0, &root, "EVERY_MOMENT", 0, Some(1))],
    );
    // Dispatch work and assert next periodic work item and timer.
    t.set_time_and_run_timers(1);
    t.assert_work_items(
        &work_scheduler,
        vec![TestWorkUnit::new(0, &root, "EVERY_MOMENT", 2, Some(1))],
    );
    t.assert_next_timer_at(3);
}
// Adding a work item with an earlier deadline than any queued item must
// pull the pending timer back; adding a later one must leave it unchanged.
#[test]
fn work_scheduler_time_timeout_updates_when_earlier_work_item_added() {
    let mut t = TimeTest::new();
    let work_scheduler = WorkScheduler::new();
    let root = AbsoluteMoniker::root();
    // Set batch period and queue a unit of work.
    t.run_and_sync(&mut Box::pin(async {
        assert_eq!(Ok(()), work_scheduler.set_batch_period(5).await);
        let at_nine =
            fsys::WorkRequest { start: Some(fsys::Start::MonotonicTime(9)), period: None };
        assert_eq!(
            Ok(()),
            schedule_work_request(&work_scheduler, &root, "AT_NINE", &at_nine).await
        );
    }));
    // Confirm timer and work item.
    t.assert_next_timer_at(10);
    t.assert_work_items(&work_scheduler, vec![TestWorkUnit::new(9, &root, "AT_NINE", 9, None)]);
    // Queue unit of work with deadline _earlier_ than first unit of work.
    t.run_and_sync(&mut Box::pin(async {
        let at_four =
            fsys::WorkRequest { start: Some(fsys::Start::MonotonicTime(4)), period: None };
        assert_eq!(
            Ok(()),
            schedule_work_request(&work_scheduler, &root, "AT_FOUR", &at_four).await
        );
    }));
    // Confirm timer moved _back_, and work units are as expected.
    t.assert_next_timer_at(5);
    t.assert_work_items(
        &work_scheduler,
        vec![
            TestWorkUnit::new(4, &root, "AT_FOUR", 4, None),
            TestWorkUnit::new(9, &root, "AT_NINE", 9, None),
        ],
    );
    // Dispatch work and assert remaining work and timer.
    t.set_time_and_run_timers(5);
    t.assert_work_items(&work_scheduler, vec![TestWorkUnit::new(9, &root, "AT_NINE", 9, None)]);
    t.assert_next_timer_at(10);
    // Queue unit of work with deadline _later_ than existing unit of work.
    t.run_and_sync(&mut Box::pin(async {
        let at_ten =
            fsys::WorkRequest { start: Some(fsys::Start::MonotonicTime(10)), period: None };
        assert_eq!(
            Ok(()),
            schedule_work_request(&work_scheduler, &root, "AT_TEN", &at_ten).await
        );
    }));
    // Confirm unchanged, and work units are as expected.
    t.assert_next_timer_at(10);
    t.assert_work_items(
        &work_scheduler,
        vec![
            TestWorkUnit::new(9, &root, "AT_NINE", 9, None),
            TestWorkUnit::new(10, &root, "AT_TEN", 10, None),
        ],
    );
    // Dispatch work and assert no work left.
    t.set_time_and_run_timers(10);
    t.assert_no_work(&work_scheduler);
}
// If the timer fires late — past the deadlines of ALL queued one-shot
// items — a single dispatch must drain every overdue item.
#[test]
fn work_scheduler_time_late_timer_fire() {
    let mut t = TimeTest::new();
    let work_scheduler = WorkScheduler::new();
    let root = AbsoluteMoniker::root();
    // Set period and schedule two work items, one of which _should_ be dispatched in a second
    // cycle.
    t.run_and_sync(&mut Box::pin(async {
        assert_eq!(Ok(()), work_scheduler.set_batch_period(5).await);
        let at_four =
            fsys::WorkRequest { start: Some(fsys::Start::MonotonicTime(4)), period: None };
        assert_eq!(
            Ok(()),
            schedule_work_request(&work_scheduler, &root, "AT_FOUR", &at_four).await
        );
        let at_nine =
            fsys::WorkRequest { start: Some(fsys::Start::MonotonicTime(9)), period: None };
        assert_eq!(
            Ok(()),
            schedule_work_request(&work_scheduler, &root, "AT_NINE", &at_nine).await
        );
    }));
    // Confirm timer and work items.
    t.assert_next_timer_at(5);
    t.assert_work_items(
        &work_scheduler,
        vec![
            TestWorkUnit::new(4, &root, "AT_FOUR", 4, None),
            TestWorkUnit::new(9, &root, "AT_NINE", 9, None),
        ],
    );
    // Simulate delayed dispatch: System load or some other factor caused dispatch of work to be
    // delayed beyond the deadline of _both_ units of work.
    t.set_time_and_run_timers(16);
    // Confirm timers and dispatched units.
    t.assert_no_timers();
    t.assert_no_work(&work_scheduler);
}
// A very late timer fire must still keep a periodic item queued, with its
// next deadline re-aligned to `start + n*period` just past `now`.
#[test]
fn work_scheduler_time_late_timer_fire_periodic_work_item() {
    let mut t = TimeTest::new();
    let work_scheduler = WorkScheduler::new();
    let root = AbsoluteMoniker::root();
    // Set period and schedule two work items, one of which _should_ be dispatched in a second
    // cycle.
    t.run_and_sync(&mut Box::pin(async {
        assert_eq!(Ok(()), work_scheduler.set_batch_period(5).await);
        let at_four =
            fsys::WorkRequest { start: Some(fsys::Start::MonotonicTime(4)), period: None };
        assert_eq!(
            Ok(()),
            schedule_work_request(&work_scheduler, &root, "AT_FOUR", &at_four).await
        );
        let at_nine_periodic =
            fsys::WorkRequest { start: Some(fsys::Start::MonotonicTime(9)), period: Some(5) };
        assert_eq!(
            Ok(()),
            schedule_work_request(
                &work_scheduler,
                &root,
                "AT_NINE_PERIODIC_FIVE",
                &at_nine_periodic
            )
            .await
        );
    }));
    // Confirm timer and work items.
    t.assert_next_timer_at(5);
    t.assert_work_items(
        &work_scheduler,
        vec![
            TestWorkUnit::new(4, &root, "AT_FOUR", 4, None),
            TestWorkUnit::new(9, &root, "AT_NINE_PERIODIC_FIVE", 9, Some(5)),
        ],
    );
    // Simulate _seriously_ delayed dispatch: System load or some other
    // factor caused dispatch of work to be delayed _way_ beyond the
    // deadline of _both_ units of work.
    t.set_time_and_run_timers(116);
    // Confirm timer set to next batch period, and periodic work item still queued.
    t.assert_next_timer_at(120);
    t.assert_work_items(
        &work_scheduler,
        // Time:
        //   now=116
        // WorkItem:
        //   start=9
        //   period=5
        //
        // Updated WorkItem.period should be:
        //   WorkItem.next_deadline_monotonic = 9 + 5*n
        // where
        //   Time.now < WorkItem.next_deadline_monotonic
        // and
        //   Time.now + WorkItem.period > WorkItem.next_deadline_monotonic
        //
        // WorkItem.next_deadline_monotonic = 119 = 9 + (22 * 5).
        vec![TestWorkUnit::new(9, &root, "AT_NINE_PERIODIC_FIVE", 119, Some(5))],
    );
}
// End-to-end: install WorkScheduler hooks, route the built-in
// WorkSchedulerControl capability for a test realm, open it over a zircon
// channel, and complete one GetBatchPeriod FIDL round trip.
#[fasync::run_singlethreaded(test)]
async fn connect_to_work_scheduler_control_service() -> Result<(), Error> {
    let work_scheduler = WorkScheduler::new();
    let hooks = Hooks::new(None);
    hooks.install(work_scheduler.hooks()).await;
    let capability_provider = Arc::new(Mutex::new(None));
    let capability = ComponentManagerCapability::LegacyService(
        WORK_SCHEDULER_CONTROL_CAPABILITY_PATH.clone(),
    );
    let (client, server) = zx::Channel::create()?;
    let realm = {
        let resolver = ResolverRegistry::new();
        let root_component_url = "test:///root".to_string();
        Arc::new(Realm::new_root_realm(resolver, root_component_url))
    };
    // Dispatching this event asks the installed hooks to populate
    // `capability_provider`.
    let event = Event::RouteBuiltinCapability {
        realm: realm.clone(),
        capability: capability.clone(),
        capability_provider: capability_provider.clone(),
    };
    hooks.dispatch(&event).await?;
    let capability_provider = capability_provider.lock().await.take();
    if let Some(capability_provider) = capability_provider {
        capability_provider.open(0, 0, String::new(), server).await?;
    }
    let work_scheduler_control = ClientEnd::<WorkSchedulerControlMarker>::new(client)
        .into_proxy()
        .expect("failed to create launcher proxy");
    let result = work_scheduler_control.get_batch_period().await;
    result
        .expect("failed to use WorkSchedulerControl service")
        .expect("WorkSchedulerControl.GetBatchPeriod() yielded error");
    Ok(())
}
}
|
/// Compares two values of the same `Option`-like type by variant (tag)
/// only, ignoring any payload.
trait SameTag<T> {
    /// Returns `true` when `self` and `other` are the same variant.
    fn same_tag(&self, other: &Self) -> bool;
}
impl<T> SameTag<T> for Option<T> {
    // Replaces the previous version that emulated tag comparison with two
    // mutable integer "tags" (and needed #[allow(unused_assignments)]):
    // `is_some()` already answers "which variant?" without touching payloads.
    fn same_tag(&self, other: &Self) -> bool {
        self.is_some() == other.is_some()
    }
}
/// Demo entry point: prints whether two `Some` values share a tag ("true").
fn main() {
    let tags_match = Some(1).same_tag(&Some(2));
    println!("{}", tags_match);
}
// use std::mem;
// enum Foo { A(&'static str), B(i32), C(i32) }
// assert!(mem::discriminant(&Foo::A("bar")) == mem::discriminant(&Foo::A("baz")));
// assert!(mem::discriminant(&Foo::B(1)) == mem::discriminant(&Foo::B(2)));
// assert!(mem::discriminant(&Foo::B(3)) != mem::discriminant(&Foo::C(3)));
|
use rand::{Closed01, Rng};
use std::cmp;
use std::cell::UnsafeCell;
use std::intrinsics;
use super::sys::{Duration, XorShiftRng};
use super::sys::arch::pause;
use super::sys::os::sleep;
const MIN_DELAY_MSEC: u32 = 1;
const MAX_DELAY_MSEC: u32 = 1000;
//type SpinLockAlign = super::simd::u64x8;
// Placeholder alignment type: the intended SIMD alias (`u64x8`, commented
// out above) is disabled, so this is currently a zero-sized unit struct.
struct SpinLockAlign;
#[cfg(test)]
/// Note: we rely on these tests being correct for optimizations that would otherwise be unsafe!
mod tests {
    use super::{MAX_DELAY_MSEC, MIN_DELAY_MSEC};
    use super::super::sys::{MILLIS_PER_SEC, NANOS_PER_MILLI};
    // Both tests mirror Duration-style millis->nanos conversion of the
    // delay bounds; `checked_mul(...).unwrap()` panics if it could overflow.
    #[test]
    fn min_delay_fine() {
        ((MIN_DELAY_MSEC as u64 % MILLIS_PER_SEC) as u32).checked_mul(NANOS_PER_MILLI).unwrap();
    }
    #[test]
    fn max_delay_fine() {
        ((MAX_DELAY_MSEC as u64 % MILLIS_PER_SEC) as u32).checked_mul(NANOS_PER_MILLI).unwrap();
    }
}
/// A fast, lightweight spinlock
#[repr(C)]
pub struct SpinLock {
    // 0 = free; U8_TRUE = held (see `tas`/`unlock`).
    lock: UnsafeCell<u8>,
    // Plus 15 bytes to get to u64
    // NOTE(review): with `SpinLockAlign` currently a unit struct, this
    // zero-length array is itself zero-sized, so no padding is actually
    // added — the comment above describes the disabled SIMD-aligned layout.
    padding: [SpinLockAlign; 0],
}
// Initial guess for how many TAS attempts to make before sleeping; each
// thread adapts its own copy based on observed contention (see `spin_lock`).
const DEFAULT_SPINS_PER_DELAY: u32 = 100;
#[thread_local] static mut SPINS_PER_DELAY: u32 = DEFAULT_SPINS_PER_DELAY;
/// Seeds this thread's spin estimate from a value shared across threads.
pub fn set_spins_per_delay(shared_spins_per_delay: u32) {
    // SAFETY: `SPINS_PER_DELAY` is `#[thread_local]`, so this non-atomic
    // write cannot race with writes from other threads.
    unsafe {
        SPINS_PER_DELAY = shared_spins_per_delay;
    }
}
/// Folds this thread's local `SPINS_PER_DELAY` into the shared estimate as
/// a weighted moving average: `(shared * 15 + local) / 16` — the scheme
/// used by PostgreSQL's s_lock.c.
pub fn update_spins_per_delay(shared_spins_per_delay: u32) -> u32 {
    unsafe {
        let spins_per_delay = SPINS_PER_DELAY;
        // BUG FIX: the previous expression was
        //   (shared_spins_per_delay * 15u32.wrapping_add(spins_per_delay)) / 16
        // where the method call binds tighter than `*`, so it computed
        // `shared * (15 + local) / 16` instead of the intended average.
        shared_spins_per_delay
            .wrapping_mul(15)
            .wrapping_add(spins_per_delay)
            / 16
    }
}
// Value swapped into the lock byte by test-and-set (all bits set).
const U8_TRUE: u8 = !0;
// On x86-64, issue a PAUSE hint so the busy-wait is friendlier to the
// sibling hyperthread and the memory pipeline.
#[cfg(target_arch = "x86_64")]
#[inline(always)]
fn spin_delay() {
    pause();
}
// No spin hint available on other architectures.
#[cfg(not(target_arch = "x86_64"))]
#[inline(always)]
fn spin_delay() { }
impl SpinLock {
    /// Test-and-set: atomically swaps `U8_TRUE` into the lock byte with
    /// acquire ordering; returns `true` if the lock was ALREADY held.
    #[inline(always)]
    fn tas(&self) -> bool {
        unsafe { intrinsics::atomic_xchg_acq(self.lock.get(), U8_TRUE) != 0 }
    }
    /// Spin-friendly TAS: on x86-64, peek with a relaxed load first so the
    /// cache-line-invalidating exchange is only issued when the lock looks
    /// free.
    #[cfg(target_arch = "x86_64")]
    #[inline(always)]
    fn tas_spin(&self) -> bool {
        unsafe { intrinsics::atomic_load_relaxed(self.lock.get()) != 0 || self.tas() }
    }
    /// Other architectures: plain test-and-set.
    #[cfg(not(target_arch = "x86_64"))]
    #[inline(always)]
    fn tas_spin(&self) -> bool {
        self.tas()
    }
    /// Creates an unlocked spinlock (0 = free).
    pub fn new() -> Self {
        SpinLock { lock: UnsafeCell::new(0), padding: [] }
    }
    /*fn spin_lock(&self) {
        while self.tas_spin() {
            // CPU-specific delay each time through the loop
            spin_delay();
        }
    }*/
    /// Slow path: spin with CPU hints; after `spins_per_delay` failed tries
    /// fall back to sleeping for a randomized, growing delay. Afterwards,
    /// adapt this thread's `SPINS_PER_DELAY` estimate up (never slept) or
    /// down (had to sleep).
    fn spin_lock(&self) {
        const MIN_SPINS_PER_DELAY: u32 = 10;
        const MAX_SPINS_PER_DELAY: u32 = 1000;
        const NUM_DELAYS: u32 = 1000;
        {
            let mut spins = 0u32;
            let mut delays = 0u32;
            // 0 means "have not slept yet"; also used after the loop to
            // decide which way to adapt SPINS_PER_DELAY.
            let mut cur_delay = 0;
            // RNG is constructed lazily, on the first sleep only.
            let mut rng = None;
            let spins_per_delay = unsafe { SPINS_PER_DELAY };
            while self.tas_spin() {
                // CPU-specific delay each time through the loop
                spin_delay();
                // Block the process every spins_per_delay tries
                spins = spins.wrapping_add(1u32);
                if spins >= spins_per_delay {
                    delays = delays.wrapping_add(1u32);
                    if delays > NUM_DELAYS {
                        /*unsafe */{
                            //println!("abort");
                            // Lock stuck. Currently we do nothing.
                            //intrinsics::abort();
                            delays = 0;
                        }
                    }
                    if cur_delay == 0 { // first time to delay?
                        cur_delay = MIN_DELAY_MSEC;
                    }
                    /*unsafe */{
                        //intrinsics::assume(MIN_DELAY_MSEC <= cur_delay && cur_delay <= MAX_DELAY_MSEC);
                        let duration = Duration::from_millis(cur_delay as u64);
                        sleep(duration);
                    }
                    // increase delay by a random fraction between 1X and 2X
                    // TODO: Fix this to actually use proper randomness... the default traits in
                    // the rand crate mostly do lots of panicking / unwinding so I'll probably have
                    // to seed them myself.
                    cur_delay = cur_delay.wrapping_add((cur_delay as f64 * match rng {
                        None => {
                            //let mut rng_ = rand::thread_rng();
                            let mut rng_ = XorShiftRng::new_unseeded();
                            let frac = rng_.gen::<Closed01<f64>>().0;
                            rng = Some(rng_);
                            frac
                        },
                        Some(ref mut rng) => rng.gen::<Closed01<f64>>().0,
                    }) as u32);
                    // wrap back to minimum delay when maximum is exceeded
                    if cur_delay > MAX_DELAY_MSEC {
                        cur_delay = MIN_DELAY_MSEC ;
                    }
                    spins = 0;
                }
            }
            if cur_delay == 0 {
                // we never had to delay
                if spins_per_delay < MAX_SPINS_PER_DELAY {
                    unsafe {
                        SPINS_PER_DELAY = cmp::min(spins_per_delay.wrapping_add(100), MAX_SPINS_PER_DELAY);
                    }
                }
            } else {
                if spins_per_delay > MIN_SPINS_PER_DELAY {
                    unsafe {
                        SPINS_PER_DELAY = cmp::max(spins_per_delay.wrapping_sub(1), MIN_SPINS_PER_DELAY);
                    }
                }
            }
        }
    }
    /// Acquires the lock, spinning/sleeping until it is free.
    #[inline(always)]
    pub fn lock(&self) {
        // fast path
        if self.tas() {
            // slow path
            self.spin_lock();
        }
    }
    /// Releases the lock with a release-ordered store.
    #[inline(always)]
    pub fn unlock(&self) {
        unsafe { intrinsics::atomic_store_rel(self.lock.get(), 0) }
    }
    /// NOTE(review): returns the raw TAS result — `true` means the lock was
    /// ALREADY held (i.e. acquisition FAILED), which is inverted relative
    /// to the usual `try_lock` convention. Confirm callers expect this
    /// before relying on it.
    #[inline(always)]
    pub fn try_lock(&self) -> bool {
        self.tas()
    }
}
|
use crate::protocol::parts::OutputParameters;
use crate::{HdbError, HdbResult};
#[cfg(feature = "dist_tx")]
use dist_tx::XaTransactionId;
/// An enum that describes a single database return value.
#[derive(Debug)]
pub enum HdbReturnValue {
    /// A resultset of a query.
    ResultSet(crate::sync::ResultSet),
    /// A list of numbers of affected rows.
    AffectedRows(Vec<usize>),
    /// Values of output parameters of a procedure call.
    OutputParameters(OutputParameters),
    /// Indication that a db call was successful.
    Success,
    // Fix: the `#[cfg(feature = "dist_tx")]` attribute was duplicated on
    // this variant (once above and once below its doc comment); one copy
    // suffices.
    /// A list of `XaTransactionId`s.
    #[cfg(feature = "dist_tx")]
    XaTransactionIds(Vec<XaTransactionId>),
}
impl HdbReturnValue {
    /// Turns itself into a single resultset.
    ///
    /// # Errors
    ///
    /// `HdbError::Evaluation` for other variants than `HdbReturnValue::ResultSet`.
    pub fn into_resultset(self) -> HdbResult<crate::sync::ResultSet> {
        match self {
            Self::ResultSet(rs) => Ok(rs),
            _ => Err(HdbError::Evaluation("Not a HdbReturnValue::ResultSet")),
        }
    }
    /// Turns itself into a Vector of numbers (each number representing a
    /// number of affected rows).
    ///
    /// # Errors
    ///
    /// `HdbError::Evaluation` for other variants than `HdbReturnValue::AffectedRows`.
    pub fn into_affected_rows(self) -> HdbResult<Vec<usize>> {
        match self {
            Self::AffectedRows(array) => Ok(array),
            _ => Err(HdbError::Evaluation("Not a HdbReturnValue::AffectedRows")),
        }
    }
    /// Turns itself into the output parameters of a procedure call.
    /// (Doc fix: this previously repeated the `into_affected_rows`
    /// description verbatim.)
    ///
    /// # Errors
    ///
    /// `HdbError::Evaluation` for other variants than `HdbReturnValue::OutputParameters`.
    pub fn into_output_parameters(self) -> HdbResult<OutputParameters> {
        match self {
            Self::OutputParameters(op) => Ok(op),
            _ => Err(HdbError::Evaluation(
                "Not a HdbReturnValue::OutputParameters",
            )),
        }
    }
    /// Turns itself into (), if the statement had returned successfully.
    ///
    /// # Errors
    ///
    /// `HdbError::Evaluation` for other variants of `HdbReturnValue`.
    pub fn into_success(self) -> HdbResult<()> {
        match self {
            Self::Success => Ok(()),
            Self::AffectedRows(_) => {
                // AffectedRows counts as success only for the single
                // value 0 (see `is_success`).
                if self.is_success() {
                    Ok(())
                } else {
                    Err(HdbError::Evaluation(
                        "HdbReturnValue::AffectedRows contained value > 0",
                    ))
                }
            }
            _ => Err(HdbError::Evaluation(
                "Not a HdbReturnValue::AffectedRows or ::Success",
            )),
        }
    }
    /// Returns true if the statement had returned successfully.
    pub fn is_success(&self) -> bool {
        match *self {
            Self::Success => true,
            // A single affected-rows count of exactly 0 is treated as
            // plain success.
            Self::AffectedRows(ref vec) => vec.len() == 1 && vec.first() == Some(&0),
            _ => false,
        }
    }
}
impl std::fmt::Display for HdbReturnValue {
    // One line per variant, uniformly rendered as `Name …,`.
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        match *self {
            Self::AffectedRows(ref vec) => writeln!(fmt, "AffectedRows {vec:?},"),
            Self::OutputParameters(ref op) => writeln!(fmt, "OutputParameters [{op}],"),
            Self::ResultSet(ref rs) => writeln!(fmt, "ResultSet [{rs}],"),
            Self::Success => writeln!(fmt, "Success,"),
            // Fix: removed the stray trailing '<' ("XaTransactionIds,<")
            // so this arm matches the trailing-comma pattern of the others.
            #[cfg(feature = "dist_tx")]
            Self::XaTransactionIds(_) => writeln!(fmt, "XaTransactionIds,"),
        }
    }
}
|
use crate::fs::Dev;
/// Packs Linux major/minor device numbers into the kernel's split `dev_t`
/// layout: major occupies bits 8-19 and 32-51, minor bits 0-7 and 20-31.
#[inline]
pub(crate) fn makedev(maj: u32, min: u32) -> Dev {
    let maj = u64::from(maj);
    let min = u64::from(min);
    let major_hi = (maj & 0xffff_f000_u64) << 32;
    let major_lo = (maj & 0x0000_0fff_u64) << 8;
    let minor_hi = (min & 0xffff_ff00_u64) << 12;
    let minor_lo = min & 0x0000_00ff_u64;
    major_hi | major_lo | minor_hi | minor_lo
}
/// Extracts the major device number from a packed `dev_t` value
/// (inverse of `makedev`'s major bits).
#[inline]
pub(crate) fn major(dev: Dev) -> u32 {
    // `>> 31 >> 1` rather than `>> 32` keeps the shift well-defined even
    // if `Dev` were only 32 bits wide.
    let hi = (dev >> 31 >> 1) & 0xffff_f000;
    let lo = (dev >> 8) & 0x0000_0fff;
    (hi | lo) as u32
}
/// Extracts the minor device number from a packed `dev_t` value
/// (inverse of `makedev`'s minor bits).
#[inline]
pub(crate) fn minor(dev: Dev) -> u32 {
    let hi = (dev >> 12) & 0xffff_ff00;
    let lo = dev & 0x0000_00ff;
    (hi | lo) as u32
}
|
use crate::algebra::{Magma, UnitalMagma};
/// A magma whose all elements have an inverse element.
///
/// # Laws
/// * ∀`x: T` (`x.op(&x.invert())` = `x.invert().op(&x)` = `T::identity()`)
pub trait InvertibleMagma: Magma + UnitalMagma {
/// Returns an inverse element.
fn invert(&self) -> Self;
/// Returns `self.op(&rhs.invert())`.
fn inverse_op(&self, rhs: &Self) -> Self {
self.op(&rhs.invert())
}
/// Assigns `self.inverse_op(rhs)` to `self`.
fn inverse_op_assign_right(&mut self, rhs: &Self) {
*self = self.inverse_op(rhs);
}
/// Assigns `lhs.inverse_op(self)` to `self`.
fn inverse_op_assign_left(&mut self, lhs: &Self) {
*self = lhs.inverse_op(self);
}
}
|
// Copyright 2020 IOTA Stiftung
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
// an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and limitations under the License.
//! Derive macros for the bee-common crate.
#![warn(missing_docs)]
use quote::quote;
use syn::{parse_macro_input, DeriveInput};
/// Derives an implementation of the trait `std::fmt::Debug` for a secret type that doesn't leak its internal secret.
/// Implements https://github.com/iotaledger/bee-rfcs/blob/master/text/0042-secret-debug-display.md.
/// Based on https://github.com/dtolnay/syn/blob/master/examples/heapsize/heapsize_derive/src/lib.rs.
#[proc_macro_derive(SecretDebug)]
pub fn derive_secret_debug(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
    // Parse the annotated item and emit a `Debug` impl that prints a fixed
    // placeholder instead of the secret's contents.
    let ast = parse_macro_input!(input as DeriveInput);
    let ident = &ast.ident;
    let (impl_generics, ty_generics, _where_clause) = ast.generics.split_for_impl();
    let tokens = quote! {
        impl #impl_generics std::fmt::Debug for #ident #ty_generics {
            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                write!(f, "<Omitted secret>")
            }
        }
    };
    tokens.into()
}
/// Derives an implementation of the trait `std::fmt::Display` for a secret type that doesn't leak its internal secret.
/// Implements https://github.com/iotaledger/bee-rfcs/blob/master/text/0042-secret-debug-display.md.
/// Based on https://github.com/dtolnay/syn/blob/master/examples/heapsize/heapsize_derive/src/lib.rs.
#[proc_macro_derive(SecretDisplay)]
pub fn derive_secret_display(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
    // Parse the annotated item and emit a `Display` impl that prints a
    // fixed placeholder instead of the secret's contents.
    let ast = parse_macro_input!(input as DeriveInput);
    let ident = &ast.ident;
    let (impl_generics, ty_generics, _where_clause) = ast.generics.split_for_impl();
    let tokens = quote! {
        impl #impl_generics std::fmt::Display for #ident #ty_generics {
            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                write!(f, "<Omitted secret>")
            }
        }
    };
    tokens.into()
}
/// Derives an implementation of the trait `std::ops::Drop` for a secret type that calls `Zeroize::zeroize`.
/// Implements https://github.com/iotaledger/bee-rfcs/blob/master/text/0044-secret-zeroize-drop.md.
/// Based on https://github.com/dtolnay/syn/blob/master/examples/heapsize/heapsize_derive/src/lib.rs.
#[proc_macro_derive(SecretDrop)]
pub fn derive_secret_drop(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
    // Parse the annotated item and emit a `Drop` impl that wipes the
    // secret's memory on destruction via `zeroize`.
    let ast = parse_macro_input!(input as DeriveInput);
    let ident = &ast.ident;
    let (impl_generics, ty_generics, _where_clause) = ast.generics.split_for_impl();
    let tokens = quote! {
        impl #impl_generics std::ops::Drop for #ident #ty_generics {
            fn drop(&mut self) {
                self.zeroize()
            }
        }
    };
    tokens.into()
}
|
// svd2rust-style generated reader types for the C12ISR (channel 12
// interrupt status) register: `R` wraps the raw 32-bit value; each `*_R`
// alias reads a single-bit flag.
#[doc = "Reader of register C12ISR"]
pub type R = crate::R<u32, super::C12ISR>;
#[doc = "Reader of field `TEIF12`"]
pub type TEIF12_R = crate::R<bool, bool>;
#[doc = "Reader of field `CTCIF12`"]
pub type CTCIF12_R = crate::R<bool, bool>;
#[doc = "Reader of field `BRTIF12`"]
pub type BRTIF12_R = crate::R<bool, bool>;
#[doc = "Reader of field `BTIF12`"]
pub type BTIF12_R = crate::R<bool, bool>;
#[doc = "Reader of field `TCIF12`"]
pub type TCIF12_R = crate::R<bool, bool>;
#[doc = "Reader of field `CRQA12`"]
pub type CRQA12_R = crate::R<bool, bool>;
// Generated accessors: each extracts a single bit of the register value
// by shift-and-mask; the bit positions (0-4 and 16) are stated in the
// `#[doc]` strings on the individual methods.
impl R {
    #[doc = "Bit 0 - Channel x transfer error interrupt flag This bit is set by hardware. It is cleared by software writing 1 to the corresponding bit in the DMA_IFCRy register."]
    #[inline(always)]
    pub fn teif12(&self) -> TEIF12_R {
        TEIF12_R::new((self.bits & 0x01) != 0)
    }
    #[doc = "Bit 1 - Channel x Channel Transfer Complete interrupt flag This bit is set by hardware. It is cleared by software writing 1 to the corresponding bit in the DMA_IFCRy register. CTC is set when the last block was transferred and the channel has been automatically disabled. CTC is also set when the channel is suspended, as a result of writing EN bit to 0."]
    #[inline(always)]
    pub fn ctcif12(&self) -> CTCIF12_R {
        CTCIF12_R::new(((self.bits >> 1) & 0x01) != 0)
    }
    #[doc = "Bit 2 - Channel x block repeat transfer complete interrupt flag This bit is set by hardware. It is cleared by software writing 1 to the corresponding bit in the DMA_IFCRy register."]
    #[inline(always)]
    pub fn brtif12(&self) -> BRTIF12_R {
        BRTIF12_R::new(((self.bits >> 2) & 0x01) != 0)
    }
    #[doc = "Bit 3 - Channel x block transfer complete interrupt flag This bit is set by hardware. It is cleared by software writing 1 to the corresponding bit in the DMA_IFCRy register."]
    #[inline(always)]
    pub fn btif12(&self) -> BTIF12_R {
        BTIF12_R::new(((self.bits >> 3) & 0x01) != 0)
    }
    #[doc = "Bit 4 - channel x buffer transfer complete"]
    #[inline(always)]
    pub fn tcif12(&self) -> TCIF12_R {
        TCIF12_R::new(((self.bits >> 4) & 0x01) != 0)
    }
    #[doc = "Bit 16 - channel x request active flag"]
    #[inline(always)]
    pub fn crqa12(&self) -> CRQA12_R {
        CRQA12_R::new(((self.bits >> 16) & 0x01) != 0)
    }
}
|
use thiserror::Error;
use std::fs::File;
use anyhow::anyhow;
use log::info;
/// Validation failures for a manifest; messages are produced via
/// `thiserror`'s `#[error]` attributes.
#[derive(Error, Debug)]
pub enum ManifestValidationError {
    #[error("manifest has no data sources")]
    NoDataSources,
    #[error("manifest cannot index data from different networks")]
    MultipleNetworks,
}
// Lazily load file from local
pub fn load_file(
config_url: String,
) {
let f = File::open(config_url).unwrap();
let data: serde_yaml::Value = serde_yaml::from_reader(f).unwrap();
let schema = data["schema"]["file"]
.as_str()
.map(|s| s.to_string())
.ok_or(anyhow!("Could not find schema file"));
info!("Schema: {}", schema.unwrap());
let kind = data["dataSources"][0]["kind"]
.as_str()
.map(|s| s.to_string())
.ok_or(anyhow!("Could not find network kind"));
info!("Kind: {}", kind.unwrap());
} |
use byte_unit::Byte;
use std::{
fmt::{self, Display},
fs::{self, File},
io::{self, Write},
path::Path,
time::Instant,
};
/// Types that can serialize themselves as extra CSV columns: a header row
/// (associated with the type) and one data fragment per value.
pub trait WriteAsCSV {
    fn write_as_csv<W: Write>(&self, writer: &mut W) -> io::Result<()>;
    fn write_hdr_as_csv<W: Write>(writer: &mut W) -> io::Result<()>;
}
/// The unit type contributes no columns: both writers succeed without
/// emitting anything.
impl WriteAsCSV for () {
    fn write_as_csv<W: Write>(&self, _writer: &mut W) -> io::Result<()> {
        Ok(())
    }
    fn write_hdr_as_csv<W: Write>(_writer: &mut W) -> io::Result<()> {
        Ok(())
    }
}
/// A named suite that runs each registered function over each parameter
/// for a fixed number of iterations, with optional per-iteration
/// setup/teardown hooks (the hooks run outside the timed window — see
/// `benchmark`).
pub struct Benchmark<'a, T, O: WriteAsCSV> {
    name: &'a str,
    params: &'a [Param<T>],
    iterations: usize,
    setup: Option<Box<dyn Fn(&T) -> ()>>,
    functions: Vec<NamedFunction<'a, T, O>>,
    teardown: Option<Box<dyn Fn(&T) -> ()>>,
}
impl<'a, T, O: WriteAsCSV> Benchmark<'a, T, O> {
    /// Creates a benchmark with no registered functions and no hooks.
    pub fn new(name: &'a str, params: &'a [Param<T>], iterations: usize) -> Benchmark<'a, T, O> {
        Benchmark {
            name,
            params,
            iterations,
            setup: None,
            functions: Vec::new(),
            teardown: None,
        }
    }
    /// Convenience constructor that registers a single named function.
    pub fn with_func<F: Fn(&T) -> O + 'static>(
        name: &'a str,
        params: &'a [Param<T>],
        iterations: usize,
        function_name: &'a str,
        func: F,
    ) -> Benchmark<'a, T, O> {
        let mut benchmark = Benchmark::new(name, params, iterations);
        let function = NamedFunction {
            name: function_name,
            function: Box::new(func),
        };
        benchmark.functions.push(function);
        benchmark
    }
    /// Sets a hook run before every timed iteration (builder style).
    pub fn setup<F: Fn(&T) -> () + 'static>(self, func: F) -> Self {
        let mut benchmark = self;
        benchmark.setup = Some(Box::new(func));
        benchmark
    }
    /// Registers an additional named function (builder style).
    pub fn add_func<F: Fn(&T) -> O + 'static>(self, function_name: &'a str, func: F) -> Self {
        let mut benchmark = self;
        let function = NamedFunction {
            name: function_name,
            function: Box::new(func),
        };
        benchmark.functions.push(function);
        benchmark
    }
    /// Sets a hook run after every timed iteration (builder style).
    pub fn teardown<F: Fn(&T) -> () + 'static>(self, func: F) -> Self {
        let mut benchmark = self;
        benchmark.teardown = Some(Box::new(func));
        benchmark
    }
    /// Runs every registered function against every parameter for the
    /// configured number of iterations, printing a per-run summary and
    /// throughput, and returns all collected samples.
    pub fn benchmark(&self) -> BenchmarkResult<'a, O> {
        // NOTE(review): capacity hints only — the final count is
        // params * functions * iterations, so these Vecs will grow past
        // their initial capacity.
        let num_of_samples = self.params.len() * self.functions.len();
        let mut res = BenchmarkResult::new(self.name, num_of_samples);
        println!(
            "Running benchmark '{}' with {} param(s) for {} function(s) and {} iteration(s)\n",
            self.name,
            self.params.len(),
            self.functions.len(),
            self.iterations
        );
        for f in &self.functions {
            let func = &f.function;
            for p in self.params {
                let mut run_res = BenchmarkResult::new(self.name, self.params.len());
                println!("\t{} / {}", f.name, p.display_name);
                for _ in 0..self.iterations {
                    if let Some(ref setup) = self.setup {
                        setup(&p.value)
                    }
                    // Only the function call itself is timed; the hooks run
                    // outside the measured window.
                    let (time_ns, f_res) = measure_ns(|| func(&p.value));
                    run_res.add(Sample::new(f.name, &p.name, time_ns, f_res));
                    if let Some(ref teardown) = self.teardown {
                        teardown(&p.value)
                    }
                }
                let summary = run_res.samples.summary();
                println!("\t\t\t{}", summary);
                println!("\t\t\t{}", ThroughputSummary::new(&summary, p.amount));
                res.samples.append(&mut run_res.samples);
            }
        }
        println!(
            "\nReceived {} sample(s) (expected {})",
            res.samples.len(),
            self.params.len() * self.functions.len() * self.iterations
        );
        res
    }
    /// The benchmark's name.
    pub fn name(&self) -> &str { self.name }
    /// The parameter set the benchmark runs against.
    pub fn params(&self) -> &[Param<T>] { self.params }
}
/// Invokes `func` once, returning the wall-clock time it took in
/// nanoseconds together with its result.
fn measure_ns<O, F: Fn() -> O>(func: F) -> (u128, O) {
    let started = Instant::now();
    let output = func();
    let elapsed_ns = started.elapsed().as_nanos();
    (elapsed_ns, output)
}
/// A named benchmark input: an identifier, a display label, a size in
/// units processed (used for throughput), and the value itself.
pub struct Param<T> {
    name: String,
    /// value to use for displaying this parameter
    display_name: String,
    amount: usize,
    value: T,
}
impl<T> Param<T> {
    /// Builds a parameter; both name arguments accept anything convertible
    /// into a `String`.
    pub fn new<S: Into<String>>(name: S, display_name: S, amount: usize, value: T) -> Param<T> {
        let name = name.into();
        let display_name = display_name.into();
        Param { name, display_name, amount, value }
    }
    /// The parameter's identifier.
    pub fn name(&self) -> &str {
        self.name.as_str()
    }
    /// Number of units this parameter represents.
    pub fn amount(&self) -> usize {
        self.amount
    }
    /// Borrows the underlying value.
    pub fn value(&self) -> &T {
        &self.value
    }
}
/// A benchmarked function paired with the name it is reported under.
pub struct NamedFunction<'a, T, O: WriteAsCSV> {
    name: &'a str,
    function: Box<dyn Fn(&T) -> O>,
}
/// All samples collected for one benchmark, tagged with its name.
#[derive(Debug)]
pub struct BenchmarkResult<'a, O: WriteAsCSV> {
    pub benchmark_name: &'a str,
    pub samples: Vec<Sample<'a, O>>,
}
impl<'a, O: WriteAsCSV> BenchmarkResult<'a, O> {
    /// Creates an empty result with capacity for `num` samples.
    pub fn new(benchmark_name: &'a str, num: usize) -> BenchmarkResult<'a, O> {
        let samples = Vec::with_capacity(num);
        BenchmarkResult {
            benchmark_name,
            samples,
        }
    }
    /// Writes all samples as a gzip-compressed CSV named
    /// `<benchmark_name>.csv.gz` under `results_dir` (created if missing).
    pub fn write_results<P: AsRef<Path>>(&self, results_dir: P) -> io::Result<()> {
        fs::create_dir_all(&results_dir)?;
        let mut output_path = results_dir.as_ref().join(self.benchmark_name);
        output_path.set_extension("csv.gz");
        println!("Writing benchmark results to \"{}\"", &output_path.to_string_lossy());
        let file = File::create(output_path)?;
        let mut gzip = flate2::write::GzEncoder::new(file, flate2::Compression::default());
        self.write_as_csv(&mut gzip)?;
        // `finish` flushes the gzip trailer; skipping it would leave a
        // truncated archive.
        gzip.finish()?;
        Ok(())
    }
    /// Appends a single sample.
    fn add(&mut self, sample: Sample<'a, O>) { self.samples.push(sample) }
    /// Writes the CSV header followed by one row per sample; each row ends
    /// with the extra columns contributed by `O`.
    fn write_as_csv<W: Write>(&self, writer: &mut W) -> io::Result<()> {
        write!(writer, "method,file_size,time,")?;
        O::write_hdr_as_csv(writer)?;
        writeln!(writer)?;
        for s in &self.samples {
            write!(writer, "{},{},{},", s.name, s.param, s.time_ns)?;
            s.extra.write_as_csv(writer)?;
            writeln!(writer)?;
        }
        Ok(())
    }
}
/// One timed invocation: which function, which parameter, the run time in
/// nanoseconds, plus extra CSV columns of type `O`.
#[derive(Debug)]
pub struct Sample<'a, O: WriteAsCSV> {
    pub name: &'a str,
    pub param: &'a str,
    pub time_ns: u128,
    pub extra: O,
}
impl<'a, O: WriteAsCSV> Sample<'a, O> {
    /// Bundles one measurement into a sample.
    pub fn new(name: &'a str, param: &'a str, time_ns: u128, extra: O) -> Sample<'a, O> {
        Sample {
            name,
            param,
            time_ns,
            extra,
        }
    }
}
/// Something that can be reduced to min/max/mean/standard-deviation
/// statistics.
pub trait Summarize {
    fn summary(&self) -> Summary;
}
/// Aggregate run-time statistics in nanoseconds; `Display` renders them
/// in milliseconds.
#[derive(Debug)]
pub struct Summary {
    pub min: f64,
    pub max: f64,
    pub mean: f64,
    pub sd: f64,
}
impl Summary {
    /// Bundles the four statistics (all in nanoseconds).
    pub fn new(min: f64, max: f64, mean: f64, sd: f64) -> Summary {
        Summary { min, max, mean, sd }
    }
}
impl Display for Summary {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Convert nanoseconds to milliseconds for display.
        const NANOS_PER_MILLI: f64 = 1_000_000f64;
        write!(
            f,
            "[min:{:10.2} ms, mean:{:10.2} ms, max:{:10.2} ms, sd:{:10.2}]",
            self.min / NANOS_PER_MILLI,
            self.mean / NANOS_PER_MILLI,
            self.max / NANOS_PER_MILLI,
            self.sd / NANOS_PER_MILLI
        )
    }
}
impl<O: WriteAsCSV> Summarize for Vec<Sample<'_, O>> {
    /// Computes min/max/mean/standard deviation over the samples' run
    /// times, in nanoseconds (statrs operates on `f64` slices).
    fn summary(&self) -> Summary {
        use statrs::statistics::Statistics;
        let values: Vec<f64> = self.iter().map(|x| x.time_ns as f64).collect();
        let min: f64 = values.as_slice().min();
        let max: f64 = values.as_slice().max();
        let mean: f64 = values.as_slice().mean();
        let sd: f64 = values.as_slice().std_dev();
        Summary::new(min, max, mean, sd)
    }
}
/// Displays a [`Summary`] as a throughput (bytes processed per second).
#[derive(Debug)]
pub struct ThroughputSummary<'a> {
    /// Run-time statistics, in nanoseconds.
    summary: &'a Summary,
    /// Number of bytes processed by each measured run.
    amount: usize,
}
impl<'a> ThroughputSummary<'a> {
    /// Pairs a run-time summary with the number of bytes processed per run.
    pub fn new(summary: &'a Summary, amount: usize) -> ThroughputSummary {
        ThroughputSummary { summary, amount }
    }
}
impl<'a> Display for ThroughputSummary<'a> {
    /// Formats the throughput bounds implied by the run-time summary.
    ///
    /// Times in [`Summary`] are nanoseconds; each rate is
    /// `amount / (time in seconds)`. Throughput is inversely proportional to
    /// run time, so the *minimum* throughput is produced by the *maximum*
    /// run time and vice versa.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let amount = self.amount as f64;
        // Convert nanosecond times to seconds before dividing.
        let min = amount / (self.summary.max / 1_000_000_000f64);
        let mean = amount / (self.summary.mean / 1_000_000_000f64);
        let max = amount / (self.summary.min / 1_000_000_000f64);
        let bytes_min = Byte::from_bytes(min as u128);
        let bytes_mean = Byte::from_bytes(mean as u128);
        let bytes_max = Byte::from_bytes(max as u128);
        write!(
            f,
            "[min:{:>11}/s, mean:{:>11}/s, max:{:>11}/s]",
            bytes_min.get_appropriate_unit(true).format(2),
            bytes_mean.get_appropriate_unit(true).format(2),
            bytes_max.get_appropriate_unit(true).format(2)
        )
    }
}
|
extern crate ggez;
extern crate rustic;
use rustic::application::*;
use rustic::sop::*;
/// Builds the "empty" rustic application: fade to black, fade back in, quit.
fn main() {
    let stories = vec![
        fade_out(3.0, ggez::graphics::BLACK),
        fade_in(3.0, ggez::graphics::BLACK),
        quit_state(),
    ];
    let mut builder = ApplicationBuilder::new("empty", "rustic");
    builder.stories(stories);
    builder.build().run();
}
|
//! Memory accesses analysis.
use crate::Gpu;
use binary_heap_plus::BinaryHeap;
use fxhash::{FxHashMap, FxHashSet};
use itertools::Itertools;
use log::trace;
use num::Integer;
use telamon::device::{Context, Device};
use telamon::ir;
use telamon::model::size;
use telamon::search_space::*;
use utils::*;
// TODO(model): the pressure changes depending on the list of outer dimensions. Try to
// take this into account be computing the pressure incrementatly when applying levels.
/// Result of the memory analysis for one instruction. Vector instructions are considered
/// as a single instance and predicated dimensions are not considered to compute the
/// average pressure.
#[derive(Default, Debug)]
pub struct MemInfo {
    /// The proportion of instruction that produce a L2 miss.
    pub l2_miss_ratio: f64,
    /// The number of L1 cache line loaded for each instruction.
    pub l1_coalescing: f64,
    /// The number of L2 cache line loaded for each instruction.
    pub l2_coalescing: f64,
    /// The number of times the instruction must be issued to be completed.
    /// On `sm_major >= 5` replays are handled by the memory units themselves,
    /// so this can be smaller than `memory_transactions`.
    pub issue_replays: f64,
    /// The number of memory transactions needed to complete the instruction.
    pub memory_transactions: f64,
    /// Indicates if the instruction accesses shared memory.
    pub access_shared: bool,
    /// Indicates if the instruction accesses global memory.
    pub access_global: bool,
}
/// Runs the memory analysis for `inst` and returns its [`MemInfo`].
///
/// Dispatches on the operator: loads/stores with a known tensor pattern get
/// the precise analysis (`info`); unknown patterns, read-only cached reads
/// and temporary loads/stores get the conservative one (`unknown_info`).
/// Panics on non-memory operators.
pub fn analyse(
    space: &SearchSpace,
    gpu: &Gpu,
    inst: &ir::Instruction,
    sizes: &FxHashMap<ir::DimId, size::Range>,
    ctx: &dyn Context,
) -> MemInfo {
    let flag = space.domain().get_inst_flag(inst.id());
    let info = match *inst.operator() {
        ir::Operator::Ld(_, _, ref pattern) | ir::Operator::St(_, _, _, ref pattern) => {
            let mem_space = access_pattern_space(pattern, space);
            let is_shared = mem_space.is(MemSpace::SHARED);
            match pattern {
                // Accesses that may go through the read-only cache are
                // treated like accesses with an unknown pattern.
                _ if flag.intersects(InstFlag::CACHE_READ_ONLY) => {
                    unknown_info(inst, is_shared, gpu)
                }
                ir::AccessPattern::Unknown { .. } => unknown_info(inst, is_shared, gpu),
                ir::AccessPattern::Tensor { ref dims, .. } => {
                    info(space, inst, dims, is_shared, gpu, sizes, ctx)
                }
            }
        }
        ir::Operator::TmpLd(.., mem) | ir::Operator::TmpSt(.., mem) => {
            let mem_space = space.domain().get_mem_space(mem);
            let is_shared = mem_space.is(MemSpace::SHARED);
            unknown_info(inst, is_shared, gpu)
        }
        // Only memory operators are expected here.
        _ => panic!(),
    };
    trace!("mem_info for {:?}: {:?}", inst.id(), info);
    info
}
/// Computes the `MemInfo` when the access pattern is unknown.
///
/// `is_shared_access` is a `Trivalent`: both the shared and the global branch
/// may apply when the memory space is not yet decided.
fn unknown_info(
    inst: &ir::Instruction,
    is_shared_access: Trivalent,
    gpu: &Gpu,
) -> MemInfo {
    let mut info = MemInfo::default();
    if is_shared_access.maybe_true() {
        // Optimistic: a single shared-memory transaction.
        info.memory_transactions = 1.0;
        info.access_shared = true;
    }
    if is_shared_access.maybe_false() {
        // Optimistic: one cache line serves the whole wrap.
        info.l1_coalescing = 1.0 / f64::from(gpu.wrap_size);
        info.l2_coalescing = 1.0 / f64::from(gpu.wrap_size);
        info.memory_transactions = 1.0;
        info.access_global = true;
    }
    // Starting with Maxwell, memory replays are handled by the individual units and do not
    // use extra issue slots.
    //
    // https://stackoverflow.com/questions/57492400/issued-load-store-instructions-for-replay
    info.issue_replays = if gpu.sm_major >= 5 {
        // Each single "instruction" occupies a n-th of an issue slot for a n-way vector
        // instruction, so we need to divide the issue pressure by the vectorization factor -- or
        // at least that is my understanding, anyways.
        let max_vectorization = gpu
            .max_vectorization(inst.operator())
            .iter()
            .product::<u32>();
        1. / f64::from(max_vectorization)
    } else {
        info.memory_transactions
    };
    info
}
/// Computes the memory access info for a given memory access.
// TODO(model): The model can decrease if the maximal number decreases: the replay
// assume a full wrap if possible. This is correct as if the wrap is not full the
// waste ratio will repeat the replay factor to achieve the same number. However,
// it makes debugging the performance model harder.
fn info(
    space: &SearchSpace,
    inst: &ir::Instruction,
    dims: &FxHashMap<ir::DimId, ir::PartialSize>,
    is_shared_access: Trivalent,
    gpu: &Gpu,
    sizes: &FxHashMap<ir::DimId, size::Range>,
    ctx: &dyn Context,
) -> MemInfo {
    let mut info = MemInfo::default();
    let thread_dims = tensor_thread_dims(space, inst, dims, sizes, ctx);
    trace!("thread dims: {:?}", thread_dims);
    // Take the minimum over the candidate memory spaces evaluated below.
    info.memory_transactions = std::f64::INFINITY;
    if is_shared_access.maybe_true() {
        let replay =
            shared_memory_transactions(thread_dims.clone(), dims, sizes, space, gpu);
        info.memory_transactions = f64::min(replay, info.memory_transactions);
        info.access_shared = true;
    }
    if is_shared_access.maybe_false() {
        let (l1_coalescing, l2_coalescing, replay) =
            global_coalescing(thread_dims, space, gpu);
        info.l1_coalescing = l1_coalescing;
        info.l2_coalescing = l2_coalescing;
        info.memory_transactions = f64::min(replay, info.memory_transactions);
        info.access_global = true;
        // TODO(model): compute the miss ratio
    }
    // Starting with Maxwell, memory replays are handled by the individual units and do not
    // use extra issue slots.
    //
    // https://stackoverflow.com/questions/57492400/issued-load-store-instructions-for-replay
    info.issue_replays = if gpu.sm_major >= 5 {
        // Each single "instruction" occupies a n-th of an issue slot for a n-way vector
        // instruction, so we need to divide the issue pressure by the vectorization factor -- or
        // at least that is my understanding, anyways.
        let max_vectorization = gpu
            .max_vectorization(inst.operator())
            .iter()
            .product::<u32>();
        // Effective vectorization: largest vector dimension, capped by what
        // the operator supports.
        let vectorization = dims
            .iter()
            .filter(|&(&d, _)| space.domain().get_dim_kind(d).intersects(DimKind::VECTOR))
            .map(|(d, _)| (sizes[&d].max as u32).min(max_vectorization))
            .max()
            .unwrap_or(1);
        1. / f64::from(vectorization)
    } else {
        info.memory_transactions
    };
    info
}
/// Size and stride information for one (potential) thread dimension of an
/// access, as produced by `tensor_thread_dims`.
#[derive(Debug, Copy, Clone)]
struct ThreadDimInfo {
    /// The dimension this entry describes.
    id: ir::DimId,
    /// Set when the dimension is known for sure to be a thread dimension
    /// relevant to the instruction (see `tensor_thread_dims` and
    /// `external_thread_dims` for how this is derived).
    is_active_thread: bool,
    /// Indicates this is the part between the minimal and the maximal size of a dimension
    /// that is already accounted up to the minimal size.
    is_partial_dim: bool,
    /// Possible sizes of the dimension.
    size: size::Range,
    /// Possible strides of the access along this dimension (zero when the
    /// dimension does not appear in the access pattern).
    stride: size::Range,
    /// Known factors of the stride, used for gcd-based bank analysis.
    stride_factors: size::FactorRange,
}
impl ThreadDimInfo {
    /// Returns the part of the dimension size handled by `Self`.
    ///
    /// For a partial dimension, index `0` represents the whole base part, so
    /// `max - min` extra indexes (hence the `+1`) cover the remaining
    /// iterations; otherwise only the guaranteed `min` size is counted.
    fn partial_size(&self) -> u64 {
        match self.is_partial_dim {
            true => self.size.max - self.size.min + 1,
            false => self.size.min,
        }
    }
}
/// Returns the size and stride of thread dimensions for a tensor access pattern and
/// sort them in an optimal or better-than-optimal order. For two dimensions `d0`, `d1`
/// such that `d0.stride < d1.stride` and such that `d0` can be nested inside `d1`, the
/// order guarantees that `d0 < d1`.
///
/// Dimensions with a non-constrained size are split between a dimension for the minimal
/// size and a partial dimension for the rest.
fn tensor_thread_dims(
    space: &SearchSpace,
    inst: &ir::Instruction,
    tensor_dims: &FxHashMap<ir::DimId, ir::PartialSize>,
    sizes: &FxHashMap<ir::DimId, size::Range>,
    ctx: &dyn Context,
) -> Vec<ThreadDimInfo> {
    // Thread dimensions mapped outside the instruction.
    let external_dims = external_thread_dims(inst, space);
    // Iteration dimensions that are (or may be) thread dimensions.
    let dims = inst
        .iteration_dims()
        .iter()
        .flat_map(
            |&dim| match space.domain().get_dim_kind(dim).is(DimKind::THREAD) {
                Trivalent::False => None,
                Trivalent::Maybe => Some((dim, false)),
                Trivalent::True => Some((dim, true)),
            },
        )
        .chain(external_dims);
    let mut out = Vec::new();
    for (id, is_active_thread) in dims {
        let size = sizes[&id];
        // Dimensions absent from the access pattern get a zero stride.
        let stride_size = tensor_dims.get(&id);
        let stride = stride_size
            .map(|s| size::bounds(s, space, ctx))
            .unwrap_or(size::Range::ZERO);
        let stride_factors = stride_size
            .map(|s| size::factors(s, space, ctx))
            .unwrap_or(size::FactorRange::ZERO);
        let info = ThreadDimInfo {
            is_partial_dim: false,
            stride,
            id,
            is_active_thread,
            stride_factors,
            size,
        };
        // Unconstrained sizes get an extra partial entry for `min..max`.
        if !size.is_constrained() {
            out.push(ThreadDimInfo {
                is_partial_dim: true,
                ..info
            });
        }
        out.push(info);
    }
    out
}
/// Returns the thread dimensions that are mapped outside an instruction but not active
/// under this instruction. The returned boolean indicates if the thread dimension cannot
/// be mapped to an active dimension and if the dimension is predicated.
fn external_thread_dims<'a>(
    inst: &'a ir::Instruction,
    space: &'a SearchSpace,
) -> impl Iterator<Item = (ir::DimId, bool)> + 'a {
    space.ir_instance().thread_dims().flat_map(move |dim| {
        // Can `dim` be mapped to one of the instruction's iteration dims?
        let is_mapped = inst
            .iteration_dims()
            .iter()
            .map(|&other| {
                // Dimensions without statically-known sizes cannot be mapped.
                if space.ir_instance().dim(other).possible_sizes().is_none() {
                    return Trivalent::False;
                }
                if dim.id() == other {
                    return Trivalent::True;
                }
                let mapping = space.domain().get_thread_mapping(dim.id(), other);
                mapping.is(ThreadMapping::MAPPED)
            })
            .fold(Trivalent::False, |l, r| l | r);
        match is_mapped {
            // Fully-mapped dims are already covered by the iteration dims.
            Trivalent::True => None,
            Trivalent::Maybe => Some((dim.id(), false)),
            Trivalent::False => Some((dim.id(), true)),
        }
    })
}
/// Sort thread dimensions in an optimal or better-than-optimal order. The order may not
/// respect dependencies since we don't know the exact order and it would be too costly to
/// explore all of them (exponential). Instead we compute the minimal number of inner
/// thread dimension for each dimension and ensure this amount is respected.
///
/// Because we only support tensor accesses, bigger strides are multiples of smaller
/// strides. Thus smaller stride will lead to less replays.
fn sort_thread_dims(
    dims: Vec<ThreadDimInfo>,
    use_gcd: bool,
    space: &SearchSpace,
    gpu: &Gpu,
) -> Vec<ThreadDimInfo> {
    // Dimensions that are certainly threads constrain the nesting order.
    let sure_thread_dims = dims
        .iter()
        .filter(|d| d.is_active_thread)
        .map(|d| d.id)
        .collect_vec();
    let cmp = |x: &ThreadDimInfo, y: &ThreadDimInfo| cmp_thread_dims(x, y, use_gcd, gpu);
    let mut heap = BinaryHeap::with_capacity_by(dims.len(), cmp);
    // Group each dimension by the number of sure thread dims mapped inside
    // it; it only becomes a heap candidate once that many dims were emitted.
    let mut dim_groups: FxMultiHashMap<_, _> = dims
        // Do not account for partial dims
        .into_iter()
        .map(|d| {
            let num_inner = sure_thread_dims
                .iter()
                .filter(|&&other| {
                    if other == d.id {
                        return false;
                    }
                    let mapping = space.domain().get_thread_mapping(d.id, other);
                    mapping.is(ThreadMapping::MAPPED_OUT).is_true()
                })
                .count();
            (num_inner, d)
        })
        .collect();
    heap.extend(dim_groups.remove(&0));
    let mut out = Vec::new();
    let mut total_size = 1;
    while let Some(d) = heap.pop() {
        if d.is_partial_dim {
            // The base part already accounted for `min`; rescale to `max`.
            total_size = (total_size / d.size.min) * d.size.max
        } else {
            total_size *= d.size.min;
        }
        out.push(d);
        heap.extend(dim_groups.remove(&out.len()));
        // Once a full wrap is covered, deeper dimensions are irrelevant.
        if total_size >= u64::from(gpu.wrap_size) {
            break;
        }
    }
    trace!("sorted dims: {:?}", out);
    out
}
/// Indicates which loop nest order should be considered to minimize replays.
///
/// The comparison is reversed (`rhs` vs `lhs`) so that the max-heap in
/// `sort_thread_dims` pops the dimension with the smallest stride (or stride
/// gcd) first; on ties, base dimensions come out before their partial part.
fn cmp_thread_dims(
    lhs: &ThreadDimInfo,
    rhs: &ThreadDimInfo,
    use_gcd: bool,
    gpu: &Gpu,
) -> std::cmp::Ordering {
    let (lhs_val, rhs_val) = if use_gcd {
        // Compare the gcd of each stride with the wrap-level replay distance
        // (`wrap_size * shared_bank_stride`), i.e. what matters for banks.
        let replay_distance = u64::from(gpu.wrap_size * gpu.shared_bank_stride);
        let lhs_val = lhs.stride_factors.gcd.gcd(&replay_distance);
        let rhs_val = rhs.stride_factors.gcd.gcd(&replay_distance);
        (lhs_val, rhs_val)
    } else {
        (lhs.stride.min, rhs.stride.min)
    };
    rhs_val
        .cmp(&lhs_val)
        .then(rhs.is_partial_dim.cmp(&lhs.is_partial_dim))
}
/// Returns the offset of memory accesses for each thread in a wrap. The offset is
/// relative to the access of the first thread.
fn wrap_access_offsets(
    thread_dims: &[ThreadDimInfo],
    use_gcd: bool,
    gpu: &Gpu,
) -> Vec<u64> {
    let mut offsets = Vec::with_capacity(gpu.wrap_size as usize);
    // The first thread is the origin.
    offsets.push(0);
    // Multi-dimensional index of the current thread, one entry per dim.
    let mut indexes = vec![0; thread_dims.len()];
    while offsets.len() < gpu.wrap_size as usize {
        // Advance to the next thread, odometer-style, innermost dim first.
        let mut incr = true;
        for (i, dim) in thread_dims.iter().enumerate() {
            if incr {
                incr = increment_index(i, thread_dims, &mut indexes);
            }
            // A partial dim at index > 0 implies its base dim sits at its
            // maximal index; force that invariant on the base entry.
            if dim.is_partial_dim && indexes[i] > 0 {
                // TODO(cc_perf): save the index of real dimensions instead of recomputing.
                let real_pos = thread_dims[0..i].iter().position(|d| d.id == dim.id);
                let real_pos = unwrap!(real_pos, "partial dim ordered before its base");
                assert!(!thread_dims[real_pos].is_partial_dim);
                indexes[real_pos] = thread_dims[real_pos].size.min - 1;
            }
        }
        // Offset = sum over dimensions of index * stride (or stride gcd).
        let offset = thread_dims
            .iter()
            .enumerate()
            .map(|(i, dim)| {
                let stride = if use_gcd {
                    dim.stride_factors.gcd
                } else {
                    dim.stride.min
                };
                indexes[i] * stride
            })
            .sum();
        if incr {
            break;
        } // We reached the end of all loops.
        offsets.push(offset);
    }
    offsets
}
/// Increments the index at the given position modulo the dimension size.
/// Returns `true` when the index wrapped around, meaning the next (outer)
/// index should be incremented as well.
fn increment_index(pos: usize, dims: &[ThreadDimInfo], indexes: &mut [u64]) -> bool {
    let next = indexes[pos] + 1;
    if next < dims[pos].partial_size() {
        indexes[pos] = next;
        false
    } else {
        indexes[pos] = 0;
        true
    }
}
/// Compute the replay factor caused by shared memory accesses.
fn shared_memory_transactions(
    thread_dims: Vec<ThreadDimInfo>,
    tensor_dims: &FxHashMap<ir::DimId, ir::PartialSize>,
    dim_sizes: &FxHashMap<ir::DimId, size::Range>,
    space: &SearchSpace,
    gpu: &Gpu,
) -> f64 {
    let thread_dims = sort_thread_dims(thread_dims, true, space, gpu);
    // Handle replays caused by offsets.
    let mut offsets = vec![wrap_access_offsets(&thread_dims, true, gpu)];
    // Handle the case where the last dimension may not be active. In that case we also
    // try without the dimension as considering it as a thread may increase the pressure.
    // Only the last dimension needs special handling as other dimensions are fully
    // contained into a wrap.
    if thread_dims
        .last()
        .map(|d| !d.is_active_thread)
        .unwrap_or(false)
    {
        offsets.push(wrap_access_offsets(
            &thread_dims[0..thread_dims.len() - 1],
            true,
            gpu,
        ));
    }
    // Keep the most optimistic (smallest) replay factor of the candidates.
    let replay = offsets
        .iter()
        .map(|offsets| offsets_shared_memory_transactions(offsets, gpu))
        .min()
        .unwrap();
    // Handle the case where a single thread must access two banks.
    let vector_replay = tensor_dims
        .iter()
        .flat_map(|(&d, stride)| stride.as_int().map(|s| (d, s)))
        .filter(|&(d, _)| space.domain().get_dim_kind(d).intersects(DimKind::VECTOR))
        .map(|(d, stride)| dim_sizes[&d].min as u32 * stride)
        .map(|size| div_ceil(size, gpu.shared_bank_stride))
        .min()
        .unwrap_or(1);
    let replay = std::cmp::max(replay, vector_replay);
    trace!("shared_replay: {}", replay);
    f64::from(replay)
}
/// Computes the replay factor for a list of shared memory access.
fn offsets_shared_memory_transactions(offsets: &[u64], gpu: &Gpu) -> u32 {
    // We only need to account for hits on the first bank. Other banks will have a smaller
    // replay factor.
    let mut hits: FxHashSet<_> = std::iter::once(0).collect();
    for &offset in offsets {
        let num_bank_stride = offset / u64::from(gpu.shared_bank_stride);
        // `rem == 0` means this offset falls on the first bank; each distinct
        // `hit_id` on that bank costs one replay.
        let (hit_id, rem) = num_bank_stride.div_rem(&(u64::from(gpu.wrap_size)));
        if rem == 0 {
            hits.insert(hit_id);
        }
    }
    hits.len() as u32
}
/// Computes the L1, L2 coalescing and replay factor for a global memory access.
fn global_coalescing(
    thread_dims: Vec<ThreadDimInfo>,
    space: &SearchSpace,
    gpu: &Gpu,
) -> (f64, f64, f64) {
    let thread_dims = sort_thread_dims(thread_dims, false, space, gpu);
    let offsets = wrap_access_offsets(&thread_dims, true, gpu);
    trace!("global offsets: {:?}", offsets);
    let (mut l1_coalescing, mut l2_coalescing, mut replay) =
        offsets_global_coalescing(&offsets, gpu);
    // As in `shared_memory_transactions`: when the last sorted dimension may
    // not be a thread, also evaluate the access without it and keep the best.
    if thread_dims
        .last()
        .map(|d| !d.is_active_thread)
        .unwrap_or(false)
    {
        let offsets =
            wrap_access_offsets(&thread_dims[0..thread_dims.len() - 1], true, gpu);
        trace!("global offsets (last inactive): {:?}", offsets);
        let (l1, l2, r) = offsets_global_coalescing(&offsets, gpu);
        l1_coalescing = f64::min(l1_coalescing, l1);
        l2_coalescing = f64::min(l2_coalescing, l2);
        replay = f64::min(replay, r);
    }
    (l1_coalescing, l2_coalescing, replay)
}
/// Computes the L1, L2 coalescing and replay factor for a global memory access.
fn offsets_global_coalescing(offsets: &[u64], gpu: &Gpu) -> (f64, f64, f64) {
    let mut l1_lines: FxHashSet<_> = std::iter::once(0).collect();
    let mut l2_lines: FxHashSet<_> = std::iter::once(0).collect();
    // Compute the lines accessed by each thread in a wrap.
    for &offset in offsets {
        l1_lines.insert(offset / u64::from(gpu.l1_cache_sector));
        l2_lines.insert(offset / u64::from(gpu.l2_cache_line));
    }
    trace!(
        "global_replay: {} (size: {})",
        l1_lines.len(),
        offsets.len()
    );
    // Coalescing = distinct cache lines touched per thread of the wrap; the
    // replay factor is the number of distinct L1 lines.
    let l1_coalescing = l1_lines.len() as f64 / offsets.len() as f64;
    let l2_coalescing = l2_lines.len() as f64 / offsets.len() as f64;
    (l1_coalescing, l2_coalescing, l1_lines.len() as f64)
}
/*
/// Computes the miss ratio for L2 cache.
fn miss_ratios(inst: &ir::Instruction,
pattern: &ir::AccessPattern,
space: &SearchSpace,
gpu: &cuda::Gpu,
sizes: &FxHashMap<ir::DimId, u32>) -> f64 {
// Compute MSHR, without taking other accesses into account.
// (1) Find accesses to the sane memory block.
let other_accesses = space.ir_instance().insts().filter(|other_inst| {
let other_mem = other_inst.operator().mem_access_pattern().map(|x| x.mem_block());
*other_inst != inst && other_mem == Some(pattern.mem_block())
}).collect_vec();
// (2) Find the MSHR cache hit ratio on each active dimension.
let mshr_miss = space.ir_instance().dims().filter(|&dim| {
let kind = space.domain().get_dim_kind(dim.id());
space.domain().get_order(dim.stmt_id(), inst.stmt_id()) == Order::ACTIVE_OUT
&& !(DimKind::BLOCK | DimKind::VECTOR).contains(kind)
}).map(|dim| {
// fixme: use other accesses
let has_other_access = false; /*other_accesses.iter().any(|other| {
fun.order(other.stmt_id(), dim.stmt_id()).intersects(Order::INNER)
});*/
if has_other_access {
// TODO(model): better handle other accesses to the same memory block
0.0
} else {
let size = sizes[&dim.id()];
let stride = eval_stride(pattern, dim.id(), sizes).unwrap_or(0);
let reuse_distance = reuse_distance(inst, dim, pattern, space, sizes, gpu);
let mshr_miss = if reuse_distance > gpu.mshr_per_smx {
1.0
} else if size == 1 {
0.0
} else {
let num_lines = 1 + (stride*(size as i32-1))/gpu.l1_cache_sector as i32;
f64::min(num_lines as f64/size as f64, 1.0)
};
trace!("dim: {:?}, kind: {:?}, reuse_distance: {}, stride: {}, mshr_miss: {}",
dim, space.domain().get_dim_kind(dim.id()), reuse_distance, stride, mshr_miss);
mshr_miss
}
}).product();
// TODO(model): take other accesses into account.
// TODO(model): compute L2 miss
// TODO(model): take flags into account.
// TODO(model): handle block dimensions.
trace!("Inst {:?} = mshr_miss: {}", inst.id(), mshr_miss);
// fixme: does not account for reuse in the first iterations
0.0
}
/// Computes the reuse distance between two iterations of `dim` for the given pattern.
fn reuse_distance(inst: &ir::Instruction,
dim: &ir::Dimension,
pattern: &ir::AccessPattern,
space: &SearchSpace,
sizes: &FxHashMap<ir::DimId, u32>,
gpu: &cuda::Gpu) -> u32 {
space.ir_instance().dims().filter(|&other_dim| {
other_dim.id() != dim.id() &&
space.domain().get_order(other_dim.stmt_id(), inst.stmt_id()) == Order::ACTIVE_OUT &&
dynamic_nesting(dim, other_dim, space) == Some(Ordering::Greater)
}).map(|other_dim| {
let stride = eval_stride(pattern, other_dim.id(), sizes).unwrap_or(0) as u32;
let size = sizes[&other_dim.id()] as u32;
1 + std::cmp::min(size - 1, stride*(size-1)/gpu.l1_cache_sector)
}).product::<u32>() - 1
}
/// Evaluate the stride of an access pattern of a given dimension.
fn eval_stride(pattern: &ir::AccessPattern,
dim: ir::DimId,
sizes: &FxHashMap<ir::DimId, u32>) -> ir::Stride {
match *pattern {
ir::AccessPattern::Unknown { .. } => ir::Stride::Unknown,
ir::AccessPattern::Tensor { ref stride, ref dims, .. } => {
let mut it = dims.iter().skip_while(|other| **other != dim);
if it.next().is_some() {
ir::Stride::Int(it.map(|d| sizes[d] as i32).product::<i32>() * stride)
} else {
ir::Stride::Int(0)
}
},
}
}
/// Compare the nesting of two dimension in the dynamic schedule. Yeilds a valid partial order.
fn dynamic_nesting(lhs: &ir::Dimension, rhs: &ir::Dimension, space: &SearchSpace)
-> Option<Ordering> {
if lhs.id() == rhs.id() { return Some(Ordering::Equal); }
let order = space.domain().get_order(lhs.stmt_id(), rhs.stmt_id());
let lhs_kind = space.domain().get_dim_kind(lhs.id());
let rhs_kind = space.domain().get_dim_kind(rhs.id());
let lhs_is_thread = lhs_kind.is(DimKind::THREAD);
let rhs_is_thread = rhs_kind.is(DimKind::THREAD);
let lhs_is_vector = lhs_kind.is(DimKind::VECTOR);
let rhs_is_vector = rhs_kind.is(DimKind::VECTOR);
match (lhs_is_thread, rhs_is_thread, lhs_is_vector, rhs_is_vector) {
// Handle ordering with vectors
(_, _, Trivalent::True, _) => Some(Ordering::Less),
(_, _, _, Trivalent::True) => Some(Ordering::Greater),
// Thread/Non-Thread ordering
(Trivalent::True, Trivalent::False, _, Trivalent::Maybe) => None,
(Trivalent::True, Trivalent::False, _, Trivalent::False) => Some(Ordering::Less),
// Non-Thread/Thread ordering
(Trivalent::False, Trivalent::True, Trivalent::Maybe, _) => None,
(Trivalent::False, Trivalent::True, Trivalent::False, _) => Some(Ordering::Greater),
// Non-Thread/Non-Thread and Thread/Thread ordering
(Trivalent::False, Trivalent::False, _, _) |
(Trivalent::True, Trivalent::True, _, _) => {
// Order per nesting order.
if order.is(Order::INNER).is_true() { Some(Ordering::Less) }
else if order.is(Order::OUTER).is_true() { Some(Ordering::Greater) }
else { None }
},
// In some cases, we can't say anything.
(_, Trivalent::Maybe, _, _) |
(Trivalent::Maybe, _, _, _) => None
}
}
*/
#[cfg(test)]
#[cfg(feature = "real_gpu")]
#[allow(clippy::float_cmp)]
mod tests {
    use super::*;
    use crate::{Context, Executor, Gpu};
    use env_logger;
    use std::sync::Arc;
    use telamon::device::Device;
    use telamon::model::size::Range;
    use telamon::search_space::Order;
    use telamon::{helper, ir};
    /// Generates function with a load in two thread dimensions, with non-coalesced
    /// accesses on the first one. `d0_d1_order` controls the nesting of the
    /// two dimensions.
    fn gen_function<'a>(
        signature: Arc<ir::Signature>,
        gpu: &'a Gpu,
        d0_d1_order: Order,
    ) -> (SearchSpace, ir::InstId, FxHashMap<ir::DimId, Range>) {
        let mut builder = helper::Builder::new(signature, Arc::new(gpu.clone()));
        let t = ir::Type::F(32);
        let size = builder.cst_size(gpu.wrap_size);
        let addr_base = builder.cast(&0i64, gpu.pointer_type(MemSpace::GLOBAL));
        let d0 = builder.open_dim_ex(size.clone(), DimKind::THREAD);
        let d1 = builder.open_dim_ex(size.clone(), DimKind::THREAD);
        // Stride of one L1 sector per iteration of `d0`: no reuse along `d0`.
        let addr = builder.mad(&d0, &(gpu.l1_cache_sector as i32), &addr_base);
        let stride = ir::Size::new_const(gpu.l1_cache_sector);
        let pattern = builder.tensor_access_pattern(None, vec![(&d0, stride)]);
        let ld = builder.ld_ex(t, &addr, pattern, InstFlag::CACHE_GLOBAL);
        builder.order(&d0, &d1, d0_d1_order);
        let mut size_map = FxHashMap::default();
        let wrap_size = Range {
            min: gpu.wrap_size.into(),
            max: gpu.wrap_size.into(),
        };
        size_map.insert(d0[0], wrap_size);
        size_map.insert(d1[0], wrap_size);
        (builder.get(), ld, size_map)
    }
    /// Generates a dummy signature.
    fn gen_signature() -> ir::Signature {
        ir::Signature {
            name: String::new(),
            params: vec![],
        }
    }
    /// Tests `MemInfo` for global loads with full coalescing.
    #[test]
    fn global_full_coalescing() {
        let _ = env_logger::try_init();
        let executor = Executor::init();
        let ctx = Context::new(&executor);
        let gpu = Gpu::from_executor(&executor);
        let base = gen_signature();
        // `d0` ordered outside `d1`.
        let (space, inst, size_map) = gen_function(base.into(), &gpu, Order::OUTER);
        let inst = space.ir_instance().inst(inst);
        let inst_info = analyse(&space, &gpu, &inst, &size_map, &ctx);
        // One cache line per wrap: fully coalesced.
        assert_eq!(inst_info.l1_coalescing, 1.0 / f64::from(gpu.wrap_size));
        assert_eq!(inst_info.l2_coalescing, 1.0 / f64::from(gpu.wrap_size));
        assert_eq!(inst_info.memory_transactions, 1.0);
    }
    /// Tests `MemInfo` for global loads without coalescing.
    #[test]
    fn global_no_coalescing() {
        let _ = env_logger::try_init();
        let executor = Executor::init();
        let ctx = Context::new(&executor);
        let gpu = Gpu::from_executor(&executor);
        let base = gen_signature();
        // `d0` ordered inside `d1`.
        let (space, inst, size_map) = gen_function(base.into(), &gpu, Order::INNER);
        let inst = space.ir_instance().inst(inst);
        let inst_info = analyse(&space, &gpu, &inst, &size_map, &ctx);
        // One cache line per thread: no coalescing at all.
        assert_eq!(inst_info.l1_coalescing, 1.0);
        assert_eq!(inst_info.l2_coalescing, 1.0);
        assert_eq!(inst_info.memory_transactions, f64::from(gpu.wrap_size));
    }
    /// Builds a `ThreadDimInfo` with a fixed stride for the offset tests.
    fn thread_dim_info(
        id: u32,
        partial: bool,
        min_size: u64,
        max_size: u64,
        stride: u64,
    ) -> ThreadDimInfo {
        ThreadDimInfo {
            id: ir::DimId(id),
            is_active_thread: true,
            is_partial_dim: partial,
            size: size::Range {
                min: min_size,
                max: max_size,
            },
            stride: size::Range {
                min: stride,
                max: stride,
            },
            stride_factors: size::FactorRange::new_fixed(stride),
        }
    }
    /// Tests offsets computation.
    #[test]
    fn offsets() {
        let _ = env_logger::try_init();
        let gpu = Gpu::dummy();
        let big_dim_0 = thread_dim_info(0, false, 32, 32, 0);
        let big_dim_1 = thread_dim_info(1, false, 32, 32, 1);
        let small_dim_0 = thread_dim_info(0, false, 4, 4, 0);
        let small_dim_1 = thread_dim_info(1, false, 4, 4, 1);
        let offsets_big_0 = wrap_access_offsets(&[big_dim_0, big_dim_1], false, &gpu);
        let offsets_big_1 = wrap_access_offsets(&[big_dim_1, big_dim_0], false, &gpu);
        let offsets_small = wrap_access_offsets(&[small_dim_0, small_dim_1], false, &gpu);
        assert_eq!(offsets_big_0, vec![0; 32]);
        assert_eq!(offsets_big_1, (0..32).collect_vec());
        assert_eq!(
            offsets_small,
            vec![0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3]
        );
    }
    /// Tests offsets computation, with partial dims.
    #[test]
    fn offsets_with_partial_dims() {
        let _ = env_logger::try_init();
        let gpu = Gpu::dummy();
        // Create two dimensions with size range [2, 4] and strides 0, 1.
        let beg_0 = thread_dim_info(0, false, 2, 4, 0);
        let end_0 = thread_dim_info(0, true, 2, 4, 0);
        let beg_1 = thread_dim_info(1, false, 2, 4, 1);
        let end_1 = thread_dim_info(1, true, 2, 4, 1);
        let offsets0 = wrap_access_offsets(&[beg_1, beg_0, end_0, end_1], false, &gpu);
        let offsets1 = wrap_access_offsets(&[beg_1, beg_0, end_1, end_0], false, &gpu);
        assert_eq!(
            offsets0,
            vec![0, 1, 0, 1, 0, 1, 0, 1, 2, 2, 2, 2, 3, 3, 3, 3]
        );
        assert_eq!(
            offsets1,
            vec![0, 1, 0, 1, 2, 2, 3, 3, 0, 1, 2, 3, 0, 1, 2, 3]
        );
    }
}
|
use opengl_graphics::gl::types::GLuint;
use opengl_graphics::gl;
/// A 3D OpenGL texture storing one byte per texel (`R8` internal format).
pub struct Texture3D {
    /// OpenGL texture object name.
    id: GLuint,
    /// Texture extent along the x axis, in texels.
    width: u32,
    /// Texture extent along the y axis, in texels.
    height: u32,
    /// Texture extent along the z axis, in texels.
    depth: u32,
}
impl Texture3D {
    /// Wraps an existing GL texture name with its dimensions.
    ///
    /// # Safety
    /// `id` should be a valid 3D texture object matching the given
    /// dimensions; no GL validation is performed here.
    pub unsafe fn new(id: GLuint, width: u32, height: u32, depth: u32) -> Self {
        Self {
            id, width, height, depth
        }
    }
    /// Returns the raw GL texture name.
    pub fn get_id(&self) -> GLuint {
        self.id
    }
    /// Creates a 1x1x1 zeroed placeholder texture.
    // NOTE(review): 4 bytes are supplied for a single R8 texel; only the
    // first byte is read by the upload.
    pub fn empty() -> Result<Self, String> {
        Self::create(&[0u8; 4], [1, 1, 1])
    }
    /// Uploads `data` (one byte per texel) as a new 3D texture of the given
    /// `[width, height, depth]` dimensions.
    pub fn from_data(data: &[u8], dimensions: [usize; 3]) -> Result<Self, String> {
        // One `u8` per texel: RED / UNSIGNED_BYTE upload below.
        assert_eq!(data.len(), dimensions[0] * dimensions[1] * dimensions[2]);
        Self::create(data, dimensions)
    }
    /// Allocates the GL texture, configures filtering/wrapping and uploads
    /// `memory`.
    // NOTE(review): always returns `Ok`; GL errors are not checked here.
    fn create(memory: &[u8], size: [usize; 3]) -> Result<Self, String> {
        let mut id = 0;
        let internal_format = gl::R8;
        unsafe {
            gl::GenTextures(1, &mut id);
            gl::BindTexture(gl::TEXTURE_3D, id);
            // Linear filtering and mirrored-repeat wrapping on all three axes.
            gl::TexParameteri(gl::TEXTURE_3D, gl::TEXTURE_MIN_FILTER, gl::LINEAR as i32);
            gl::TexParameteri(gl::TEXTURE_3D, gl::TEXTURE_MAG_FILTER, gl::LINEAR as i32);
            gl::TexParameteri(gl::TEXTURE_3D, gl::TEXTURE_WRAP_S, gl::MIRRORED_REPEAT as i32);
            gl::TexParameteri(gl::TEXTURE_3D, gl::TEXTURE_WRAP_T, gl::MIRRORED_REPEAT as i32);
            gl::TexParameteri(gl::TEXTURE_3D, gl::TEXTURE_WRAP_R, gl::MIRRORED_REPEAT as i32);
            gl::TexImage3D(
                gl::TEXTURE_3D,
                0,
                internal_format as i32,
                size[0] as i32,
                size[1] as i32,
                size[2] as i32,
                0,
                gl::RED,
                gl::UNSIGNED_BYTE,
                memory.as_ptr() as *const _);
            Ok(Self::new(id, size[0] as _, size[1] as _, size[2] as _))
        }
    }
    /// Re-uploads the whole texture contents in place.
    pub fn update_with(&mut self, data: &[u8]) {
        assert_eq!(data.len() as u32, self.width * self.height * self.depth);
        let offset = [0, 0, 0];
        unsafe {
            gl::BindTexture(gl::TEXTURE_3D, self.id);
            gl::TexSubImage3D(gl::TEXTURE_3D,
                0,
                offset[0],
                offset[1],
                offset[2],
                self.width as _,
                self.height as _,
                self.depth as _,
                gl::RED,
                gl::UNSIGNED_BYTE,
                data.as_ptr() as *const _);
        }
    }
}
impl Drop for Texture3D {
    /// Releases the GL texture object when the wrapper goes out of scope.
    fn drop(&mut self) {
        let handles = [self.id];
        unsafe {
            gl::DeleteTextures(1, handles.as_ptr());
        }
    }
}
|
use crate::ser::write::{write_varint, write_varlong};
use serde::de::{DeserializeSeed, MapAccess, Visitor};
use serde::export::Formatter;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
/// A variable-length encoded `i32`, serialized via `write_varint`.
#[derive(Debug, Clone)]
pub struct VarInt(pub i32);
/// A variable-length encoded `i64`, serialized via `write_varlong`.
#[derive(Debug, Clone)]
pub struct VarLong(pub i64);
impl Serialize for VarInt {
    /// Serializes the value as its raw varint byte sequence.
    fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, S::Error>
    where
        S: Serializer,
    {
        let mut encoded: Vec<u8> = Vec::new();
        write_varint(&self.0, &mut encoded);
        serializer.serialize_bytes(&encoded)
    }
}
impl Serialize for VarLong {
    /// Serializes the value as its raw varlong byte sequence.
    fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, S::Error>
    where
        S: Serializer,
    {
        let mut encoded: Vec<u8> = Vec::new();
        write_varlong(&self.0, &mut encoded);
        serializer.serialize_bytes(&encoded)
    }
}
impl<'de> Deserialize<'de> for VarLong {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        /// Visitor that accepts the plain `i64` produced by the format layer.
        struct Visitor64;
        impl<'de> Visitor<'de> for Visitor64 {
            type Value = VarLong;
            fn expecting(&self, formatter: &mut Formatter) -> Result<(), std::fmt::Error> {
                formatter.write_str("varlong")
            }
            fn visit_i64<E>(self, value: i64) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                Ok(VarLong(value))
            }
        }
        // The custom deserializer recognizes this marker as a varlong field.
        deserializer.deserialize_newtype_struct("MCVARLONG", Visitor64)
    }
}
impl<'de> Deserialize<'de> for VarInt {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        /// Visitor that accepts the plain `i32` produced by the format layer.
        struct Visitor32;
        impl<'de> Visitor<'de> for Visitor32 {
            type Value = VarInt;
            fn expecting(&self, formatter: &mut Formatter) -> Result<(), std::fmt::Error> {
                formatter.write_str("varint")
            }
            fn visit_i32<E>(self, value: i32) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                Ok(VarInt(value))
            }
        }
        // The custom deserializer recognizes this marker as a varint field.
        deserializer.deserialize_newtype_struct("MCVARINT", Visitor32)
    }
}
impl From<i32> for VarInt {
#[inline]
fn from(v: i32) -> Self {
VarInt(v)
}
}
impl From<i64> for VarLong {
#[inline]
fn from(v: i64) -> Self {
VarLong(v)
}
}
impl From<VarLong> for i64 {
fn from(v: VarLong) -> Self {
v.0
}
}
impl From<VarInt> for i32 {
fn from(v: VarInt) -> Self {
v.0
}
}
|
use prettytable::{table, Table};
use rust_decimal::prelude::*;
use crate::domain::collecting::{
collections::{
Collection, CollectionStats, Depot, Year, YearlyCollectionStats,
},
wish_lists::WishList,
};
/// Conversion of domain values into a printable `prettytable` [`Table`].
pub trait AsTable {
    /// Consumes `self` and renders it as a table.
    fn to_table(self) -> Table;
}
impl AsTable for WishList {
    /// Renders the wish list as a table, one row per item, sorting first.
    fn to_table(mut self) -> Table {
        self.sort_items();
        let mut table = Table::new();
        table.add_row(row![
            "#",
            "Brand",
            "Item number",
            "Cat.",
            "Priority",
            "Scale",
            "PM",
            "Description",
            "Count",
            "Price range",
        ]);
        for (ind, it) in self.get_items().iter().enumerate() {
            let ci = it.catalog_item();
            // Show "-" when the item carries no price information.
            let price_range = if let Some((min, max)) = it.price_range() {
                format!("from {} to {}", min.price(), max.price())
            } else {
                String::from("-")
            };
            // prettytable cell specifiers: b = bold, i = italic,
            // c = centered, r = right-aligned.
            table.add_row(row![
                ind + 1,
                b -> ci.brand().name(),
                ci.item_number(),
                c -> ci.category(),
                c -> it.priority().to_string(),
                ci.scale(),
                ci.power_method(),
                i -> substring(ci.description()),
                r -> ci.count(),
                c -> price_range,
            ]);
        }
        table
    }
}
impl AsTable for Depot {
    /// Renders the depot as a table with one row per locomotive card.
    fn to_table(self) -> Table {
        let mut table = Table::new();
        table.add_row(row![
            "#",
            "Class name",
            "Road number",
            "Series",
            "Livery",
            "Brand",
            "Item Number",
            "With decoder",
            "DCC",
        ]);
        for (id, card) in self.locomotives().iter().enumerate() {
            // Decoder presence shown as a Y/N flag.
            let with_dec = if card.with_decoder() { "Y" } else { "N" };
            // prettytable cell specifiers: b = bold, c = centered.
            table.add_row(row![
                c -> (id + 1).to_string(),
                b -> card.class_name().to_string(),
                card.road_number().to_string(),
                card.series().unwrap_or_default(),
                card.livery().unwrap_or_default(),
                card.brand().to_string(),
                card.item_number().to_string(),
                c -> with_dec.to_string(),
                c -> card.dcc_interface()
                    .map(|dcc| dcc.to_string())
                    .unwrap_or_default(),
            ]);
        }
        table
    }
}
impl AsTable for CollectionStats {
    /// Renders per-year collection statistics, followed by a TOTAL row.
    fn to_table(self) -> Table {
        let mut table = Table::new();
        table.add_row(row![
            "Year",
            "Locomotives (no.)",
            "Locomotives (EUR)",
            "Trains (no.)",
            "Trains (EUR)",
            "Passenger Cars (no.)",
            "Passenger Cars (EUR)",
            "Freight Cars (no.)",
            "Freight Cars (EUR)",
            "Total (no.)",
            "Total (EUR)"
        ]);
        // One row per year, in the order `values_by_year` yields them.
        for stats in self.values_by_year() {
            table.add_row(row![
                stats.year().to_string(),
                r -> stats.number_of_locomotives().to_string(),
                r -> stats.locomotives_value().to_string(),
                r -> stats.number_of_trains().to_string(),
                r -> stats.trains_value().to_string(),
                r -> stats.number_of_passenger_cars().to_string(),
                r -> stats.passenger_cars_value().to_string(),
                r -> stats.number_of_freight_cars().to_string(),
                r -> stats.freight_cars_value().to_string(),
                r -> stats.number_of_rolling_stocks().to_string(),
                r -> stats.total_value().to_string(),
            ]);
        }
        // Grand totals across every year.
        table.add_row(row![
            "TOTAL",
            r -> self.number_of_locomotives().to_string(),
            r -> self.locomotives_value().to_string(),
            r -> self.number_of_trains().to_string(),
            r -> self.trains_value().to_string(),
            r -> self.number_of_passenger_cars().to_string(),
            r -> self.passenger_cars_value().to_string(),
            r -> self.number_of_freight_cars().to_string(),
            r -> self.freight_cars_value().to_string(),
            r -> self.number_of_rolling_stocks().to_string(),
            r -> self.total_value().to_string(),
        ]);
        table
    }
}
impl AsTable for Collection {
    /// Renders the collection as a table, one row per item, sorted first.
    fn to_table(mut self) -> Table {
        self.sort_items();
        let mut table = Table::new();
        table.add_row(row![
            "#",
            "Brand",
            "Item number",
            "Scale",
            "PM",
            "Cat.",
            "Description",
            "Count",
            "Added",
            "Price",
            "Shop"
        ]);
        for (idx, item) in self.get_items().iter().enumerate() {
            let catalog = item.catalog_item();
            let bought = item.purchased_info();
            table.add_row(row![
                idx + 1,
                b -> catalog.brand().name(),
                catalog.item_number(),
                catalog.scale(),
                catalog.power_method(),
                c -> catalog.category(),
                i -> substring(catalog.description()),
                r -> catalog.count(),
                bought.purchased_date().format("%Y-%m-%d").to_string(),
                r -> bought.price(),
                bought.shop(),
            ]);
        }
        table
    }
}
/// Truncates a description to at most 50 characters for table display.
///
/// Returns `s` unchanged when it is shorter than 50 characters; otherwise
/// returns the first 47 characters followed by `"..."`.
///
/// Counting and cutting are done on `char` boundaries. The previous
/// byte-based version (`s.len()` + `s[0..47]`) panicked whenever byte 47
/// fell inside a multi-byte UTF-8 character, and mis-measured non-ASCII
/// strings.
fn substring(s: &str) -> String {
    if s.chars().count() < 50 {
        s.to_owned()
    } else {
        let mut output: String = s.chars().take(47).collect();
        output.push_str("...");
        output
    }
}
|
use aligned::{Aligned, A8};
use core::ops::{Deref, DerefMut};
use crate::{RxDescriptor, TxDescriptor, MTU};
/// A descriptor that can be linked into a ring.
pub trait RingDescriptor {
    /// Points this descriptor at `buffer`/`len` and chains it to `next`
    /// (pass `None` for the last descriptor in the ring).
    fn setup(&mut self, buffer: *const u8, len: usize, next: Option<&Self>);
}
/// One element of a descriptor ring: a single descriptor plus its packet
/// buffer, both forced to 8-byte alignment via `Aligned<A8, _>`.
pub struct RingEntry<T: Clone + RingDescriptor> {
    // One-element array so the descriptor itself carries the A8 alignment.
    desc: Aligned<A8, [T; 1]>,
    // Backing packet buffer, MTU bytes long.
    buffer: Aligned<A8, [u8; MTU]>,
}
impl<T: Clone + RingDescriptor> Clone for RingEntry<T> {
    /// Clones the descriptor and copies the byte buffer bitwise.
    fn clone(&self) -> Self {
        let desc = (*self.desc).clone();
        let buffer = *self.buffer;
        RingEntry {
            desc: Aligned(desc),
            buffer: Aligned(buffer),
        }
    }
}
impl<T: Clone + RingDescriptor + Default> Default for RingEntry<T> {
    /// Entry with a default descriptor and a zeroed buffer.
    fn default() -> Self {
        Self {
            desc: Aligned([T::default()]),
            buffer: Aligned([0u8; MTU]),
        }
    }
}
impl RingEntry<TxDescriptor> {
    /// Creates a RingEntry with a TxDescriptor.
    pub const fn new() -> Self {
        Self {
            desc: Aligned([TxDescriptor::new()]),
            buffer: Aligned([0u8; MTU]),
        }
    }
}
impl RingEntry<RxDescriptor> {
    /// Creates a RingEntry with a RxDescriptor.
    pub const fn new() -> Self {
        Self {
            desc: Aligned([RxDescriptor::new()]),
            buffer: Aligned([0u8; MTU]),
        }
    }
}
impl<T: Clone + RingDescriptor> RingEntry<T> {
pub(crate) fn setup(&mut self, next: Option<&Self>) {
let buffer = self.buffer.as_ptr();
let len = self.buffer.len();
self.desc_mut()
.setup(buffer, len, next.map(|next| next.desc()));
}
#[inline]
pub(crate) fn desc(&self) -> &T {
&self.desc.deref()[0]
}
#[inline]
pub(crate) fn desc_mut(&mut self) -> &mut T {
&mut self.desc.deref_mut()[0]
}
#[inline]
pub(crate) fn as_slice(&self) -> &[u8] {
&(*self.buffer)[..]
}
#[inline]
pub(crate) fn as_mut_slice(&mut self) -> &mut [u8] {
&mut (*self.buffer)[..]
}
}
|
use uefi::boot_services::protocols;
use uefi::boot_services::BootServices;
use uefi::{CStr16,
Status};
use uefi::status::NOT_FOUND;
use PATH_FALLBACK_KERNEL;
/// Boot configuration, normally read from a file on the system volume.
pub struct Configuration<'bs>
{
    /// Path of the kernel image to load. A `Cow` so it can either borrow
    /// the static fallback path or own a string allocated via boot services.
    pub kernel: ::uefi::borrow::Cow<'bs, 'static, CStr16>,
    //commandline: ::uefi::borrow::Cow<'bs, 'static, str>,
}
impl<'bs> Configuration<'bs>
{
    /// Loads the boot configuration from `filename` on the system volume.
    ///
    /// Currently a stub: whether the file opens successfully or is absent
    /// (`NOT_FOUND`), the result is the same hard-coded fallback kernel
    /// path — parsing the file contents is still TODO (see the commented
    /// `panic!` below). Any other open error is propagated to the caller.
    pub fn from_file(_bs: &'bs BootServices, sys_vol: &protocols::File, filename: &CStr16) -> Result<Configuration<'bs>, Status> {
        match sys_vol.open_read(filename) {
            Ok(_cfg_file) => {
                // File exists but is not parsed yet; fall back anyway.
                //panic!("TODO: Read config file (allocating strings with `bs`)");
                Ok(Configuration {
                    kernel: ::uefi::CStr16::from_slice(PATH_FALLBACK_KERNEL).into(),
                    //commandline: "".into(),
                })
            },
            Err(NOT_FOUND) => {
                // Missing config file is acceptable: use the fallback kernel.
                Ok(Configuration {
                    kernel: ::uefi::CStr16::from_slice(PATH_FALLBACK_KERNEL).into(),
                    //commandline: "".into(),
                })
            },
            Err(e) => Err(e),
        }
    }
}
|
use std::{collections::HashMap};
/// Crate-local result alias: compiler errors are plain `String` messages.
pub(crate) type Result<T> = std::result::Result<T, String>;
use inkwell::{self, builder::Builder, context::Context, module::Module, types::{BasicType, BasicTypeEnum}, values::{BasicValue, BasicValueEnum, FunctionValue, IntValue, PointerValue}};
use crate::parser::{Expr, Function, Type};
/// Unwraps a `Result`, early-returning the `Err` from the enclosing
/// function (a hand-rolled `?`).
///
/// The original expansion ended in a semicolon (`match … };`), which in
/// expression position triggers the `semicolon_in_expressions_from_macros`
/// future-incompatibility lint and discards the match value; the
/// semicolon is removed so the macro is a proper expression.
macro_rules! gimme {
    ($x:expr) => {
        match $x {
            Ok(e) => e,
            Err(e) => return Err(e),
        }
    };
}
/// Unwraps an `Option`, early-returning `Err($err)` from the enclosing
/// function when the value is `None`.
macro_rules! gimme_opt {
    ($opt:expr, $err:expr) => {
        match $opt {
            Some(value) => value,
            None => return Err($err),
        }
    };
}
/// Holds the LLVM state used while lowering the parsed AST to IR.
pub(crate) struct Compiler<'ctx> {
    /// Instruction builder positioned at the current insertion point.
    builder: Builder<'ctx>,
    /// LLVM context that owns every type and value created here.
    context: &'ctx Context,
    /// Module receiving the generated functions.
    module: Module<'ctx>,
    /// Variable name → stack slot (alloca) currently in scope.
    variables: HashMap<String, PointerValue<'ctx>>,
    /// AST of the function currently being compiled, if any.
    fun: Option<Function>,
    /// LLVM handle of the function currently being compiled, if any.
    fn_val: Option<FunctionValue<'ctx>>,
}
/// AST → LLVM IR lowering.
///
/// NOTE(review): this impl looks work-in-progress and has several latent
/// compile errors flagged inline below (missing `mut`s, mutation through
/// `&self`, a use-after-move, and a truncated `compile_fn`).
impl<'ctx> Compiler<'ctx> {
    /// Creates a compiler with a fresh builder and a module named `name`,
    /// both owned by the LLVM context `c`.
    fn new(name: String, c: &'ctx Context) -> Compiler<'ctx> {
        let builder = c.create_builder();
        let module = c.create_module(&name);
        // Shadowing: from here on, `c` names the Compiler, not the Context.
        let c = Compiler {builder, context: c, module, variables: HashMap::new(), fun: None, fn_val: None};
        return c;
    }
    /// Maps a parser-level `Type` to its LLVM representation.
    fn resolve_type(&self, ty: Type) -> BasicTypeEnum {
        match ty {
            Type::I64 => self.context.i64_type().as_basic_type_enum(),
            Type::Struct(_) => todo!(),
        }
    }
    /// Emits an `alloca` in the current function's entry block, before the
    /// first instruction, so all stack slots live at the top of the
    /// function (the placement LLVM's mem2reg expects).
    ///
    /// NOTE(review): `self.fn_val.unwrap()` panics when no function is
    /// being compiled — consider returning `Err` instead.
    fn entry_block_alloca(&self, name: String, ty: BasicTypeEnum<'ctx>) -> Result<PointerValue<'ctx>> {
        // Throwaway builder so the main builder's position is untouched.
        let b = self.context.create_builder();
        let entry_block = self.fn_val.unwrap().get_first_basic_block().unwrap();
        match entry_block.get_first_instruction() {
            Some(i) => b.position_before(&i),
            None => b.position_at_end(entry_block),
        }
        Ok(b.build_alloca(ty, &name))
    }
    /// Emits an addition; dispatches on the lhs type and assumes rhs has
    /// the same type — TODO confirm the parser/typechecker guarantees that.
    fn build_add(&self, lhs: BasicValueEnum<'ctx>, rhs: BasicValueEnum<'ctx>) -> Result<Option<BasicValueEnum>> {
        match &lhs.get_type() {
            BasicTypeEnum::ArrayType(_) => todo!(),
            BasicTypeEnum::FloatType(_) => Ok(Some(self.builder.build_float_add(lhs.into_float_value(), rhs.into_float_value(), "tmpfloatadd").as_basic_value_enum())),
            BasicTypeEnum::IntType(_) => Ok(Some(self.builder.build_int_add(lhs.into_int_value(), rhs.into_int_value(), "tmpintadd").as_basic_value_enum())),
            BasicTypeEnum::PointerType(_) => todo!(),
            BasicTypeEnum::StructType(_) => todo!(),
            BasicTypeEnum::VectorType(_) => todo!(),
        }
    }
    /// Emits a multiplication (same dispatch scheme as `build_add`).
    fn build_mul(&self, lhs: BasicValueEnum<'ctx>, rhs: BasicValueEnum<'ctx>) -> Result<Option<BasicValueEnum>> {
        match &lhs.get_type() {
            BasicTypeEnum::ArrayType(_) => todo!(),
            BasicTypeEnum::FloatType(_) => Ok(Some(self.builder.build_float_mul(lhs.into_float_value(), rhs.into_float_value(), "tmpfloatmul").as_basic_value_enum())),
            BasicTypeEnum::IntType(_) => Ok(Some(self.builder.build_int_mul(lhs.into_int_value(), rhs.into_int_value(), "tmpintmul").as_basic_value_enum())),
            BasicTypeEnum::PointerType(_) => todo!(),
            BasicTypeEnum::StructType(_) => todo!(),
            BasicTypeEnum::VectorType(_) => todo!(),
        }
    }
    /// Emits a subtraction (same dispatch scheme as `build_add`).
    fn build_sub(&self, lhs: BasicValueEnum<'ctx>, rhs: BasicValueEnum<'ctx>) -> Result<Option<BasicValueEnum>> {
        match &lhs.get_type() {
            BasicTypeEnum::ArrayType(_) => todo!(),
            BasicTypeEnum::FloatType(_) => Ok(Some(self.builder.build_float_sub(lhs.into_float_value(), rhs.into_float_value(), "tmpfloatsub").as_basic_value_enum())),
            BasicTypeEnum::IntType(_) => Ok(Some(self.builder.build_int_sub(lhs.into_int_value(), rhs.into_int_value(), "tmpintsub").as_basic_value_enum())),
            BasicTypeEnum::PointerType(_) => todo!(),
            BasicTypeEnum::StructType(_) => todo!(),
            BasicTypeEnum::VectorType(_) => todo!(),
        }
    }
    /// Emits a division: float division for floats, *signed* division for
    /// integers (same dispatch scheme as `build_add`).
    fn build_div(&self, lhs: BasicValueEnum<'ctx>, rhs: BasicValueEnum<'ctx>) -> Result<Option<BasicValueEnum>> {
        match &lhs.get_type() {
            BasicTypeEnum::ArrayType(_) => todo!(),
            BasicTypeEnum::FloatType(_) => Ok(Some(self.builder.build_float_div(lhs.into_float_value(), rhs.into_float_value(), "tmpfloatdiv").as_basic_value_enum())),
            BasicTypeEnum::IntType(_) => Ok(Some(self.builder.build_int_signed_div(lhs.into_int_value(), rhs.into_int_value(), "tmpintdiv").as_basic_value_enum())),
            BasicTypeEnum::PointerType(_) => todo!(),
            BasicTypeEnum::StructType(_) => todo!(),
            BasicTypeEnum::VectorType(_) => todo!(),
        }
    }
    /// Emits an equality comparison: ordered-equal (`OEQ`) for floats,
    /// `EQ` for ints. The result is an i1 comparison value.
    fn build_eq_comp(&self, lhs: BasicValueEnum<'ctx>, rhs: BasicValueEnum<'ctx>) -> Result<Option<BasicValueEnum>> {
        match &lhs.get_type() {
            BasicTypeEnum::ArrayType(_) => todo!(),
            BasicTypeEnum::FloatType(_) => Ok(Some(self.builder.build_float_compare(inkwell::FloatPredicate::OEQ, lhs.into_float_value(), rhs.into_float_value(), "tmpfloatcmp").as_basic_value_enum())),
            // NOTE(review): the temporary is named "tmpintdiv" here —
            // probably a copy-paste from build_div; harmless but confusing
            // in IR dumps.
            BasicTypeEnum::IntType(_) => Ok(Some(self.builder.build_int_compare(inkwell::IntPredicate::EQ, lhs.into_int_value(), rhs.into_int_value(), "tmpintdiv").as_basic_value_enum())),
            BasicTypeEnum::PointerType(_) => todo!(),
            BasicTypeEnum::StructType(_) => todo!(),
            BasicTypeEnum::VectorType(_) => todo!(),
        }
    }
    /// Emits a store of `rhs` through the pointer `lhs`. Assignments
    /// produce no value (`Ok(None)`).
    ///
    /// NOTE(review): relies on `lhs` being a PointerValue — presumably the
    /// lhs expression compiles to an alloca (see `Expr::Variable` below);
    /// a non-pointer lhs would panic in `into_pointer_value`.
    fn build_assign(&self, lhs: BasicValueEnum<'ctx>, rhs: BasicValueEnum<'ctx>) -> Result<Option<BasicValueEnum>> {
        self.builder.build_store(lhs.into_pointer_value(), rhs);
        Ok(None)
    }
    /// Recursively lowers one expression to IR. Returns `Ok(None)` for
    /// expressions that produce no value (blocks, defs, assignments).
    fn compile(&self, e: Expr) -> Result<Option<BasicValueEnum>> {
        match e {
            Expr::Binary(b) => {
                // Both operands must produce a value ("void" otherwise).
                let lhs = gimme_opt!(gimme!(self.compile(b.left)), String::from("void"));
                let rhs = gimme_opt!(gimme!(self.compile(b.right)), String::from("void"));
                match b.op {
                    crate::parser::BinaryOps::Assign => self.build_assign(lhs, rhs),
                    crate::parser::BinaryOps::Add => self.build_add(lhs, rhs),
                    crate::parser::BinaryOps::Sub => self.build_sub(lhs, rhs),
                    crate::parser::BinaryOps::Mul => self.build_mul(lhs, rhs),
                    crate::parser::BinaryOps::Div => self.build_div(lhs, rhs),
                    crate::parser::BinaryOps::Equal => self.build_eq_comp(lhs, rhs),
                    crate::parser::BinaryOps::NEqual => todo!(),
                    crate::parser::BinaryOps::Less => todo!(),
                    crate::parser::BinaryOps::More => todo!(),
                    crate::parser::BinaryOps::LessEqual => todo!(),
                    crate::parser::BinaryOps::MoreEqual => todo!(),
                }
            },
            Expr::Variable(v) => {
                // NOTE(review): this yields the variable's alloca *pointer*
                // as the value; consumers like build_add expect a loaded
                // value — a `build_load` is probably missing. Confirm intent.
                let val = self.variables.get(&v);
                match val {
                    Some(x) => {Ok(Some(x.as_basic_value_enum()))},
                    None => Err(format!("Cannot find variable {}", v)),
                }
            },
            Expr::If(i) => { // TODO: optimise so bools actually exist
                // NOTE(review): `i.cond` is moved into `self.compile(...)`
                // and then referenced again in the error message below —
                // borrow-after-move; needs a clone or a pre-built message.
                let cond = gimme_opt!(gimme!(self.compile(i.cond)), format!("Cannot use {:?} as boolean", i.cond));
                // Normalise the condition to an i1 by comparing against 0.
                let comp: IntValue = match cond {
                    BasicValueEnum::ArrayValue(_) => todo!(),
                    BasicValueEnum::IntValue(i) => { self.builder.build_int_compare(inkwell::IntPredicate::NE, i, self.context.i8_type().const_zero(), "tmpintcmp")},
                    BasicValueEnum::FloatValue(_) => todo!(),
                    BasicValueEnum::PointerValue(_) => todo!(),
                    BasicValueEnum::StructValue(_) => todo!(),
                    BasicValueEnum::VectorValue(_) => todo!(),
                };
                let parent = gimme_opt!(self.fn_val, String::from("this shouldnt happen"));
                let then_bb = self.context.append_basic_block(parent, "then_branch");
                let else_bb = self.context.append_basic_block(parent, "else_branch");
                let cont_bb = self.context.append_basic_block(parent, "cont_branch");
                let branch = self.builder.build_conditional_branch(comp, then_bb, else_bb);
                self.builder.position_at_end(then_bb);
                let then_val = gimme!(self.compile(i.then));
                self.builder.build_unconditional_branch(cont_bb);
                self.builder.position_at_end(else_bb);
                let else_val: Option<BasicValueEnum> = if let Some(x) = i.els {
                    gimme!(self.compile(x))
                } else { None };
                self.builder.build_unconditional_branch(cont_bb);
                // Only an if with both branches producing same-typed values
                // yields a value itself, merged through a phi node.
                // NOTE(review): the phi incoming blocks are the *original*
                // then/else blocks; if compiling a branch emitted further
                // blocks, the incoming block should be the builder's block
                // at the end of that branch (classic Kaleidoscope pitfall).
                // Also: the builder is never positioned at cont_bb, so code
                // emitted after this if lands in else_bb — confirm.
                if let Some(t) = then_val {
                    if let Some(e) = else_val {
                        if t.get_type() == e.get_type() {
                            let phi = self.builder.build_phi(t.get_type(), "tmpifphi");
                            phi.add_incoming(&[
                                (&t, then_bb),
                                (&e, else_bb),
                            ]);
                            return Ok(Some(phi.as_basic_value()));
                        } else {
                            return Err(format!("Then-val not the same as else-val"));
                        }
                    }
                }
                Ok(None)
            },
            // Numeric literals are i64 constants (sign-extension disabled).
            Expr::Number(n) => {Ok(Some(self.context.i64_type().const_int(n as u64, false).as_basic_value_enum()))},
            Expr::Block(b) => {
                // Compile each statement for effect; a block has no value.
                for i in b.statements {
                    let c = self.compile(i);
                    match c {
                        Ok(_) => (),
                        Err(e) => return Err(e),
                    }
                }
                Ok(None)
            },
            Expr::Def(d) => {
                // Allocate a stack slot for the new variable and register it.
                // NOTE(review): `insert` needs `&mut self` (or interior
                // mutability in `variables`) — this does not compile with
                // the current `&self` receiver.
                let ty = self.resolve_type(d.typ);
                let m = self.entry_block_alloca(d.name.clone(), ty);
                let m = gimme!(m);
                self.variables.insert(d.name, m);
                Ok(None)
            },
            Expr::Call(c) => {
                let fun = self.module.get_function(&c.name);
                let fun = gimme_opt!(fun, format!("Unknown function {}", c.name));
                // NOTE(review): `args` must be `let mut` for the pushes
                // below to compile.
                let args = vec![];
                for i in c.args {
                    args.push(gimme_opt!(gimme!(self.compile(i)), String::from("Cannot use as a non-void")));
                }
                // NOTE(review): `.left().unwrap()` panics for void-returning
                // callees — consider mapping None to Ok(None) instead.
                let c = self.builder.build_call(fun, args.as_slice(), "tmpcall").try_as_basic_value().left().unwrap();
                Ok(Some(c))
            },
            Expr::FnDef(f) => todo!(),
            Expr::StructDef(_) => todo!(),
        }
    }
    /// Compiles a function definition.
    ///
    /// NOTE(review): unfinished and does not parse — `arg_types` is never
    /// filled or used, `.fn` is not valid Rust (presumably meant
    /// `.fn_type(&arg_types, false)`), and no `FunctionValue` is returned.
    fn compile_fn(&self, f: Function) -> Result<FunctionValue> {
        let arg_types = vec![];
        let fn_type = self.resolve_type(f.proto.ty).fn;
    }
}
use napi::*;
use std::{fmt::Display, usize};
use usb_enumeration::UsbDevice;
pub fn to_js_error<T: Display>(error: T) -> napi::Error {
napi::Error::new(Status::Unknown, error.to_string())
}
/// Conversion of a native value into a JavaScript object.
pub trait ToJsObject {
    /// Builds a `JsObject` representing `self` inside the given N-API `env`.
    fn to_js_object(&self, env: Env) -> Result<JsObject>;
}
impl ToJsObject for UsbDevice {
    /// Builds a JS object with `id`, `vendor_id`, `product_id` and — only
    /// when present — `description`.
    fn to_js_object(&self, env: Env) -> Result<JsObject> {
        let mut device = env.create_object()?;
        let id = env.create_string(&self.id)?;
        device.set_named_property("id", id)?;
        let vendor = env.create_int32(self.vendor_id as i32)?;
        device.set_named_property("vendor_id", vendor)?;
        let product = env.create_int32(self.product_id as i32)?;
        device.set_named_property("product_id", product)?;
        if let Some(description) = self.description.as_ref() {
            device.set_named_property("description", env.create_string(description)?)?;
        }
        Ok(device)
    }
}
/// Reads the call argument at `index` as an optional `u16`.
///
/// `undefined`/`null` map to `None`; a JS number is coerced to u32 and
/// cast down to 16 bits. Any other value type yields an error.
///
/// NOTE(review): the error text hard-codes "First argument (vendor_id)"
/// even though `index` is caller-chosen — misleading for other positions.
/// NOTE(review): `as u16` silently truncates values > 0xFFFF; consider
/// `u16::try_from` with a range error instead.
pub fn get_optional_u16(ctx: &CallContext, index: usize) -> Result<Option<u16>> {
    let arg2 = ctx.get::<JsUnknown>(index)?;
    Ok(match arg2.get_type()? {
        ValueType::Undefined | ValueType::Null => None,
        ValueType::Number => arg2.coerce_to_number()?.get_uint32().ok().map(|v| v as u16),
        _ => {
            return Err(to_js_error(
                "First argument (vendor_id), expected number or undefined",
            ))
        }
    })
}
|
//
// Sysinfo
//
// Copyright (c) 2018 Guillaume Gomez
//
use ComponentExt;
/// Struct containing a component information (temperature and name for the moment).
pub struct Component {
    // Latest temperature reading reported by the sensor backend.
    temperature: f32,
    // Highest temperature observed so far (raised by `update_component`).
    max: f32,
    // Critical threshold, when the sensor reports one.
    critical: Option<f32>,
    // Human-readable sensor label.
    label: String,
}
impl Component {
    /// Creates a new `Component` with the given information.
    ///
    /// Temperature starts at zero; a missing `max` defaults to `0.0` and
    /// will be raised as readings come in.
    pub fn new(label: String, max: Option<f32>, critical: Option<f32>) -> Component {
        let max = max.unwrap_or(0.0);
        Component {
            temperature: 0.0,
            max,
            critical,
            label,
        }
    }
}
impl ComponentExt for Component {
    /// Returns the latest temperature reading.
    fn get_temperature(&self) -> f32 {
        self.temperature
    }
    /// Returns the highest temperature observed (or the constructor's
    /// `max`, if no higher reading has arrived).
    fn get_max(&self) -> f32 {
        self.max
    }
    /// Returns the critical threshold, if the sensor reports one.
    fn get_critical(&self) -> Option<f32> {
        self.critical
    }
    /// Returns the component's label.
    fn get_label(&self) -> &str {
        &self.label
    }
}
/// Records a new temperature reading, raising the running maximum when the
/// reading exceeds it. (The explicit `>` comparison — rather than
/// `f32::max` — is kept so NaN readings leave `max` untouched.)
pub fn update_component(comp: &mut Component, temperature: f32) {
    comp.temperature = temperature;
    if temperature > comp.max {
        comp.max = temperature;
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.