file_name
large_stringlengths
4
140
prefix
large_stringlengths
0
39k
suffix
large_stringlengths
0
36.1k
middle
large_stringlengths
0
29.4k
fim_type
large_stringclasses
4 values
lib.rs
//! This crate contains structures and generators for specifying how to generate //! historical and real-time test data for Delorean. The rules for how to //! generate data and what shape it should take can be specified in a TOML file. //! //! Generators can output in line protocol, Parquet, or can be used to generate //! real-time load on a server that implements the [InfluxDB 2.0 write //! path][write-api]. //! //! [write-api]: https://v2.docs.influxdata.com/v2.0/api/#tag/Write //! //! While this generator could be compared to [the Go based one that creates TSM //! data][go-gen], its purpose is meant to be more far reaching. In addition to //! generating historical data, it should be useful for generating data in a //! sequence as you would expect it to arrive in a production environment. That //! means many agents sending data with their different tags and timestamps. //! //! [go-gen]: https://github.com/influxdata/influxdb/pull/12710 #![deny(rustdoc::broken_intra_doc_links, rustdoc::bare_urls, rust_2018_idioms)] #![warn( missing_copy_implementations, missing_debug_implementations, missing_docs, clippy::explicit_iter_loop, clippy::future_not_send, clippy::use_self, clippy::clone_on_ref_ptr )] use crate::substitution::Substitute; use rand::Rng; use rand_seeder::Seeder; use snafu::{ResultExt, Snafu}; use std::{ convert::TryFrom, time::{SystemTime, UNIX_EPOCH}, }; pub mod agent; pub mod field; pub mod measurement; pub mod specification; pub mod substitution; pub mod tag; mod tag_set; pub mod write; /// Errors that may happen while generating points. 
#[derive(Snafu, Debug)] pub enum Error { /// Error that may happen when waiting on a tokio task #[snafu(display("Could not join tokio task: {}", source))] TokioError { /// Underlying tokio error that caused this problem source: tokio::task::JoinError, }, /// Error that may happen when constructing an agent name #[snafu(display("Could not create agent name, caused by:\n{}", source))] CouldNotCreateAgentName { /// Underlying `substitution` module error that caused this problem source: substitution::Error, }, /// Error that may happen when an agent generates points #[snafu(display("Agent could not generate points, caused by:\n{}", source))] AgentCouldNotGeneratePoints { /// Underlying `agent` module error that caused this problem source: agent::Error, }, /// Error that may happen when creating agents #[snafu(display("Could not create agent `{}`, caused by:\n{}", name, source))] CouldNotCreateAgent { /// The name of the relevant agent name: String, /// Underlying `agent` module error that caused this problem source: agent::Error, }, /// Error that may happen when constructing an agent's writer #[snafu(display("Could not create writer for agent `{}`, caused by:\n{}", name, source))] CouldNotCreateAgentWriter { /// The name of the relevant agent name: String, /// Underlying `write` module error that caused this problem source: write::Error, }, } type Result<T, E = Error> = std::result::Result<T, E>; /// Generate data from the configuration in the spec. /// /// Provide a writer that the line protocol should be written to. /// /// If `start_datetime` or `end_datetime` are `None`, the current datetime will /// be used. 
pub async fn generate<T: DataGenRng>( spec: &specification::DataSpec, points_writer_builder: &mut write::PointsWriterBuilder, start_datetime: Option<i64>, end_datetime: Option<i64>, execution_start_time: i64, continue_on: bool, batch_size: usize, ) -> Result<usize> { let seed = spec.base_seed.to_owned().unwrap_or_else(|| { let mut rng = rand::thread_rng(); format!("{:04}", rng.gen_range(0..10000)) }); let mut handles = vec![]; // for each agent specification for agent_spec in &spec.agents { // create iterators to `cycle` through for `agent_spec.tags` let tag_set_iterator = tag::AgentTagIterator::new(&agent_spec.tags); // create `count` number of agent instances, or 1 agent if no count is specified let n_agents = agent_spec.count.unwrap_or(1); for (agent_id, mut agent_tags) in tag_set_iterator.take(n_agents).enumerate() { let agent_name = Substitute::once(&agent_spec.name, &[("agent_id", &agent_id.to_string())]) .context(CouldNotCreateAgentName)?; agent_tags.push(tag::Tag::new("data_spec", &spec.name)); if let Some(name_tag_key) = &agent_spec.name_tag_key
let mut agent = agent::Agent::<T>::new( agent_spec, &agent_name, agent_id, &seed, agent_tags, start_datetime, end_datetime, execution_start_time, continue_on, ) .context(CouldNotCreateAgent { name: &agent_name })?; let agent_points_writer = points_writer_builder .build_for_agent(&agent_name) .context(CouldNotCreateAgentWriter { name: &agent_name })?; handles.push(tokio::task::spawn(async move { agent.generate_all(agent_points_writer, batch_size).await })); } } let mut total_points = 0; for handle in handles { total_points += handle .await .context(TokioError)? .context(AgentCouldNotGeneratePoints)?; } Ok(total_points) } /// Shorthand trait for the functionality this crate needs a random number generator to have pub trait DataGenRng: rand::Rng + rand::SeedableRng + Send + 'static {} impl<T: rand::Rng + rand::SeedableRng + Send + 'static> DataGenRng for T {} /// Encapsulating the creation of an optionally-seedable random number generator /// to make this easy to change. Uses a 4-digit number expressed as a `String` /// as the seed type to enable easy creation of another instance using the same /// seed. #[derive(Debug)] pub struct RandomNumberGenerator<T: DataGenRng> { rng: T, /// The seed used for this instance. pub seed: String, } impl<T: DataGenRng> Default for RandomNumberGenerator<T> { fn default() -> Self { let mut rng = rand::thread_rng(); let seed = format!("{:04}", rng.gen_range(0..10000)); Self::new(seed) } } impl<T: DataGenRng> RandomNumberGenerator<T> { /// Create a new instance using the specified seed. 
pub fn new(seed: impl Into<String>) -> Self { let seed = seed.into(); Self { rng: Seeder::from(&seed).make_rng(), seed, } } /// Generate a random GUID pub fn guid(&mut self) -> uuid::Uuid { let mut bytes = [0u8; 16]; self.rng.fill_bytes(&mut bytes); uuid::Builder::from_bytes(bytes) .set_variant(uuid::Variant::RFC4122) .set_version(uuid::Version::Random) .build() } } impl<T: DataGenRng> rand::RngCore for RandomNumberGenerator<T> { fn next_u32(&mut self) -> u32 { self.rng.next_u32() } fn next_u64(&mut self) -> u64 { self.rng.next_u64() } fn fill_bytes(&mut self, dest: &mut [u8]) { self.rng.fill_bytes(dest); } fn try_fill_bytes(&mut self, dest: &mut [u8]) -> std::result::Result<(), rand::Error> { self.rng.try_fill_bytes(dest) } } /// Gets the current time in nanoseconds since the epoch pub fn now_ns() -> i64 { let since_the_epoch = SystemTime::now() .duration_since(UNIX_EPOCH) .expect("Time went backwards"); i64::try_from(since_the_epoch.as_nanos()).expect("Time does not fit") } // Always returns 0. #[cfg(test)] #[derive(Default)] struct ZeroRng; #[cfg(test)] impl rand::RngCore for ZeroRng { fn next_u32(&mut self) -> u32 { self.next_u64() as u32 } fn next_u64(&mut self) -> u64 { 0 } fn fill_bytes(&mut self, dest: &mut [u8]) { rand_core::impls::fill_bytes_via_next(self, dest) } fn try_fill_bytes(&mut self, dest: &mut [u8]) -> std::result::Result<(), rand::Error> { self.fill_bytes(dest); Ok(()) } } #[cfg(test)] impl rand::SeedableRng for ZeroRng { type Seed = Vec<u8>; // Ignore the seed value fn from_seed(_seed: Self::Seed) -> Self { Self } } // The test rng ignores the seed anyway, so the seed specified doesn't matter. #[cfg(test)] const TEST_SEED: &str = ""; #[cfg(test)] fn test_rng() -> RandomNumberGenerator<ZeroRng> { RandomNumberGenerator::<ZeroRng>::new(TEST_SEED) } // A random number type that does *not* have a predictable sequence of values for use in tests // that assert on properties rather than exact values. 
Aliased for convenience in changing to // a different Rng type. #[cfg(test)] type DynamicRng = rand::rngs::SmallRng; #[cfg(test)] mod test { use super::*; use crate::specification::*; use influxdb2_client::models::WriteDataPoint; use std::str::FromStr; type Error = Box<dyn std::error::Error>; type Result<T = (), E = Error> = std::result::Result<T, E>; #[tokio::test] async fn historical_data_sampling_interval() -> Result<()> { let toml = r#" name = "demo_schema" [[agents]] name = "basic" sampling_interval = "10s" # seconds [[agents.measurements]] name = "cpu" [[agents.measurements.fields]] name = "up" bool = true"#; let data_spec = DataSpec::from_str(toml).unwrap(); let agent_id = 0; let agent_spec = &data_spec.agents[0]; // Take agent_tags out of the equation for the purposes of this test let agent_tags = vec![]; let execution_start_time = now_ns(); // imagine we've specified at the command line that we want to generate metrics // for 1970 let start_datetime = Some(0); // for the first 15 seconds of the year let end_datetime = Some(15 * 1_000_000_000); let mut agent = agent::Agent::<ZeroRng>::new( agent_spec, &agent_spec.name, agent_id, TEST_SEED, agent_tags, start_datetime, end_datetime, execution_start_time, false, )?; let data_points = agent.generate().await?; let mut v = Vec::new(); for data_point in data_points { data_point.write_data_point_to(&mut v).unwrap(); } let line_protocol = String::from_utf8(v).unwrap(); // Get a point for time 0 let expected_line_protocol = "cpu up=f 0\n"; assert_eq!(line_protocol, expected_line_protocol); let data_points = agent.generate().await?; let mut v = Vec::new(); for data_point in data_points { data_point.write_data_point_to(&mut v).unwrap(); } let line_protocol = String::from_utf8(v).unwrap(); // Get a point for time 10s let expected_line_protocol = "cpu up=f 10000000000\n"; assert_eq!(line_protocol, expected_line_protocol); // Don't get any points anymore because we're past the ending datetime let data_points = 
agent.generate().await?; assert!( data_points.is_empty(), "expected no data points, got {:?}", data_points ); Ok(()) } }
{ agent_tags.push(tag::Tag::new(name_tag_key, &agent_name)); }
conditional_block
transport.go
/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package transport defines and implements message oriented communication // channel to complete various transactions (e.g., an RPC). It is meant for // grpc-internal usage and is not intended to be imported directly by users. package transport // externally used as import "google.golang.org/grpc/transport" import ( "errors" "fmt" "net" "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" ) // state of transport type transportState int const ( reachable transportState = iota closing draining ) // ServerConfig consists of all the configurations to establish a server transport. type ServerConfig struct { MaxStreams uint32 AuthInfo credentials.AuthInfo InTapHandle tap.ServerInHandle StatsHandler stats.Handler KeepaliveParams keepalive.ServerParameters KeepalivePolicy keepalive.EnforcementPolicy InitialWindowSize int32 InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int ChannelzParentID int64 } // NewServerTransport creates a ServerTransport with conn or non-nil error // if it fails. 
func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) { return newHTTP2Server(conn, config) } // ConnectOptions covers all relevant options for communicating with the server. type ConnectOptions struct { // UserAgent is the application user agent. UserAgent string // Authority is the :authority pseudo-header to use. This field has no effect if // TransportCredentials is set. Authority string // Dialer specifies how to dial a network address. Dialer func(context.Context, string) (net.Conn, error) // FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors. FailOnNonTempDialError bool // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs. PerRPCCredentials []credentials.PerRPCCredentials // TransportCredentials stores the Authenticator required to setup a client connection. TransportCredentials credentials.TransportCredentials // KeepaliveParams stores the keepalive parameters. KeepaliveParams keepalive.ClientParameters // StatsHandler stores the handler for stats. StatsHandler stats.Handler // InitialWindowSize sets the initial window size for a stream. InitialWindowSize int32 // InitialConnWindowSize sets the initial window size for a connection. InitialConnWindowSize int32 // WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire. WriteBufferSize int // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. ChannelzParentID int64 } // TargetInfo contains the information of the target such as network address and metadata. type TargetInfo struct { Addr string Metadata interface{} Authority string } // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. 
func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onSuccess func()) (ClientTransport, error) { return newHTTP2Client(connectCtx, ctx, target, opts, onSuccess) } // Options provides additional hints and information for message // transmission. type Options struct { // Last indicates whether this write is the last piece for // this stream. Last bool // Delay is a hint to the transport implementation for whether // the data could be buffered for a batching write. The // transport implementation may ignore the hint. // TODO(mmukhi, dfawley): Should this be deleted? Delay bool // IsCompressed indicates weather the message being written // was compressed or not. Transport relays this information // to the API that generates gRPC-specific message header. IsCompressed bool } // CallHdr carries the information of a particular RPC. type CallHdr struct { // Host specifies the peer's host. Host string // Method specifies the operation to perform. Method string // SendCompress specifies the compression algorithm applied on // outbound message. SendCompress string // Creds specifies credentials.PerRPCCredentials for a call. Creds credentials.PerRPCCredentials // Flush indicates whether a new stream command should be sent // to the peer without waiting for the first data. This is // only a hint. // If it's true, the transport may modify the flush decision // for performance purposes. // If it's false, new stream will never be flushed. Flush bool // ContentSubtype specifies the content-subtype for a request. For example, a // content-subtype of "proto" will result in a content-type of // "application/grpc+proto". The value of ContentSubtype must be all // lowercase, otherwise the behavior is undefined. See // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests // for more details. ContentSubtype string } // ClientTransport is the common interface for all gRPC client-side transport // implementations. 
type ClientTransport interface { // Close tears down this transport. Once it returns, the transport // should not be accessed any more. The caller must make sure this // is called only once. Close() error // GracefulClose starts to tear down the transport. It stops accepting // new RPCs and wait the completion of the pending RPCs. GracefulClose() error // Write sends the data for the given stream. A nil stream indicates // the write is to be performed on the transport as a whole. Write(s *Stream, data []byte, opts *Options) error // NewStream creates a Stream for an RPC. NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) // CloseStream clears the footprint of a stream when the stream is // not needed any more. The err indicates the error incurred when // CloseStream is called. Must be called when a stream is finished // unless the associated transport is closing. CloseStream(stream *Stream, err error) // Error returns a channel that is closed when some I/O error // happens. Typically the caller should have a goroutine to monitor // this in order to take action (e.g., close the current transport // and create a new one) in error case. It should not return nil // once the transport is initiated. Error() <-chan struct{} // GoAway returns a channel that is closed when ClientTransport // receives the draining signal from the server (e.g., GOAWAY frame in // HTTP/2). GoAway() <-chan struct{} // GetGoAwayReason returns the reason why GoAway frame was received. GetGoAwayReason() GoAwayReason // IncrMsgSent increments the number of message sent through this transport. IncrMsgSent() // IncrMsgRecv increments the number of message received through this transport. IncrMsgRecv() } // ServerTransport is the common interface for all gRPC server-side transport // implementations. // // Methods may be called concurrently from multiple goroutines, but // Write methods for a given Stream will be called serially. 
type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. HandleStreams(func(*Stream), func(context.Context, string) context.Context) // WriteHeader sends the header metadata for the given stream. // WriteHeader may not be called on all streams. WriteHeader(s *Stream, md metadata.MD) error // Write sends the data for the given stream. // Write may not be called on all streams. Write(s *Stream, data []byte, opts *Options) error // WriteStatus sends the status of a stream to the client. WriteStatus is // the final call made on a stream and always occurs. WriteStatus(s *Stream, st *status.Status) error // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. Close() error // RemoteAddr returns the remote network address. RemoteAddr() net.Addr // Drain notifies the client this ServerTransport stops accepting new RPCs. Drain() // IncrMsgSent increments the number of message sent through this transport. IncrMsgSent() // IncrMsgRecv increments the number of message received through this transport. IncrMsgRecv() } // streamErrorf creates an StreamError with the specified error code and description. func streamErrorf(c codes.Code, format string, a ...interface{}) StreamError { return StreamError{ Code: c, Desc: fmt.Sprintf(format, a...), } } // connectionErrorf creates an ConnectionError with the specified error description. func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { return ConnectionError{ Desc: fmt.Sprintf(format, a...), temp: temp, err: e, } } // ConnectionError is an error that results in the termination of the // entire connection and the retry of all the active streams. 
type ConnectionError struct { Desc string temp bool err error } func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: desc = %q", e.Desc) } // Temporary indicates if this connection error is temporary or fatal. func (e ConnectionError) Temporary() bool { return e.temp } // Origin returns the original error of this connection error. func (e ConnectionError) Origin() error { // Never return nil error here. // If the original error is nil, return itself. if e.err == nil
return e.err } var ( // ErrConnClosing indicates that the transport is closing. ErrConnClosing = connectionErrorf(true, nil, "transport is closing") // errStreamDrain indicates that the stream is rejected because the // connection is draining. This could be caused by goaway or balancer // removing the address. errStreamDrain = streamErrorf(codes.Unavailable, "the connection is draining") // errStreamDone is returned from write at the client side to indiacte application // layer of an error. errStreamDone = errors.New("the stream is done") // StatusGoAway indicates that the server sent a GOAWAY that included this // stream's ID in unprocessed RPCs. statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection") ) // TODO: See if we can replace StreamError with status package errors. // StreamError is an error that only affects one stream within a connection. type StreamError struct { Code codes.Code Desc string } func (e StreamError) Error() string { return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc) } // GoAwayReason contains the reason for the GoAway frame received. type GoAwayReason uint8 const ( // GoAwayInvalid indicates that no GoAway frame is received. GoAwayInvalid GoAwayReason = 0 // GoAwayNoReason is the default value when GoAway frame is received. GoAwayNoReason GoAwayReason = 1 // GoAwayTooManyPings indicates that a GoAway frame with // ErrCodeEnhanceYourCalm was received and that the debug data said // "too_many_pings". GoAwayTooManyPings GoAwayReason = 2 )
{ return e }
conditional_block
transport.go
/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package transport defines and implements message oriented communication // channel to complete various transactions (e.g., an RPC). It is meant for // grpc-internal usage and is not intended to be imported directly by users. package transport // externally used as import "google.golang.org/grpc/transport" import ( "errors" "fmt" "net" "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" ) // state of transport type transportState int const ( reachable transportState = iota closing draining ) // ServerConfig consists of all the configurations to establish a server transport. type ServerConfig struct { MaxStreams uint32 AuthInfo credentials.AuthInfo InTapHandle tap.ServerInHandle StatsHandler stats.Handler KeepaliveParams keepalive.ServerParameters KeepalivePolicy keepalive.EnforcementPolicy InitialWindowSize int32 InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int ChannelzParentID int64
// if it fails. func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) { return newHTTP2Server(conn, config) } // ConnectOptions covers all relevant options for communicating with the server. type ConnectOptions struct { // UserAgent is the application user agent. UserAgent string // Authority is the :authority pseudo-header to use. This field has no effect if // TransportCredentials is set. Authority string // Dialer specifies how to dial a network address. Dialer func(context.Context, string) (net.Conn, error) // FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors. FailOnNonTempDialError bool // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs. PerRPCCredentials []credentials.PerRPCCredentials // TransportCredentials stores the Authenticator required to setup a client connection. TransportCredentials credentials.TransportCredentials // KeepaliveParams stores the keepalive parameters. KeepaliveParams keepalive.ClientParameters // StatsHandler stores the handler for stats. StatsHandler stats.Handler // InitialWindowSize sets the initial window size for a stream. InitialWindowSize int32 // InitialConnWindowSize sets the initial window size for a connection. InitialConnWindowSize int32 // WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire. WriteBufferSize int // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. ChannelzParentID int64 } // TargetInfo contains the information of the target such as network address and metadata. 
type TargetInfo struct { Addr string Metadata interface{} Authority string } // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onSuccess func()) (ClientTransport, error) { return newHTTP2Client(connectCtx, ctx, target, opts, onSuccess) } // Options provides additional hints and information for message // transmission. type Options struct { // Last indicates whether this write is the last piece for // this stream. Last bool // Delay is a hint to the transport implementation for whether // the data could be buffered for a batching write. The // transport implementation may ignore the hint. // TODO(mmukhi, dfawley): Should this be deleted? Delay bool // IsCompressed indicates weather the message being written // was compressed or not. Transport relays this information // to the API that generates gRPC-specific message header. IsCompressed bool } // CallHdr carries the information of a particular RPC. type CallHdr struct { // Host specifies the peer's host. Host string // Method specifies the operation to perform. Method string // SendCompress specifies the compression algorithm applied on // outbound message. SendCompress string // Creds specifies credentials.PerRPCCredentials for a call. Creds credentials.PerRPCCredentials // Flush indicates whether a new stream command should be sent // to the peer without waiting for the first data. This is // only a hint. // If it's true, the transport may modify the flush decision // for performance purposes. // If it's false, new stream will never be flushed. Flush bool // ContentSubtype specifies the content-subtype for a request. For example, a // content-subtype of "proto" will result in a content-type of // "application/grpc+proto". The value of ContentSubtype must be all // lowercase, otherwise the behavior is undefined. 
See // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests // for more details. ContentSubtype string } // ClientTransport is the common interface for all gRPC client-side transport // implementations. type ClientTransport interface { // Close tears down this transport. Once it returns, the transport // should not be accessed any more. The caller must make sure this // is called only once. Close() error // GracefulClose starts to tear down the transport. It stops accepting // new RPCs and wait the completion of the pending RPCs. GracefulClose() error // Write sends the data for the given stream. A nil stream indicates // the write is to be performed on the transport as a whole. Write(s *Stream, data []byte, opts *Options) error // NewStream creates a Stream for an RPC. NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) // CloseStream clears the footprint of a stream when the stream is // not needed any more. The err indicates the error incurred when // CloseStream is called. Must be called when a stream is finished // unless the associated transport is closing. CloseStream(stream *Stream, err error) // Error returns a channel that is closed when some I/O error // happens. Typically the caller should have a goroutine to monitor // this in order to take action (e.g., close the current transport // and create a new one) in error case. It should not return nil // once the transport is initiated. Error() <-chan struct{} // GoAway returns a channel that is closed when ClientTransport // receives the draining signal from the server (e.g., GOAWAY frame in // HTTP/2). GoAway() <-chan struct{} // GetGoAwayReason returns the reason why GoAway frame was received. GetGoAwayReason() GoAwayReason // IncrMsgSent increments the number of message sent through this transport. IncrMsgSent() // IncrMsgRecv increments the number of message received through this transport. 
IncrMsgRecv() } // ServerTransport is the common interface for all gRPC server-side transport // implementations. // // Methods may be called concurrently from multiple goroutines, but // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. HandleStreams(func(*Stream), func(context.Context, string) context.Context) // WriteHeader sends the header metadata for the given stream. // WriteHeader may not be called on all streams. WriteHeader(s *Stream, md metadata.MD) error // Write sends the data for the given stream. // Write may not be called on all streams. Write(s *Stream, data []byte, opts *Options) error // WriteStatus sends the status of a stream to the client. WriteStatus is // the final call made on a stream and always occurs. WriteStatus(s *Stream, st *status.Status) error // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. Close() error // RemoteAddr returns the remote network address. RemoteAddr() net.Addr // Drain notifies the client this ServerTransport stops accepting new RPCs. Drain() // IncrMsgSent increments the number of message sent through this transport. IncrMsgSent() // IncrMsgRecv increments the number of message received through this transport. IncrMsgRecv() } // streamErrorf creates an StreamError with the specified error code and description. func streamErrorf(c codes.Code, format string, a ...interface{}) StreamError { return StreamError{ Code: c, Desc: fmt.Sprintf(format, a...), } } // connectionErrorf creates an ConnectionError with the specified error description. 
func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { return ConnectionError{ Desc: fmt.Sprintf(format, a...), temp: temp, err: e, } } // ConnectionError is an error that results in the termination of the // entire connection and the retry of all the active streams. type ConnectionError struct { Desc string temp bool err error } func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: desc = %q", e.Desc) } // Temporary indicates if this connection error is temporary or fatal. func (e ConnectionError) Temporary() bool { return e.temp } // Origin returns the original error of this connection error. func (e ConnectionError) Origin() error { // Never return nil error here. // If the original error is nil, return itself. if e.err == nil { return e } return e.err } var ( // ErrConnClosing indicates that the transport is closing. ErrConnClosing = connectionErrorf(true, nil, "transport is closing") // errStreamDrain indicates that the stream is rejected because the // connection is draining. This could be caused by goaway or balancer // removing the address. errStreamDrain = streamErrorf(codes.Unavailable, "the connection is draining") // errStreamDone is returned from write at the client side to indiacte application // layer of an error. errStreamDone = errors.New("the stream is done") // StatusGoAway indicates that the server sent a GOAWAY that included this // stream's ID in unprocessed RPCs. statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection") ) // TODO: See if we can replace StreamError with status package errors. // StreamError is an error that only affects one stream within a connection. type StreamError struct { Code codes.Code Desc string } func (e StreamError) Error() string { return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc) } // GoAwayReason contains the reason for the GoAway frame received. 
type GoAwayReason uint8 const ( // GoAwayInvalid indicates that no GoAway frame is received. GoAwayInvalid GoAwayReason = 0 // GoAwayNoReason is the default value when GoAway frame is received. GoAwayNoReason GoAwayReason = 1 // GoAwayTooManyPings indicates that a GoAway frame with // ErrCodeEnhanceYourCalm was received and that the debug data said // "too_many_pings". GoAwayTooManyPings GoAwayReason = 2 )
} // NewServerTransport creates a ServerTransport with conn or non-nil error
random_line_split
transport.go
/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package transport defines and implements message oriented communication // channel to complete various transactions (e.g., an RPC). It is meant for // grpc-internal usage and is not intended to be imported directly by users. package transport // externally used as import "google.golang.org/grpc/transport" import ( "errors" "fmt" "net" "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" ) // state of transport type transportState int const ( reachable transportState = iota closing draining ) // ServerConfig consists of all the configurations to establish a server transport. type ServerConfig struct { MaxStreams uint32 AuthInfo credentials.AuthInfo InTapHandle tap.ServerInHandle StatsHandler stats.Handler KeepaliveParams keepalive.ServerParameters KeepalivePolicy keepalive.EnforcementPolicy InitialWindowSize int32 InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int ChannelzParentID int64 } // NewServerTransport creates a ServerTransport with conn or non-nil error // if it fails. 
func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) { return newHTTP2Server(conn, config) } // ConnectOptions covers all relevant options for communicating with the server. type ConnectOptions struct { // UserAgent is the application user agent. UserAgent string // Authority is the :authority pseudo-header to use. This field has no effect if // TransportCredentials is set. Authority string // Dialer specifies how to dial a network address. Dialer func(context.Context, string) (net.Conn, error) // FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors. FailOnNonTempDialError bool // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs. PerRPCCredentials []credentials.PerRPCCredentials // TransportCredentials stores the Authenticator required to setup a client connection. TransportCredentials credentials.TransportCredentials // KeepaliveParams stores the keepalive parameters. KeepaliveParams keepalive.ClientParameters // StatsHandler stores the handler for stats. StatsHandler stats.Handler // InitialWindowSize sets the initial window size for a stream. InitialWindowSize int32 // InitialConnWindowSize sets the initial window size for a connection. InitialConnWindowSize int32 // WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire. WriteBufferSize int // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. ChannelzParentID int64 } // TargetInfo contains the information of the target such as network address and metadata. type TargetInfo struct { Addr string Metadata interface{} Authority string } // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. 
func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onSuccess func()) (ClientTransport, error) { return newHTTP2Client(connectCtx, ctx, target, opts, onSuccess) } // Options provides additional hints and information for message // transmission. type Options struct { // Last indicates whether this write is the last piece for // this stream. Last bool // Delay is a hint to the transport implementation for whether // the data could be buffered for a batching write. The // transport implementation may ignore the hint. // TODO(mmukhi, dfawley): Should this be deleted? Delay bool // IsCompressed indicates weather the message being written // was compressed or not. Transport relays this information // to the API that generates gRPC-specific message header. IsCompressed bool } // CallHdr carries the information of a particular RPC. type CallHdr struct { // Host specifies the peer's host. Host string // Method specifies the operation to perform. Method string // SendCompress specifies the compression algorithm applied on // outbound message. SendCompress string // Creds specifies credentials.PerRPCCredentials for a call. Creds credentials.PerRPCCredentials // Flush indicates whether a new stream command should be sent // to the peer without waiting for the first data. This is // only a hint. // If it's true, the transport may modify the flush decision // for performance purposes. // If it's false, new stream will never be flushed. Flush bool // ContentSubtype specifies the content-subtype for a request. For example, a // content-subtype of "proto" will result in a content-type of // "application/grpc+proto". The value of ContentSubtype must be all // lowercase, otherwise the behavior is undefined. See // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests // for more details. ContentSubtype string } // ClientTransport is the common interface for all gRPC client-side transport // implementations. 
type ClientTransport interface { // Close tears down this transport. Once it returns, the transport // should not be accessed any more. The caller must make sure this // is called only once. Close() error // GracefulClose starts to tear down the transport. It stops accepting // new RPCs and wait the completion of the pending RPCs. GracefulClose() error // Write sends the data for the given stream. A nil stream indicates // the write is to be performed on the transport as a whole. Write(s *Stream, data []byte, opts *Options) error // NewStream creates a Stream for an RPC. NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) // CloseStream clears the footprint of a stream when the stream is // not needed any more. The err indicates the error incurred when // CloseStream is called. Must be called when a stream is finished // unless the associated transport is closing. CloseStream(stream *Stream, err error) // Error returns a channel that is closed when some I/O error // happens. Typically the caller should have a goroutine to monitor // this in order to take action (e.g., close the current transport // and create a new one) in error case. It should not return nil // once the transport is initiated. Error() <-chan struct{} // GoAway returns a channel that is closed when ClientTransport // receives the draining signal from the server (e.g., GOAWAY frame in // HTTP/2). GoAway() <-chan struct{} // GetGoAwayReason returns the reason why GoAway frame was received. GetGoAwayReason() GoAwayReason // IncrMsgSent increments the number of message sent through this transport. IncrMsgSent() // IncrMsgRecv increments the number of message received through this transport. IncrMsgRecv() } // ServerTransport is the common interface for all gRPC server-side transport // implementations. // // Methods may be called concurrently from multiple goroutines, but // Write methods for a given Stream will be called serially. 
type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. HandleStreams(func(*Stream), func(context.Context, string) context.Context) // WriteHeader sends the header metadata for the given stream. // WriteHeader may not be called on all streams. WriteHeader(s *Stream, md metadata.MD) error // Write sends the data for the given stream. // Write may not be called on all streams. Write(s *Stream, data []byte, opts *Options) error // WriteStatus sends the status of a stream to the client. WriteStatus is // the final call made on a stream and always occurs. WriteStatus(s *Stream, st *status.Status) error // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. Close() error // RemoteAddr returns the remote network address. RemoteAddr() net.Addr // Drain notifies the client this ServerTransport stops accepting new RPCs. Drain() // IncrMsgSent increments the number of message sent through this transport. IncrMsgSent() // IncrMsgRecv increments the number of message received through this transport. IncrMsgRecv() } // streamErrorf creates an StreamError with the specified error code and description. func streamErrorf(c codes.Code, format string, a ...interface{}) StreamError { return StreamError{ Code: c, Desc: fmt.Sprintf(format, a...), } } // connectionErrorf creates an ConnectionError with the specified error description. func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { return ConnectionError{ Desc: fmt.Sprintf(format, a...), temp: temp, err: e, } } // ConnectionError is an error that results in the termination of the // entire connection and the retry of all the active streams. type ConnectionError struct { Desc string temp bool err error } func (e ConnectionError)
() string { return fmt.Sprintf("connection error: desc = %q", e.Desc) } // Temporary indicates if this connection error is temporary or fatal. func (e ConnectionError) Temporary() bool { return e.temp } // Origin returns the original error of this connection error. func (e ConnectionError) Origin() error { // Never return nil error here. // If the original error is nil, return itself. if e.err == nil { return e } return e.err } var ( // ErrConnClosing indicates that the transport is closing. ErrConnClosing = connectionErrorf(true, nil, "transport is closing") // errStreamDrain indicates that the stream is rejected because the // connection is draining. This could be caused by goaway or balancer // removing the address. errStreamDrain = streamErrorf(codes.Unavailable, "the connection is draining") // errStreamDone is returned from write at the client side to indiacte application // layer of an error. errStreamDone = errors.New("the stream is done") // StatusGoAway indicates that the server sent a GOAWAY that included this // stream's ID in unprocessed RPCs. statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection") ) // TODO: See if we can replace StreamError with status package errors. // StreamError is an error that only affects one stream within a connection. type StreamError struct { Code codes.Code Desc string } func (e StreamError) Error() string { return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc) } // GoAwayReason contains the reason for the GoAway frame received. type GoAwayReason uint8 const ( // GoAwayInvalid indicates that no GoAway frame is received. GoAwayInvalid GoAwayReason = 0 // GoAwayNoReason is the default value when GoAway frame is received. GoAwayNoReason GoAwayReason = 1 // GoAwayTooManyPings indicates that a GoAway frame with // ErrCodeEnhanceYourCalm was received and that the debug data said // "too_many_pings". GoAwayTooManyPings GoAwayReason = 2 )
Error
identifier_name
transport.go
/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package transport defines and implements message oriented communication // channel to complete various transactions (e.g., an RPC). It is meant for // grpc-internal usage and is not intended to be imported directly by users. package transport // externally used as import "google.golang.org/grpc/transport" import ( "errors" "fmt" "net" "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" ) // state of transport type transportState int const ( reachable transportState = iota closing draining ) // ServerConfig consists of all the configurations to establish a server transport. type ServerConfig struct { MaxStreams uint32 AuthInfo credentials.AuthInfo InTapHandle tap.ServerInHandle StatsHandler stats.Handler KeepaliveParams keepalive.ServerParameters KeepalivePolicy keepalive.EnforcementPolicy InitialWindowSize int32 InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int ChannelzParentID int64 } // NewServerTransport creates a ServerTransport with conn or non-nil error // if it fails. 
func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) { return newHTTP2Server(conn, config) } // ConnectOptions covers all relevant options for communicating with the server. type ConnectOptions struct { // UserAgent is the application user agent. UserAgent string // Authority is the :authority pseudo-header to use. This field has no effect if // TransportCredentials is set. Authority string // Dialer specifies how to dial a network address. Dialer func(context.Context, string) (net.Conn, error) // FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors. FailOnNonTempDialError bool // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs. PerRPCCredentials []credentials.PerRPCCredentials // TransportCredentials stores the Authenticator required to setup a client connection. TransportCredentials credentials.TransportCredentials // KeepaliveParams stores the keepalive parameters. KeepaliveParams keepalive.ClientParameters // StatsHandler stores the handler for stats. StatsHandler stats.Handler // InitialWindowSize sets the initial window size for a stream. InitialWindowSize int32 // InitialConnWindowSize sets the initial window size for a connection. InitialConnWindowSize int32 // WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire. WriteBufferSize int // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. ChannelzParentID int64 } // TargetInfo contains the information of the target such as network address and metadata. type TargetInfo struct { Addr string Metadata interface{} Authority string } // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. 
func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onSuccess func()) (ClientTransport, error)
// Options provides additional hints and information for message // transmission. type Options struct { // Last indicates whether this write is the last piece for // this stream. Last bool // Delay is a hint to the transport implementation for whether // the data could be buffered for a batching write. The // transport implementation may ignore the hint. // TODO(mmukhi, dfawley): Should this be deleted? Delay bool // IsCompressed indicates weather the message being written // was compressed or not. Transport relays this information // to the API that generates gRPC-specific message header. IsCompressed bool } // CallHdr carries the information of a particular RPC. type CallHdr struct { // Host specifies the peer's host. Host string // Method specifies the operation to perform. Method string // SendCompress specifies the compression algorithm applied on // outbound message. SendCompress string // Creds specifies credentials.PerRPCCredentials for a call. Creds credentials.PerRPCCredentials // Flush indicates whether a new stream command should be sent // to the peer without waiting for the first data. This is // only a hint. // If it's true, the transport may modify the flush decision // for performance purposes. // If it's false, new stream will never be flushed. Flush bool // ContentSubtype specifies the content-subtype for a request. For example, a // content-subtype of "proto" will result in a content-type of // "application/grpc+proto". The value of ContentSubtype must be all // lowercase, otherwise the behavior is undefined. See // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests // for more details. ContentSubtype string } // ClientTransport is the common interface for all gRPC client-side transport // implementations. type ClientTransport interface { // Close tears down this transport. Once it returns, the transport // should not be accessed any more. The caller must make sure this // is called only once. 
Close() error // GracefulClose starts to tear down the transport. It stops accepting // new RPCs and wait the completion of the pending RPCs. GracefulClose() error // Write sends the data for the given stream. A nil stream indicates // the write is to be performed on the transport as a whole. Write(s *Stream, data []byte, opts *Options) error // NewStream creates a Stream for an RPC. NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) // CloseStream clears the footprint of a stream when the stream is // not needed any more. The err indicates the error incurred when // CloseStream is called. Must be called when a stream is finished // unless the associated transport is closing. CloseStream(stream *Stream, err error) // Error returns a channel that is closed when some I/O error // happens. Typically the caller should have a goroutine to monitor // this in order to take action (e.g., close the current transport // and create a new one) in error case. It should not return nil // once the transport is initiated. Error() <-chan struct{} // GoAway returns a channel that is closed when ClientTransport // receives the draining signal from the server (e.g., GOAWAY frame in // HTTP/2). GoAway() <-chan struct{} // GetGoAwayReason returns the reason why GoAway frame was received. GetGoAwayReason() GoAwayReason // IncrMsgSent increments the number of message sent through this transport. IncrMsgSent() // IncrMsgRecv increments the number of message received through this transport. IncrMsgRecv() } // ServerTransport is the common interface for all gRPC server-side transport // implementations. // // Methods may be called concurrently from multiple goroutines, but // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. HandleStreams(func(*Stream), func(context.Context, string) context.Context) // WriteHeader sends the header metadata for the given stream. 
// WriteHeader may not be called on all streams. WriteHeader(s *Stream, md metadata.MD) error // Write sends the data for the given stream. // Write may not be called on all streams. Write(s *Stream, data []byte, opts *Options) error // WriteStatus sends the status of a stream to the client. WriteStatus is // the final call made on a stream and always occurs. WriteStatus(s *Stream, st *status.Status) error // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. Close() error // RemoteAddr returns the remote network address. RemoteAddr() net.Addr // Drain notifies the client this ServerTransport stops accepting new RPCs. Drain() // IncrMsgSent increments the number of message sent through this transport. IncrMsgSent() // IncrMsgRecv increments the number of message received through this transport. IncrMsgRecv() } // streamErrorf creates an StreamError with the specified error code and description. func streamErrorf(c codes.Code, format string, a ...interface{}) StreamError { return StreamError{ Code: c, Desc: fmt.Sprintf(format, a...), } } // connectionErrorf creates an ConnectionError with the specified error description. func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { return ConnectionError{ Desc: fmt.Sprintf(format, a...), temp: temp, err: e, } } // ConnectionError is an error that results in the termination of the // entire connection and the retry of all the active streams. type ConnectionError struct { Desc string temp bool err error } func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: desc = %q", e.Desc) } // Temporary indicates if this connection error is temporary or fatal. func (e ConnectionError) Temporary() bool { return e.temp } // Origin returns the original error of this connection error. 
func (e ConnectionError) Origin() error { // Never return nil error here. // If the original error is nil, return itself. if e.err == nil { return e } return e.err } var ( // ErrConnClosing indicates that the transport is closing. ErrConnClosing = connectionErrorf(true, nil, "transport is closing") // errStreamDrain indicates that the stream is rejected because the // connection is draining. This could be caused by goaway or balancer // removing the address. errStreamDrain = streamErrorf(codes.Unavailable, "the connection is draining") // errStreamDone is returned from write at the client side to indiacte application // layer of an error. errStreamDone = errors.New("the stream is done") // StatusGoAway indicates that the server sent a GOAWAY that included this // stream's ID in unprocessed RPCs. statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection") ) // TODO: See if we can replace StreamError with status package errors. // StreamError is an error that only affects one stream within a connection. type StreamError struct { Code codes.Code Desc string } func (e StreamError) Error() string { return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc) } // GoAwayReason contains the reason for the GoAway frame received. type GoAwayReason uint8 const ( // GoAwayInvalid indicates that no GoAway frame is received. GoAwayInvalid GoAwayReason = 0 // GoAwayNoReason is the default value when GoAway frame is received. GoAwayNoReason GoAwayReason = 1 // GoAwayTooManyPings indicates that a GoAway frame with // ErrCodeEnhanceYourCalm was received and that the debug data said // "too_many_pings". GoAwayTooManyPings GoAwayReason = 2 )
{ return newHTTP2Client(connectCtx, ctx, target, opts, onSuccess) }
identifier_body
assignment.go
package main import ( "database/sql" "encoding/json" "fmt" "io/ioutil" "log" "net/url" "github.com/russross/blackfriday" "gopkg.in/yaml.v2" ) const ( assignmentsTable = "assignments" assignmentsDir = "assignments" // TODO: make configable ) type Assignment struct { Id int `json:"-" yaml:"-" meddler:"id,pk"` CanvasId int `json:"id" yaml:"id" meddler:"canvas_id"` CourseId int `json:"course_id" yaml:"course_id" meddler:"course_id"` // the ID of the course the assignment belongs to Name string `json:"name" yaml:"name" meddler:"name"` // the name of the assignment Published bool `json:"published" yaml:"published" meddler:"published"` // Whether the assignment is published Description string `json:"description" yaml:"-" meddler:"description"` // the assignment description, in an HTML fragment PointsPossible float64 `json:"points_possible" yaml:"points_possible" meddler:"points_possible"` // the maximum points possible for the assignment Position int `json:"position" yaml:"position" meddler:"position"` // the sorting order of the assignment in the group // the types of submissions allowed for this assignment list containing one or // more of the following: 'discussion_topic', 'online_quiz', 'on_paper', 'none', // 'external_tool', 'online_text_entry', 'online_url', 'online_upload' // 'media_recording' SubmissionTypes []string `json:"submission_types" yaml:"submission_types" meddler:"submission_types"` // Allowed file extensions, which take effect if submission_types includes // 'online_upload'. AllowedExtensions []string `json:"allowed_extensions" yaml:"allowed_extensions" meddler:"allowed_extensions"` // (Optional) assignment's settings for external tools if submission_types // include 'external_tool'. Only url and new_tab are included (new_tab defaults // to false). Use the 'External Tools' API if you need more information about // an external tool. 
ExternalToolTagAttributes ExternalToolTagAttributes `json:"external_tool_tag_attributes" yaml:"external_tool_tag_attributes" meddler:"external_tool_tag_attributes"` // The number of submission attempts a student can make for this assignment. -1 // is considered unlimited. AllowedAttempts int `json:"allowed_attempts" yaml:"allowed_attempts" meddler:"allowed_attempts"` // The type of grading the assignment receives; one of 'pass_fail', 'percent', // 'letter_grade', 'gpa_scale', 'points' GradingType string `json:"grading_type" yaml:"grading_type" meddler:"grading_type"` AssignmentGroupId int `json:"assignment_group_id" yaml:"assignment_group_id" meddler:"assignment_group_id"` // the ID of the assignment's group QuizId int `json:"quiz_id" yaml:"quiz_id" meddler:"quiz_id"` // (Optional) id of the associated quiz (applies only when submission_types is ['online_quiz']) // the due date for the assignment. returns null if not present. NOTE: If this // assignment has assignment overrides, this field will be the due date as it // applies to the user requesting information from the API. DueAt string `json:"due_at" yaml:"due_at" meddler:"due_at"` // the unlock date (assignment is unlocked after this date) returns null if not // present NOTE: If this assignment has assignment overrides, this field will be // the unlock date as it applies to the user requesting information from the // API. UnlockAt string `json:"unlock_at" yaml:"unlock_at" meddler:"unlock_at"` // the lock date (assignment is locked after this date). returns null if not // present. NOTE: If this assignment has assignment overrides, this field will // be the lock date as it applies to the user requesting information from the // API. 
LockAt string `json:"lock_at" yaml:"lock_at" meddler:"lock_at"` CreatedAt string `json:"created_at" yaml:"created_at" meddler:"created_at"` // The time at which this assignment was originally created UpdatedAt string `json:"updated_at" yaml:"updated_at" meddler:"updated_at"` // The time at which this assignment was last modified in any way // If this is a group assignment, boolean flag indicating whether or not // students will be graded individually. GradeGroupStudentsIndividually bool `json:"grade_group_students_individually" yaml:"grade_group_students_individually" meddler:"grade_group_students_individually"` // The ID of the assignment’s group set, if this is a group assignment. For // group discussions, set group_category_id on the discussion topic, not the // linked assignment. GroupCategoryId int `json:"group_category_id" yaml:"group_category_id" meddler:"group_category_id"` PeerReviews bool `json:"peer_reviews" yaml:"peer_reviews" meddler:"peer_reviews"` // Boolean indicating if peer reviews are required for this assignment // Boolean indicating peer reviews are assigned automatically. If false, the // teacher is expected to manually assign peer reviews. AutomaticPeerReviews bool `json:"automatic_peer_reviews" yaml:"automatic_peer_reviews" meddler:"automatic_peer_reviews"` // Integer representing the amount of reviews each user is assigned. NOTE: This // key is NOT present unless you have automatic_peer_reviews set to true. PeerReviewCount int `json:"peer_review_count" yaml:"peer_review_count" meddler:"peer_review_count"` // String representing a date the reviews are due by. Must be a date that occurs // after the default due date. If blank, or date is not after the assignment's // due date, the assignment's due date will be used. NOTE: This key is NOT // present unless you have automatic_peer_reviews set to true. 
PeerReviewsAssignAt string `json:"peer_reviews_assign_at" yaml:"peer_reviews_assign_at" meddler:"peer_reviews_assign_at"` // Boolean representing whether or not members from within the same group on a // group assignment can be assigned to peer review their own group's work IntraGroupPeerReviews bool `json:"intra_group_peer_reviews" yaml:"intra_group_peer_reviews" meddler:"intra_group_peer_reviews"` HtmlUrl string `json:"html_url" yaml:"html_url" meddler:"html_url"` // the URL to the assignment's web page IntegrationId string `json:"integration_id" yaml:"integration_id" meddler:"integration_id"` // (optional, Third Party unique identifier for Assignment) IntegrationData map[string]string `json:"integration_data" yaml:"integration_data" meddler:"integration_data"` // (optional, Third Party integration data for assignment) AnonymousSubmissions bool `json:"anonymous_submissions" yaml:"anonymous_submissions" meddler:"anonymous_submissions"` // (Optional) whether anonymous submissions are accepted (applies only to quiz assignments) // (Optional) If true, the assignment will be omitted from the student's final // grade OmitFromFinalGrade bool `json:"omit_from_final_grade" yaml:"omit_from_final_grade" meddler:"omit_from_final_grade"` // (Optional) the DiscussionTopic associated with the assignment, if // applicable TODO: is it the topic object or its id? DiscussionTopic int `json:"discussion_topic" yaml:"discussion_topic" meddler:"discussion_topic"` // (Optional) If true, the rubric is directly tied to grading the assignment. // Otherwise, it is only advisory. Included if there is an associated rubric. UseRubricForGrading bool `json:"use_rubric_for_grading" yaml:"use_rubric_for_grading" meddler:"use_rubric_for_grading"` // (Optional) An object describing the basic attributes of the rubric, including // the point total. Included if there is an associated rubric. 
RubricSettings map[string]interface{} `json:"rubric_settings" yaml:"rubric_settings" meddler:"rubric_settings"` // (Optional) A list of scoring criteria and ratings for each rubric criterion. // Included if there is an associated rubric. Rubric []RubricCriterion `json:"rubric" yaml:"rubric" meddler:"rubric"` // if the requesting user has grading rights, the number of submissions that // need grading. NeedsGradingCount int `json:"needs_grading_count" yaml:"needs_grading_count" meddler:"needs_grading_count"` SubmissionsDownloadUrl string `json:"submissions_download_url" yaml:"submissions_download_url" meddler:"submissions_download_url"` // the URL to download all submissions as a zip } type ExternalToolTagAttributes struct { Id int `json:"-" yaml:"-" meddler:"id,pk"` Url string `json:"url" yaml:"url" meddler:"url"` // URL to the external tool NewTab bool `json:"new_tab" yaml:"new_tab" meddler:"new_tab"` // Whether or not there is a new tab for the external tool } type RubricCriterion struct { Id int `json:"-" yaml:"-" meddler:"id,pk"` CanvasId int `json:"id" yaml:"id" meddler:"canvas_id"` Description string `json:"description" yaml:"description" meddler:"description"` LongDescription string `json:"long_description" yaml:"long_description" meddler:"long_description"` Points int `json:"points" yaml:"points" meddler:"points"` } func getAssignments(db *sql.DB) []*Assignment { assignments := make([]*Assignment, 0) courses, _ := findCourses(db) values := url.Values{} values.Add("per_page", "100") // TODO: do it for all courses courseId := courses[0].CanvasId reqUrl := fmt.Sprintf(assignmentsPath, courseId) mustGetObject(reqUrl, values, &assignments) return assignments } func pullAssignments(db *sql.DB) { assignments := getAssignments(db) for _, assignment := range assignments { assignment.Pull(db) } } func pu
b *sql.DB, filepath string) error { assignment := new(Assignment) _, err := readFile(filepath, assignment) if err != nil { return err } return assignment.Push(db) } func pushAssignments(db *sql.DB) { files, err := ioutil.ReadDir(assignmentsDir) if err != nil { log.Fatal(err) } for _, f := range files { filepath := f.Name() err = pushAssignment(db, filepath) if err != nil { log.Fatalf("Failed to push assignment %s: %v\n", filepath, err) } } } func (assignment *Assignment) Dump() error { metadata, err := yaml.Marshal(assignment) if err != nil { return err } assignmentFilePath := fmt.Sprintf("%s/%s.md", assignmentsDir, assignment.Slug()) return writeFile(assignmentFilePath, string(metadata), assignment.Description) } func (assignment *Assignment) Pull(db *sql.DB) error { return pullComponent(db, assignmentPath, assignment.CanvasId, assignment) } // For now, pushing only creates assignments. Later, we'll have to track whether // an assignment already exists before pushing it to either create or update it. // The difference is that assignments have separate API endpoints for the two // actions whereas pages do not. 
func (assignment *Assignment) Push(db *sql.DB) error { // Convert struct to map marshalled, err := json.Marshal(assignment) if err != nil { return err } var assignmentMap map[string]interface{} err = json.Unmarshal(marshalled, &assignmentMap) if err != nil { return err } // fix a few fields assignmentMap["description"] = string(blackfriday.MarkdownCommon([]byte(assignment.Description))) invalidFields := []string{"updated_at", "created_at", "id", "html_url", "submissions_download_url", "course_id", "anonymous_submissions", "discussion_topic", "intra_group_peer_reviews", "needs_grading_count", "peer_review_count", "peer_reviews_assign_at", "quiz_id", "rubric", "rubric_settings", "use_rubric_for_grading"} for _, field := range invalidFields { delete(assignmentMap, field) } a := map[string]interface{}{ "assignment": assignmentMap, } courses, _ := findCourses(db) for _, course := range courses { courseId := course.CanvasId createAssignmentPath := fmt.Sprintf(assignmentsPath, courseId) fmt.Printf("Pushing %s to %s\n", assignment.Name, course.Name) mustPostObject(createAssignmentPath, url.Values{}, a, nil) } return nil } func (assignment *Assignment) Slug() string { return slug(assignment.Name) }
shAssignment(d
identifier_name
assignment.go
package main import ( "database/sql" "encoding/json" "fmt" "io/ioutil" "log" "net/url" "github.com/russross/blackfriday" "gopkg.in/yaml.v2" ) const ( assignmentsTable = "assignments" assignmentsDir = "assignments" // TODO: make configable ) type Assignment struct { Id int `json:"-" yaml:"-" meddler:"id,pk"` CanvasId int `json:"id" yaml:"id" meddler:"canvas_id"` CourseId int `json:"course_id" yaml:"course_id" meddler:"course_id"` // the ID of the course the assignment belongs to Name string `json:"name" yaml:"name" meddler:"name"` // the name of the assignment Published bool `json:"published" yaml:"published" meddler:"published"` // Whether the assignment is published Description string `json:"description" yaml:"-" meddler:"description"` // the assignment description, in an HTML fragment PointsPossible float64 `json:"points_possible" yaml:"points_possible" meddler:"points_possible"` // the maximum points possible for the assignment Position int `json:"position" yaml:"position" meddler:"position"` // the sorting order of the assignment in the group // the types of submissions allowed for this assignment list containing one or // more of the following: 'discussion_topic', 'online_quiz', 'on_paper', 'none', // 'external_tool', 'online_text_entry', 'online_url', 'online_upload' // 'media_recording' SubmissionTypes []string `json:"submission_types" yaml:"submission_types" meddler:"submission_types"` // Allowed file extensions, which take effect if submission_types includes // 'online_upload'. AllowedExtensions []string `json:"allowed_extensions" yaml:"allowed_extensions" meddler:"allowed_extensions"` // (Optional) assignment's settings for external tools if submission_types // include 'external_tool'. Only url and new_tab are included (new_tab defaults // to false). Use the 'External Tools' API if you need more information about // an external tool. 
ExternalToolTagAttributes ExternalToolTagAttributes `json:"external_tool_tag_attributes" yaml:"external_tool_tag_attributes" meddler:"external_tool_tag_attributes"` // The number of submission attempts a student can make for this assignment. -1 // is considered unlimited. AllowedAttempts int `json:"allowed_attempts" yaml:"allowed_attempts" meddler:"allowed_attempts"` // The type of grading the assignment receives; one of 'pass_fail', 'percent', // 'letter_grade', 'gpa_scale', 'points' GradingType string `json:"grading_type" yaml:"grading_type" meddler:"grading_type"` AssignmentGroupId int `json:"assignment_group_id" yaml:"assignment_group_id" meddler:"assignment_group_id"` // the ID of the assignment's group QuizId int `json:"quiz_id" yaml:"quiz_id" meddler:"quiz_id"` // (Optional) id of the associated quiz (applies only when submission_types is ['online_quiz']) // the due date for the assignment. returns null if not present. NOTE: If this // assignment has assignment overrides, this field will be the due date as it // applies to the user requesting information from the API. DueAt string `json:"due_at" yaml:"due_at" meddler:"due_at"` // the unlock date (assignment is unlocked after this date) returns null if not // present NOTE: If this assignment has assignment overrides, this field will be // the unlock date as it applies to the user requesting information from the // API. UnlockAt string `json:"unlock_at" yaml:"unlock_at" meddler:"unlock_at"` // the lock date (assignment is locked after this date). returns null if not // present. NOTE: If this assignment has assignment overrides, this field will // be the lock date as it applies to the user requesting information from the // API. 
LockAt string `json:"lock_at" yaml:"lock_at" meddler:"lock_at"` CreatedAt string `json:"created_at" yaml:"created_at" meddler:"created_at"` // The time at which this assignment was originally created UpdatedAt string `json:"updated_at" yaml:"updated_at" meddler:"updated_at"` // The time at which this assignment was last modified in any way // If this is a group assignment, boolean flag indicating whether or not // students will be graded individually. GradeGroupStudentsIndividually bool `json:"grade_group_students_individually" yaml:"grade_group_students_individually" meddler:"grade_group_students_individually"` // The ID of the assignment’s group set, if this is a group assignment. For // group discussions, set group_category_id on the discussion topic, not the // linked assignment. GroupCategoryId int `json:"group_category_id" yaml:"group_category_id" meddler:"group_category_id"` PeerReviews bool `json:"peer_reviews" yaml:"peer_reviews" meddler:"peer_reviews"` // Boolean indicating if peer reviews are required for this assignment // Boolean indicating peer reviews are assigned automatically. If false, the // teacher is expected to manually assign peer reviews. AutomaticPeerReviews bool `json:"automatic_peer_reviews" yaml:"automatic_peer_reviews" meddler:"automatic_peer_reviews"` // Integer representing the amount of reviews each user is assigned. NOTE: This // key is NOT present unless you have automatic_peer_reviews set to true. PeerReviewCount int `json:"peer_review_count" yaml:"peer_review_count" meddler:"peer_review_count"` // String representing a date the reviews are due by. Must be a date that occurs // after the default due date. If blank, or date is not after the assignment's // due date, the assignment's due date will be used. NOTE: This key is NOT // present unless you have automatic_peer_reviews set to true. 
PeerReviewsAssignAt string `json:"peer_reviews_assign_at" yaml:"peer_reviews_assign_at" meddler:"peer_reviews_assign_at"` // Boolean representing whether or not members from within the same group on a // group assignment can be assigned to peer review their own group's work IntraGroupPeerReviews bool `json:"intra_group_peer_reviews" yaml:"intra_group_peer_reviews" meddler:"intra_group_peer_reviews"` HtmlUrl string `json:"html_url" yaml:"html_url" meddler:"html_url"` // the URL to the assignment's web page IntegrationId string `json:"integration_id" yaml:"integration_id" meddler:"integration_id"` // (optional, Third Party unique identifier for Assignment) IntegrationData map[string]string `json:"integration_data" yaml:"integration_data" meddler:"integration_data"` // (optional, Third Party integration data for assignment) AnonymousSubmissions bool `json:"anonymous_submissions" yaml:"anonymous_submissions" meddler:"anonymous_submissions"` // (Optional) whether anonymous submissions are accepted (applies only to quiz assignments) // (Optional) If true, the assignment will be omitted from the student's final // grade OmitFromFinalGrade bool `json:"omit_from_final_grade" yaml:"omit_from_final_grade" meddler:"omit_from_final_grade"` // (Optional) the DiscussionTopic associated with the assignment, if // applicable TODO: is it the topic object or its id? DiscussionTopic int `json:"discussion_topic" yaml:"discussion_topic" meddler:"discussion_topic"` // (Optional) If true, the rubric is directly tied to grading the assignment. // Otherwise, it is only advisory. Included if there is an associated rubric. UseRubricForGrading bool `json:"use_rubric_for_grading" yaml:"use_rubric_for_grading" meddler:"use_rubric_for_grading"` // (Optional) An object describing the basic attributes of the rubric, including // the point total. Included if there is an associated rubric. 
RubricSettings map[string]interface{} `json:"rubric_settings" yaml:"rubric_settings" meddler:"rubric_settings"` // (Optional) A list of scoring criteria and ratings for each rubric criterion. // Included if there is an associated rubric. Rubric []RubricCriterion `json:"rubric" yaml:"rubric" meddler:"rubric"` // if the requesting user has grading rights, the number of submissions that // need grading. NeedsGradingCount int `json:"needs_grading_count" yaml:"needs_grading_count" meddler:"needs_grading_count"` SubmissionsDownloadUrl string `json:"submissions_download_url" yaml:"submissions_download_url" meddler:"submissions_download_url"` // the URL to download all submissions as a zip } type ExternalToolTagAttributes struct { Id int `json:"-" yaml:"-" meddler:"id,pk"` Url string `json:"url" yaml:"url" meddler:"url"` // URL to the external tool NewTab bool `json:"new_tab" yaml:"new_tab" meddler:"new_tab"` // Whether or not there is a new tab for the external tool } type RubricCriterion struct { Id int `json:"-" yaml:"-" meddler:"id,pk"` CanvasId int `json:"id" yaml:"id" meddler:"canvas_id"` Description string `json:"description" yaml:"description" meddler:"description"` LongDescription string `json:"long_description" yaml:"long_description" meddler:"long_description"` Points int `json:"points" yaml:"points" meddler:"points"` } func getAssignments(db *sql.DB) []*Assignment { assignments := make([]*Assignment, 0) courses, _ := findCourses(db) values := url.Values{} values.Add("per_page", "100") // TODO: do it for all courses courseId := courses[0].CanvasId reqUrl := fmt.Sprintf(assignmentsPath, courseId) mustGetObject(reqUrl, values, &assignments) return assignments } func pullAssignments(db *sql.DB) { assignments := getAssignments(db) for _, assignment := range assignments { assignment.Pull(db) } } func pushAssignment(db *sql.DB, filepath string) error { assignment := new(Assignment) _, err := readFile(filepath, assignment) if err != nil {
return assignment.Push(db) } func pushAssignments(db *sql.DB) { files, err := ioutil.ReadDir(assignmentsDir) if err != nil { log.Fatal(err) } for _, f := range files { filepath := f.Name() err = pushAssignment(db, filepath) if err != nil { log.Fatalf("Failed to push assignment %s: %v\n", filepath, err) } } } func (assignment *Assignment) Dump() error { metadata, err := yaml.Marshal(assignment) if err != nil { return err } assignmentFilePath := fmt.Sprintf("%s/%s.md", assignmentsDir, assignment.Slug()) return writeFile(assignmentFilePath, string(metadata), assignment.Description) } func (assignment *Assignment) Pull(db *sql.DB) error { return pullComponent(db, assignmentPath, assignment.CanvasId, assignment) } // For now, pushing only creates assignments. Later, we'll have to track whether // an assignment already exists before pushing it to either create or update it. // The difference is that assignments have separate API endpoints for the two // actions whereas pages do not. func (assignment *Assignment) Push(db *sql.DB) error { // Convert struct to map marshalled, err := json.Marshal(assignment) if err != nil { return err } var assignmentMap map[string]interface{} err = json.Unmarshal(marshalled, &assignmentMap) if err != nil { return err } // fix a few fields assignmentMap["description"] = string(blackfriday.MarkdownCommon([]byte(assignment.Description))) invalidFields := []string{"updated_at", "created_at", "id", "html_url", "submissions_download_url", "course_id", "anonymous_submissions", "discussion_topic", "intra_group_peer_reviews", "needs_grading_count", "peer_review_count", "peer_reviews_assign_at", "quiz_id", "rubric", "rubric_settings", "use_rubric_for_grading"} for _, field := range invalidFields { delete(assignmentMap, field) } a := map[string]interface{}{ "assignment": assignmentMap, } courses, _ := findCourses(db) for _, course := range courses { courseId := course.CanvasId createAssignmentPath := fmt.Sprintf(assignmentsPath, courseId) 
fmt.Printf("Pushing %s to %s\n", assignment.Name, course.Name) mustPostObject(createAssignmentPath, url.Values{}, a, nil) } return nil } func (assignment *Assignment) Slug() string { return slug(assignment.Name) }
return err }
conditional_block
assignment.go
package main import ( "database/sql" "encoding/json" "fmt" "io/ioutil" "log" "net/url" "github.com/russross/blackfriday" "gopkg.in/yaml.v2" ) const ( assignmentsTable = "assignments" assignmentsDir = "assignments" // TODO: make configable ) type Assignment struct { Id int `json:"-" yaml:"-" meddler:"id,pk"` CanvasId int `json:"id" yaml:"id" meddler:"canvas_id"` CourseId int `json:"course_id" yaml:"course_id" meddler:"course_id"` // the ID of the course the assignment belongs to Name string `json:"name" yaml:"name" meddler:"name"` // the name of the assignment Published bool `json:"published" yaml:"published" meddler:"published"` // Whether the assignment is published Description string `json:"description" yaml:"-" meddler:"description"` // the assignment description, in an HTML fragment PointsPossible float64 `json:"points_possible" yaml:"points_possible" meddler:"points_possible"` // the maximum points possible for the assignment Position int `json:"position" yaml:"position" meddler:"position"` // the sorting order of the assignment in the group // the types of submissions allowed for this assignment list containing one or // more of the following: 'discussion_topic', 'online_quiz', 'on_paper', 'none', // 'external_tool', 'online_text_entry', 'online_url', 'online_upload' // 'media_recording' SubmissionTypes []string `json:"submission_types" yaml:"submission_types" meddler:"submission_types"` // Allowed file extensions, which take effect if submission_types includes // 'online_upload'. AllowedExtensions []string `json:"allowed_extensions" yaml:"allowed_extensions" meddler:"allowed_extensions"` // (Optional) assignment's settings for external tools if submission_types // include 'external_tool'. Only url and new_tab are included (new_tab defaults // to false). Use the 'External Tools' API if you need more information about // an external tool. 
ExternalToolTagAttributes ExternalToolTagAttributes `json:"external_tool_tag_attributes" yaml:"external_tool_tag_attributes" meddler:"external_tool_tag_attributes"` // The number of submission attempts a student can make for this assignment. -1 // is considered unlimited. AllowedAttempts int `json:"allowed_attempts" yaml:"allowed_attempts" meddler:"allowed_attempts"` // The type of grading the assignment receives; one of 'pass_fail', 'percent', // 'letter_grade', 'gpa_scale', 'points' GradingType string `json:"grading_type" yaml:"grading_type" meddler:"grading_type"` AssignmentGroupId int `json:"assignment_group_id" yaml:"assignment_group_id" meddler:"assignment_group_id"` // the ID of the assignment's group QuizId int `json:"quiz_id" yaml:"quiz_id" meddler:"quiz_id"` // (Optional) id of the associated quiz (applies only when submission_types is ['online_quiz']) // the due date for the assignment. returns null if not present. NOTE: If this // assignment has assignment overrides, this field will be the due date as it // applies to the user requesting information from the API. DueAt string `json:"due_at" yaml:"due_at" meddler:"due_at"` // the unlock date (assignment is unlocked after this date) returns null if not // present NOTE: If this assignment has assignment overrides, this field will be // the unlock date as it applies to the user requesting information from the // API. UnlockAt string `json:"unlock_at" yaml:"unlock_at" meddler:"unlock_at"` // the lock date (assignment is locked after this date). returns null if not // present. NOTE: If this assignment has assignment overrides, this field will // be the lock date as it applies to the user requesting information from the // API. 
LockAt string `json:"lock_at" yaml:"lock_at" meddler:"lock_at"` CreatedAt string `json:"created_at" yaml:"created_at" meddler:"created_at"` // The time at which this assignment was originally created UpdatedAt string `json:"updated_at" yaml:"updated_at" meddler:"updated_at"` // The time at which this assignment was last modified in any way // If this is a group assignment, boolean flag indicating whether or not // students will be graded individually. GradeGroupStudentsIndividually bool `json:"grade_group_students_individually" yaml:"grade_group_students_individually" meddler:"grade_group_students_individually"` // The ID of the assignment’s group set, if this is a group assignment. For // group discussions, set group_category_id on the discussion topic, not the // linked assignment. GroupCategoryId int `json:"group_category_id" yaml:"group_category_id" meddler:"group_category_id"` PeerReviews bool `json:"peer_reviews" yaml:"peer_reviews" meddler:"peer_reviews"` // Boolean indicating if peer reviews are required for this assignment // Boolean indicating peer reviews are assigned automatically. If false, the // teacher is expected to manually assign peer reviews. AutomaticPeerReviews bool `json:"automatic_peer_reviews" yaml:"automatic_peer_reviews" meddler:"automatic_peer_reviews"` // Integer representing the amount of reviews each user is assigned. NOTE: This // key is NOT present unless you have automatic_peer_reviews set to true. PeerReviewCount int `json:"peer_review_count" yaml:"peer_review_count" meddler:"peer_review_count"` // String representing a date the reviews are due by. Must be a date that occurs // after the default due date. If blank, or date is not after the assignment's // due date, the assignment's due date will be used. NOTE: This key is NOT // present unless you have automatic_peer_reviews set to true. 
PeerReviewsAssignAt string `json:"peer_reviews_assign_at" yaml:"peer_reviews_assign_at" meddler:"peer_reviews_assign_at"` // Boolean representing whether or not members from within the same group on a // group assignment can be assigned to peer review their own group's work IntraGroupPeerReviews bool `json:"intra_group_peer_reviews" yaml:"intra_group_peer_reviews" meddler:"intra_group_peer_reviews"` HtmlUrl string `json:"html_url" yaml:"html_url" meddler:"html_url"` // the URL to the assignment's web page IntegrationId string `json:"integration_id" yaml:"integration_id" meddler:"integration_id"` // (optional, Third Party unique identifier for Assignment) IntegrationData map[string]string `json:"integration_data" yaml:"integration_data" meddler:"integration_data"` // (optional, Third Party integration data for assignment) AnonymousSubmissions bool `json:"anonymous_submissions" yaml:"anonymous_submissions" meddler:"anonymous_submissions"` // (Optional) whether anonymous submissions are accepted (applies only to quiz assignments) // (Optional) If true, the assignment will be omitted from the student's final // grade OmitFromFinalGrade bool `json:"omit_from_final_grade" yaml:"omit_from_final_grade" meddler:"omit_from_final_grade"` // (Optional) the DiscussionTopic associated with the assignment, if // applicable TODO: is it the topic object or its id? DiscussionTopic int `json:"discussion_topic" yaml:"discussion_topic" meddler:"discussion_topic"` // (Optional) If true, the rubric is directly tied to grading the assignment. // Otherwise, it is only advisory. Included if there is an associated rubric. UseRubricForGrading bool `json:"use_rubric_for_grading" yaml:"use_rubric_for_grading" meddler:"use_rubric_for_grading"` // (Optional) An object describing the basic attributes of the rubric, including // the point total. Included if there is an associated rubric. 
RubricSettings map[string]interface{} `json:"rubric_settings" yaml:"rubric_settings" meddler:"rubric_settings"` // (Optional) A list of scoring criteria and ratings for each rubric criterion. // Included if there is an associated rubric. Rubric []RubricCriterion `json:"rubric" yaml:"rubric" meddler:"rubric"` // if the requesting user has grading rights, the number of submissions that // need grading. NeedsGradingCount int `json:"needs_grading_count" yaml:"needs_grading_count" meddler:"needs_grading_count"` SubmissionsDownloadUrl string `json:"submissions_download_url" yaml:"submissions_download_url" meddler:"submissions_download_url"` // the URL to download all submissions as a zip } type ExternalToolTagAttributes struct { Id int `json:"-" yaml:"-" meddler:"id,pk"` Url string `json:"url" yaml:"url" meddler:"url"` // URL to the external tool NewTab bool `json:"new_tab" yaml:"new_tab" meddler:"new_tab"` // Whether or not there is a new tab for the external tool } type RubricCriterion struct { Id int `json:"-" yaml:"-" meddler:"id,pk"` CanvasId int `json:"id" yaml:"id" meddler:"canvas_id"` Description string `json:"description" yaml:"description" meddler:"description"` LongDescription string `json:"long_description" yaml:"long_description" meddler:"long_description"` Points int `json:"points" yaml:"points" meddler:"points"` } func getAssignments(db *sql.DB) []*Assignment { assignments := make([]*Assignment, 0) courses, _ := findCourses(db) values := url.Values{} values.Add("per_page", "100") // TODO: do it for all courses courseId := courses[0].CanvasId reqUrl := fmt.Sprintf(assignmentsPath, courseId) mustGetObject(reqUrl, values, &assignments) return assignments } func pullAssignments(db *sql.DB) { assignments := getAssignments(db) for _, assignment := range assignments { assignment.Pull(db) } } func pushAssignment(db *sql.DB, filepath string) error { assignment := new(Assignment) _, err := readFile(filepath, assignment) if err != nil { return err } return 
assignment.Push(db) } func pushAssignments(db *sql.DB) { files, err := ioutil.ReadDir(assignmentsDir) if err != nil { log.Fatal(err) } for _, f := range files { filepath := f.Name() err = pushAssignment(db, filepath) if err != nil { log.Fatalf("Failed to push assignment %s: %v\n", filepath, err) } } } func (assignment *Assignment) Dump() error { metadata, err := yaml.Marshal(assignment) if err != nil { return err } assignmentFilePath := fmt.Sprintf("%s/%s.md", assignmentsDir, assignment.Slug()) return writeFile(assignmentFilePath, string(metadata), assignment.Description) } func (assignment *Assignment) Pull(db *sql.DB) error {
// For now, pushing only creates assignments. Later, we'll have to track whether // an assignment already exists before pushing it to either create or update it. // The difference is that assignments have separate API endpoints for the two // actions whereas pages do not. func (assignment *Assignment) Push(db *sql.DB) error { // Convert struct to map marshalled, err := json.Marshal(assignment) if err != nil { return err } var assignmentMap map[string]interface{} err = json.Unmarshal(marshalled, &assignmentMap) if err != nil { return err } // fix a few fields assignmentMap["description"] = string(blackfriday.MarkdownCommon([]byte(assignment.Description))) invalidFields := []string{"updated_at", "created_at", "id", "html_url", "submissions_download_url", "course_id", "anonymous_submissions", "discussion_topic", "intra_group_peer_reviews", "needs_grading_count", "peer_review_count", "peer_reviews_assign_at", "quiz_id", "rubric", "rubric_settings", "use_rubric_for_grading"} for _, field := range invalidFields { delete(assignmentMap, field) } a := map[string]interface{}{ "assignment": assignmentMap, } courses, _ := findCourses(db) for _, course := range courses { courseId := course.CanvasId createAssignmentPath := fmt.Sprintf(assignmentsPath, courseId) fmt.Printf("Pushing %s to %s\n", assignment.Name, course.Name) mustPostObject(createAssignmentPath, url.Values{}, a, nil) } return nil } func (assignment *Assignment) Slug() string { return slug(assignment.Name) }
return pullComponent(db, assignmentPath, assignment.CanvasId, assignment) }
identifier_body
assignment.go
package main import ( "database/sql" "encoding/json" "fmt" "io/ioutil" "log" "net/url" "github.com/russross/blackfriday" "gopkg.in/yaml.v2" ) const ( assignmentsTable = "assignments" assignmentsDir = "assignments" // TODO: make configable ) type Assignment struct { Id int `json:"-" yaml:"-" meddler:"id,pk"` CanvasId int `json:"id" yaml:"id" meddler:"canvas_id"` CourseId int `json:"course_id" yaml:"course_id" meddler:"course_id"` // the ID of the course the assignment belongs to Name string `json:"name" yaml:"name" meddler:"name"` // the name of the assignment Published bool `json:"published" yaml:"published" meddler:"published"` // Whether the assignment is published Description string `json:"description" yaml:"-" meddler:"description"` // the assignment description, in an HTML fragment PointsPossible float64 `json:"points_possible" yaml:"points_possible" meddler:"points_possible"` // the maximum points possible for the assignment Position int `json:"position" yaml:"position" meddler:"position"` // the sorting order of the assignment in the group // the types of submissions allowed for this assignment list containing one or // more of the following: 'discussion_topic', 'online_quiz', 'on_paper', 'none', // 'external_tool', 'online_text_entry', 'online_url', 'online_upload' // 'media_recording' SubmissionTypes []string `json:"submission_types" yaml:"submission_types" meddler:"submission_types"` // Allowed file extensions, which take effect if submission_types includes // 'online_upload'. AllowedExtensions []string `json:"allowed_extensions" yaml:"allowed_extensions" meddler:"allowed_extensions"` // (Optional) assignment's settings for external tools if submission_types // include 'external_tool'. Only url and new_tab are included (new_tab defaults // to false). Use the 'External Tools' API if you need more information about // an external tool. 
ExternalToolTagAttributes ExternalToolTagAttributes `json:"external_tool_tag_attributes" yaml:"external_tool_tag_attributes" meddler:"external_tool_tag_attributes"` // The number of submission attempts a student can make for this assignment. -1 // is considered unlimited. AllowedAttempts int `json:"allowed_attempts" yaml:"allowed_attempts" meddler:"allowed_attempts"` // The type of grading the assignment receives; one of 'pass_fail', 'percent', // 'letter_grade', 'gpa_scale', 'points' GradingType string `json:"grading_type" yaml:"grading_type" meddler:"grading_type"` AssignmentGroupId int `json:"assignment_group_id" yaml:"assignment_group_id" meddler:"assignment_group_id"` // the ID of the assignment's group QuizId int `json:"quiz_id" yaml:"quiz_id" meddler:"quiz_id"` // (Optional) id of the associated quiz (applies only when submission_types is ['online_quiz']) // the due date for the assignment. returns null if not present. NOTE: If this // assignment has assignment overrides, this field will be the due date as it // applies to the user requesting information from the API. DueAt string `json:"due_at" yaml:"due_at" meddler:"due_at"`
// the unlock date as it applies to the user requesting information from the // API. UnlockAt string `json:"unlock_at" yaml:"unlock_at" meddler:"unlock_at"` // the lock date (assignment is locked after this date). returns null if not // present. NOTE: If this assignment has assignment overrides, this field will // be the lock date as it applies to the user requesting information from the // API. LockAt string `json:"lock_at" yaml:"lock_at" meddler:"lock_at"` CreatedAt string `json:"created_at" yaml:"created_at" meddler:"created_at"` // The time at which this assignment was originally created UpdatedAt string `json:"updated_at" yaml:"updated_at" meddler:"updated_at"` // The time at which this assignment was last modified in any way // If this is a group assignment, boolean flag indicating whether or not // students will be graded individually. GradeGroupStudentsIndividually bool `json:"grade_group_students_individually" yaml:"grade_group_students_individually" meddler:"grade_group_students_individually"` // The ID of the assignment’s group set, if this is a group assignment. For // group discussions, set group_category_id on the discussion topic, not the // linked assignment. GroupCategoryId int `json:"group_category_id" yaml:"group_category_id" meddler:"group_category_id"` PeerReviews bool `json:"peer_reviews" yaml:"peer_reviews" meddler:"peer_reviews"` // Boolean indicating if peer reviews are required for this assignment // Boolean indicating peer reviews are assigned automatically. If false, the // teacher is expected to manually assign peer reviews. AutomaticPeerReviews bool `json:"automatic_peer_reviews" yaml:"automatic_peer_reviews" meddler:"automatic_peer_reviews"` // Integer representing the amount of reviews each user is assigned. NOTE: This // key is NOT present unless you have automatic_peer_reviews set to true. 
PeerReviewCount int `json:"peer_review_count" yaml:"peer_review_count" meddler:"peer_review_count"` // String representing a date the reviews are due by. Must be a date that occurs // after the default due date. If blank, or date is not after the assignment's // due date, the assignment's due date will be used. NOTE: This key is NOT // present unless you have automatic_peer_reviews set to true. PeerReviewsAssignAt string `json:"peer_reviews_assign_at" yaml:"peer_reviews_assign_at" meddler:"peer_reviews_assign_at"` // Boolean representing whether or not members from within the same group on a // group assignment can be assigned to peer review their own group's work IntraGroupPeerReviews bool `json:"intra_group_peer_reviews" yaml:"intra_group_peer_reviews" meddler:"intra_group_peer_reviews"` HtmlUrl string `json:"html_url" yaml:"html_url" meddler:"html_url"` // the URL to the assignment's web page IntegrationId string `json:"integration_id" yaml:"integration_id" meddler:"integration_id"` // (optional, Third Party unique identifier for Assignment) IntegrationData map[string]string `json:"integration_data" yaml:"integration_data" meddler:"integration_data"` // (optional, Third Party integration data for assignment) AnonymousSubmissions bool `json:"anonymous_submissions" yaml:"anonymous_submissions" meddler:"anonymous_submissions"` // (Optional) whether anonymous submissions are accepted (applies only to quiz assignments) // (Optional) If true, the assignment will be omitted from the student's final // grade OmitFromFinalGrade bool `json:"omit_from_final_grade" yaml:"omit_from_final_grade" meddler:"omit_from_final_grade"` // (Optional) the DiscussionTopic associated with the assignment, if // applicable TODO: is it the topic object or its id? DiscussionTopic int `json:"discussion_topic" yaml:"discussion_topic" meddler:"discussion_topic"` // (Optional) If true, the rubric is directly tied to grading the assignment. // Otherwise, it is only advisory. 
Included if there is an associated rubric. UseRubricForGrading bool `json:"use_rubric_for_grading" yaml:"use_rubric_for_grading" meddler:"use_rubric_for_grading"` // (Optional) An object describing the basic attributes of the rubric, including // the point total. Included if there is an associated rubric. RubricSettings map[string]interface{} `json:"rubric_settings" yaml:"rubric_settings" meddler:"rubric_settings"` // (Optional) A list of scoring criteria and ratings for each rubric criterion. // Included if there is an associated rubric. Rubric []RubricCriterion `json:"rubric" yaml:"rubric" meddler:"rubric"` // if the requesting user has grading rights, the number of submissions that // need grading. NeedsGradingCount int `json:"needs_grading_count" yaml:"needs_grading_count" meddler:"needs_grading_count"` SubmissionsDownloadUrl string `json:"submissions_download_url" yaml:"submissions_download_url" meddler:"submissions_download_url"` // the URL to download all submissions as a zip } type ExternalToolTagAttributes struct { Id int `json:"-" yaml:"-" meddler:"id,pk"` Url string `json:"url" yaml:"url" meddler:"url"` // URL to the external tool NewTab bool `json:"new_tab" yaml:"new_tab" meddler:"new_tab"` // Whether or not there is a new tab for the external tool } type RubricCriterion struct { Id int `json:"-" yaml:"-" meddler:"id,pk"` CanvasId int `json:"id" yaml:"id" meddler:"canvas_id"` Description string `json:"description" yaml:"description" meddler:"description"` LongDescription string `json:"long_description" yaml:"long_description" meddler:"long_description"` Points int `json:"points" yaml:"points" meddler:"points"` } func getAssignments(db *sql.DB) []*Assignment { assignments := make([]*Assignment, 0) courses, _ := findCourses(db) values := url.Values{} values.Add("per_page", "100") // TODO: do it for all courses courseId := courses[0].CanvasId reqUrl := fmt.Sprintf(assignmentsPath, courseId) mustGetObject(reqUrl, values, &assignments) return assignments } 
func pullAssignments(db *sql.DB) { assignments := getAssignments(db) for _, assignment := range assignments { assignment.Pull(db) } } func pushAssignment(db *sql.DB, filepath string) error { assignment := new(Assignment) _, err := readFile(filepath, assignment) if err != nil { return err } return assignment.Push(db) } func pushAssignments(db *sql.DB) { files, err := ioutil.ReadDir(assignmentsDir) if err != nil { log.Fatal(err) } for _, f := range files { filepath := f.Name() err = pushAssignment(db, filepath) if err != nil { log.Fatalf("Failed to push assignment %s: %v\n", filepath, err) } } } func (assignment *Assignment) Dump() error { metadata, err := yaml.Marshal(assignment) if err != nil { return err } assignmentFilePath := fmt.Sprintf("%s/%s.md", assignmentsDir, assignment.Slug()) return writeFile(assignmentFilePath, string(metadata), assignment.Description) } func (assignment *Assignment) Pull(db *sql.DB) error { return pullComponent(db, assignmentPath, assignment.CanvasId, assignment) } // For now, pushing only creates assignments. Later, we'll have to track whether // an assignment already exists before pushing it to either create or update it. // The difference is that assignments have separate API endpoints for the two // actions whereas pages do not. 
func (assignment *Assignment) Push(db *sql.DB) error { // Convert struct to map marshalled, err := json.Marshal(assignment) if err != nil { return err } var assignmentMap map[string]interface{} err = json.Unmarshal(marshalled, &assignmentMap) if err != nil { return err } // fix a few fields assignmentMap["description"] = string(blackfriday.MarkdownCommon([]byte(assignment.Description))) invalidFields := []string{"updated_at", "created_at", "id", "html_url", "submissions_download_url", "course_id", "anonymous_submissions", "discussion_topic", "intra_group_peer_reviews", "needs_grading_count", "peer_review_count", "peer_reviews_assign_at", "quiz_id", "rubric", "rubric_settings", "use_rubric_for_grading"} for _, field := range invalidFields { delete(assignmentMap, field) } a := map[string]interface{}{ "assignment": assignmentMap, } courses, _ := findCourses(db) for _, course := range courses { courseId := course.CanvasId createAssignmentPath := fmt.Sprintf(assignmentsPath, courseId) fmt.Printf("Pushing %s to %s\n", assignment.Name, course.Name) mustPostObject(createAssignmentPath, url.Values{}, a, nil) } return nil } func (assignment *Assignment) Slug() string { return slug(assignment.Name) }
// the unlock date (assignment is unlocked after this date) returns null if not // present NOTE: If this assignment has assignment overrides, this field will be
random_line_split
home.ts
// //Import the theme import * as scroll from "./scroll.js"; import * as outlook from "../../../outlook/v/code/outlook.js"; import * as library from "../../../library/v/code/library.js" import * as fuel from "./fuel.js" import school from "./school.js"; import market from "./market.js"; import * as school_lib from "./library.js"; import create_element from "./create.js"; import * as io from "./io.js" // //This is my school sytem market place where the school data is represented as //lables with a brief description of the school and links to their schools. export default class home extends scroll.scroll{ // public Fuel: show|null=null; public sql_: string; // constructor( //The css string used to derive the elemnt where this sql will be placed css: string, // //The mother view where this panel is placed. base: outlook.view, // //The database name that is the base of this query dbname: string ) { super(css, base, dbname); this.sql_ = this.get_sql_(); } // //Get the sql that was set by the constructor private get_sql_():string { return ' select `school`.`logo`, ' + ' `school`.`id`, ' + ' `school`.`name`, ' + ' `school`.`address`, ' + ' `school`.`location` ' + ' from `general_school`.`school` '; } // set sql(s: string) { this.sql_=s } get sql(){return this.sql_} // // //Paint this market place from the first selection in a lable format. async continue_paint() { // const count_sql = `select count(1) as count from (${this.sql}) as su`; // //Retrieve the maximum records available for display const records = await school.current.exec("database", [this.config.app_db], "get_sql_data", [count_sql]) const ifuel = Object.values(records)[0] this.max_records = parseInt(String(ifuel["count"])); await this.goto(); } // // get_io(col: school_lib.column_meta):io.io{ return new io.input("text",this) } // //Sets the ifuel and displays it in the required procedure public async show(Ifuel:library.Ifuel, offset:number) { // //Make these retrieved results visible if (this.Fuel === null)
else { await this.Fuel.activate(Ifuel, offset); this.Fuel.paint(this.target!,offset) } } // //This is the search event listener //Filters the schools displayed on the market pannel and to a selected //shool public async show_school(evt: Event): Promise<void>{ // //Throw an error if this method was called without the fuel being set if (this.Fuel === null) throw new Error("method called before fuel isset"); // //Get the selector element selected value selected. const selected = (<HTMLSelectElement>evt.target).value; // //Test if the accademy is already displayed in the show const exist: false|academy = await this.Fuel.exists(selected); /** * * 1. the accademy is already painted bring it to focus*/ if (exist instanceof academy) { exist.focus(); return } /* * 2. Get the associated Ifuel, expand the show and bring it to focus */ this.Fuel!.add_academy(selected); } } // //This home page models the school market place and hence we need to model a market. //this promted the modeling of the fuel as stock and the barrels as items class show extends fuel.fuel { // constructor(records: library.Ifuel, sql: string, public host: home, offset: number) { super(records, sql, host, offset) // this.display = 'label'; } // //Overide the show method to include the bootrap class styles keeping in mind //we are only interested in the label view public paint(element: HTMLElement): void { // //Allow every barrel to paint itsself this.forEach(bar => bar.paint(element)); } // //Adds an academy with the given selection name async add_academy(selection: string):Promise<academy|false> { // //Change the sql to include that condition // //Get the original sql const sql = this.host.original_sql === undefined ? 
this.sql : this.host.original_sql; this.host.original_sql = sql; // //The modifying where clause const modify = ` where school.name= ${selection}`; // //Modify the sql this.host.sql = sql + modify; // //Get the ifuel for this const ifuel = await this.host.query(0, 1); // //Expand this repository with the given information. this.expand(-1, ifuel) // //Return the newly created accademy if it exists return this.exists(selection); } // //Returns the accademy with a given name from its current repository, i.e., displayed in the show room async exists(selection: string): Promise<academy | false>{ // //Get the academy with the given name const acdms: Array<academy> = [] this.forEach(acc => acdms.push(<academy>acc)); // //filter to obtain the required accademy const selected = acdms.filter(acc => acc.name!.data === selection) // //result false if none is selected if (selected.length === 0) return false; // return selected[0]; } // //Converts the static list of the records into barrels making them members of this //array coz currently this array is empty. This method is also called when there //is need to expand this array with more data hence the method is public and has an //optional Ifuel with it public async activate(ifuel?: library.Ifuel, start?: number): Promise<void> { // //Test if the columns are already set if (this.columns === undefined) await this.get_columns(); // //The records to be activated can either come from the constructor or as a //parameter const records = ifuel === undefined ? this.records : ifuel; const offset = start === undefined ? this.offset : start; // //Loop through the static structure of the ifuel creating barrels in each //indexing them by their offsets. 
records.forEach((rec, index) => { // //Evaluate the offset used to derive this barrel const map_index: number = offset + index; // //Die for now if this fuel has repeated barellels if (this.has(map_index)) { // //alert the user then die alert(`The fuel is overiding at idex ${map_index}`); throw new Error(`The fuel is overiding at idex ${map_index}`); } // //Add the activated tin into this collection. this.set(map_index, new academy(rec, this, map_index)); }); } } // //An accademy is an advert of a school as it appears in the market place once selected //the accademy becomes an institution and hence allows the users to have accces of the //various school assets. class academy extends fuel.barrel{ // // The tin that houses the name of this accademy. public name?: fuel.tin; // //The tin that houses the short code of this accademy. public id?: fuel.tin; // //The tin with the logo/image tag public logo?: fuel.tin; // //The school name tag that has an onclick used for openning a selected school public header?: HTMLHeadElement; // //The element used to display th public element ?:HTMLElement // //The short code for this accademy constructor( // //This is the static collection of the tins as an object items:{[index:string]:library.basic_value}, // //The bigger fuel collection parent: show, // //The offset of this barrel in the database offset: number ) { super(items, parent, offset); } // //The activation of the static tins to tins required population of its //metadata this is incase these tins were derived from a random sql. 
activate():void{ super.activate(); // //Set the image, name and id tins const Tins=["logo", "name", "id"].map(el => this.find(el)); // //Assign the properties for (let index = 0; index < Tins.length; index++) { switch (index) { case 0: this.logo = Tins[index]; break case 1: this.name = Tins[index]; break case 2: this.id = Tins[index]; break } } } // //Put this element into focus focus() { this.element!.scrollIntoView(); this.element!.focus(); } // //Returns the accademy property selected from the current collection of tins private find(property: string): fuel.tin{ // //convert this map into a simple array let Tins: Array<fuel.tin> = []; this.forEach(element => Tins.push(element)); // //Filter the tins using the name as a creiteria const selected= Tins.filter(Tin=>Tin.name===property) // //Ensure only one tin was filtered if(selected.length>1) throw new Error("invalid sql that returned dublicate column names") // //return the promised tin return selected[0] } // // //Converts this accademy to an institution for the user to access the various school //resources public async open_school() { // //The institution to be viewed const Institution= new institution(this.parent.host) // //administer the institution await Institution.administer() } // //Paints this barrel by default as a table this method can be overidden to //change the mode of display public paint(el?: HTMLElement): HTMLElement{ // //Set the anchor if (el instanceof HTMLElement) this.anchor = el; // //Get the element to attach this display const element = el === undefined ? this.anchor! 
: el; // //Create the div element responsible for panting this academy this.element = create_element(element, "div", { className: "accademy col-md-4", tabindex:0}); // //The div that houses the logo and the school name used for bootrap styling const container = create_element(this.element, "div", { className: "full blog_img_popular" }); // //The image tag for the logo create_element(container, "img", { src: String(this.logo?.data), className: "img-responsive" }); // //The school name as a h4 this.header= create_element(container,"h4",{onclick:()=>this.open_school(),textContent:String(this.name!.data)}) // //Return the elemnt created. return this.element; } } // //An institution is a class where the user can access the various school resources via event //listeners. //This class was designed to extend an app as a baby but since it cannot extend both an app //and a baby an institution was created that has a school. class institution extends outlook.baby<void>{ // //To create this instition we need a school an the url where this school will be //displayed constructor( // //The mother of this institution is the school mother:school, // //The id or short name for this institution public id: string, // //The name of this instutution public name: string // ) { super(mother); // //Set the pannels of this institution this.set_pannels(); } // //Both the get_results and the check are requirement of the baby which by now are doing //nothing async get_result() { } check(): boolean {return true;} }
{ // this.Fuel = new show(Ifuel, this.sql, this, offset); // //Activate the fuel await this.Fuel.activate(); // //Paint this labels to make them visible. await this.Fuel.paint(this.target!); }
conditional_block
home.ts
// //Import the theme import * as scroll from "./scroll.js"; import * as outlook from "../../../outlook/v/code/outlook.js"; import * as library from "../../../library/v/code/library.js" import * as fuel from "./fuel.js" import school from "./school.js"; import market from "./market.js"; import * as school_lib from "./library.js"; import create_element from "./create.js"; import * as io from "./io.js" // //This is my school sytem market place where the school data is represented as //lables with a brief description of the school and links to their schools. export default class home extends scroll.scroll{ // public Fuel: show|null=null; public sql_: string; // constructor( //The css string used to derive the elemnt where this sql will be placed css: string, // //The mother view where this panel is placed. base: outlook.view, // //The database name that is the base of this query dbname: string ) { super(css, base, dbname); this.sql_ = this.get_sql_(); } // //Get the sql that was set by the constructor private get_sql_():string { return ' select `school`.`logo`, ' + ' `school`.`id`, ' + ' `school`.`name`, ' + ' `school`.`address`, ' + ' `school`.`location` ' + ' from `general_school`.`school` '; } // set sql(s: string) { this.sql_=s } get sql(){return this.sql_} // // //Paint this market place from the first selection in a lable format. 
async continue_paint() { // const count_sql = `select count(1) as count from (${this.sql}) as su`; // //Retrieve the maximum records available for display const records = await school.current.exec("database", [this.config.app_db], "get_sql_data", [count_sql]) const ifuel = Object.values(records)[0] this.max_records = parseInt(String(ifuel["count"])); await this.goto(); } // // get_io(col: school_lib.column_meta):io.io{ return new io.input("text",this) } // //Sets the ifuel and displays it in the required procedure public async show(Ifuel:library.Ifuel, offset:number) { // //Make these retrieved results visible if (this.Fuel === null) { // this.Fuel = new show(Ifuel, this.sql, this, offset); // //Activate the fuel await this.Fuel.activate(); // //Paint this labels to make them visible. await this.Fuel.paint(this.target!); } else { await this.Fuel.activate(Ifuel, offset); this.Fuel.paint(this.target!,offset) } } // //This is the search event listener //Filters the schools displayed on the market pannel and to a selected //shool public async show_school(evt: Event): Promise<void>{ // //Throw an error if this method was called without the fuel being set if (this.Fuel === null) throw new Error("method called before fuel isset"); // //Get the selector element selected value selected. const selected = (<HTMLSelectElement>evt.target).value; // //Test if the accademy is already displayed in the show const exist: false|academy = await this.Fuel.exists(selected); /** * * 1. the accademy is already painted bring it to focus*/ if (exist instanceof academy) { exist.focus(); return } /* * 2. Get the associated Ifuel, expand the show and bring it to focus */ this.Fuel!.add_academy(selected); } } // //This home page models the school market place and hence we need to model a market. 
//this promted the modeling of the fuel as stock and the barrels as items class show extends fuel.fuel { // constructor(records: library.Ifuel, sql: string, public host: home, offset: number) { super(records, sql, host, offset) // this.display = 'label'; } // //Overide the show method to include the bootrap class styles keeping in mind //we are only interested in the label view public paint(element: HTMLElement): void { // //Allow every barrel to paint itsself this.forEach(bar => bar.paint(element)); } // //Adds an academy with the given selection name async add_academy(selection: string):Promise<academy|false> { // //Change the sql to include that condition // //Get the original sql
// //Modify the sql this.host.sql = sql + modify; // //Get the ifuel for this const ifuel = await this.host.query(0, 1); // //Expand this repository with the given information. this.expand(-1, ifuel) // //Return the newly created accademy if it exists return this.exists(selection); } // //Returns the accademy with a given name from its current repository, i.e., displayed in the show room async exists(selection: string): Promise<academy | false>{ // //Get the academy with the given name const acdms: Array<academy> = [] this.forEach(acc => acdms.push(<academy>acc)); // //filter to obtain the required accademy const selected = acdms.filter(acc => acc.name!.data === selection) // //result false if none is selected if (selected.length === 0) return false; // return selected[0]; } // //Converts the static list of the records into barrels making them members of this //array coz currently this array is empty. This method is also called when there //is need to expand this array with more data hence the method is public and has an //optional Ifuel with it public async activate(ifuel?: library.Ifuel, start?: number): Promise<void> { // //Test if the columns are already set if (this.columns === undefined) await this.get_columns(); // //The records to be activated can either come from the constructor or as a //parameter const records = ifuel === undefined ? this.records : ifuel; const offset = start === undefined ? this.offset : start; // //Loop through the static structure of the ifuel creating barrels in each //indexing them by their offsets. records.forEach((rec, index) => { // //Evaluate the offset used to derive this barrel const map_index: number = offset + index; // //Die for now if this fuel has repeated barellels if (this.has(map_index)) { // //alert the user then die alert(`The fuel is overiding at idex ${map_index}`); throw new Error(`The fuel is overiding at idex ${map_index}`); } // //Add the activated tin into this collection. 
this.set(map_index, new academy(rec, this, map_index)); }); } } // //An accademy is an advert of a school as it appears in the market place once selected //the accademy becomes an institution and hence allows the users to have accces of the //various school assets. class academy extends fuel.barrel{ // // The tin that houses the name of this accademy. public name?: fuel.tin; // //The tin that houses the short code of this accademy. public id?: fuel.tin; // //The tin with the logo/image tag public logo?: fuel.tin; // //The school name tag that has an onclick used for openning a selected school public header?: HTMLHeadElement; // //The element used to display th public element ?:HTMLElement // //The short code for this accademy constructor( // //This is the static collection of the tins as an object items:{[index:string]:library.basic_value}, // //The bigger fuel collection parent: show, // //The offset of this barrel in the database offset: number ) { super(items, parent, offset); } // //The activation of the static tins to tins required population of its //metadata this is incase these tins were derived from a random sql. 
activate():void{ super.activate(); // //Set the image, name and id tins const Tins=["logo", "name", "id"].map(el => this.find(el)); // //Assign the properties for (let index = 0; index < Tins.length; index++) { switch (index) { case 0: this.logo = Tins[index]; break case 1: this.name = Tins[index]; break case 2: this.id = Tins[index]; break } } } // //Put this element into focus focus() { this.element!.scrollIntoView(); this.element!.focus(); } // //Returns the accademy property selected from the current collection of tins private find(property: string): fuel.tin{ // //convert this map into a simple array let Tins: Array<fuel.tin> = []; this.forEach(element => Tins.push(element)); // //Filter the tins using the name as a creiteria const selected= Tins.filter(Tin=>Tin.name===property) // //Ensure only one tin was filtered if(selected.length>1) throw new Error("invalid sql that returned dublicate column names") // //return the promised tin return selected[0] } // // //Converts this accademy to an institution for the user to access the various school //resources public async open_school() { // //The institution to be viewed const Institution= new institution(this.parent.host) // //administer the institution await Institution.administer() } // //Paints this barrel by default as a table this method can be overidden to //change the mode of display public paint(el?: HTMLElement): HTMLElement{ // //Set the anchor if (el instanceof HTMLElement) this.anchor = el; // //Get the element to attach this display const element = el === undefined ? this.anchor! 
: el; // //Create the div element responsible for panting this academy this.element = create_element(element, "div", { className: "accademy col-md-4", tabindex:0}); // //The div that houses the logo and the school name used for bootrap styling const container = create_element(this.element, "div", { className: "full blog_img_popular" }); // //The image tag for the logo create_element(container, "img", { src: String(this.logo?.data), className: "img-responsive" }); // //The school name as a h4 this.header= create_element(container,"h4",{onclick:()=>this.open_school(),textContent:String(this.name!.data)}) // //Return the elemnt created. return this.element; } } // //An institution is a class where the user can access the various school resources via event //listeners. //This class was designed to extend an app as a baby but since it cannot extend both an app //and a baby an institution was created that has a school. class institution extends outlook.baby<void>{ // //To create this instition we need a school an the url where this school will be //displayed constructor( // //The mother of this institution is the school mother:school, // //The id or short name for this institution public id: string, // //The name of this instutution public name: string // ) { super(mother); // //Set the pannels of this institution this.set_pannels(); } // //Both the get_results and the check are requirement of the baby which by now are doing //nothing async get_result() { } check(): boolean {return true;} }
const sql = this.host.original_sql === undefined ? this.sql : this.host.original_sql; this.host.original_sql = sql; // //The modifying where clause const modify = ` where school.name= ${selection}`;
random_line_split
home.ts
// //Import the theme import * as scroll from "./scroll.js"; import * as outlook from "../../../outlook/v/code/outlook.js"; import * as library from "../../../library/v/code/library.js" import * as fuel from "./fuel.js" import school from "./school.js"; import market from "./market.js"; import * as school_lib from "./library.js"; import create_element from "./create.js"; import * as io from "./io.js" // //This is my school sytem market place where the school data is represented as //lables with a brief description of the school and links to their schools. export default class home extends scroll.scroll{ // public Fuel: show|null=null; public sql_: string; // constructor( //The css string used to derive the elemnt where this sql will be placed css: string, // //The mother view where this panel is placed. base: outlook.view, // //The database name that is the base of this query dbname: string ) { super(css, base, dbname); this.sql_ = this.get_sql_(); } // //Get the sql that was set by the constructor private get_sql_():string { return ' select `school`.`logo`, ' + ' `school`.`id`, ' + ' `school`.`name`, ' + ' `school`.`address`, ' + ' `school`.`location` ' + ' from `general_school`.`school` '; } // set sql(s: string) { this.sql_=s } get sql(){return this.sql_} // // //Paint this market place from the first selection in a lable format. 
async continue_paint() { // const count_sql = `select count(1) as count from (${this.sql}) as su`; // //Retrieve the maximum records available for display const records = await school.current.exec("database", [this.config.app_db], "get_sql_data", [count_sql]) const ifuel = Object.values(records)[0] this.max_records = parseInt(String(ifuel["count"])); await this.goto(); } // // get_io(col: school_lib.column_meta):io.io{ return new io.input("text",this) } // //Sets the ifuel and displays it in the required procedure public async show(Ifuel:library.Ifuel, offset:number) { // //Make these retrieved results visible if (this.Fuel === null) { // this.Fuel = new show(Ifuel, this.sql, this, offset); // //Activate the fuel await this.Fuel.activate(); // //Paint this labels to make them visible. await this.Fuel.paint(this.target!); } else { await this.Fuel.activate(Ifuel, offset); this.Fuel.paint(this.target!,offset) } } // //This is the search event listener //Filters the schools displayed on the market pannel and to a selected //shool public async show_school(evt: Event): Promise<void>{ // //Throw an error if this method was called without the fuel being set if (this.Fuel === null) throw new Error("method called before fuel isset"); // //Get the selector element selected value selected. const selected = (<HTMLSelectElement>evt.target).value; // //Test if the accademy is already displayed in the show const exist: false|academy = await this.Fuel.exists(selected); /** * * 1. the accademy is already painted bring it to focus*/ if (exist instanceof academy) { exist.focus(); return } /* * 2. Get the associated Ifuel, expand the show and bring it to focus */ this.Fuel!.add_academy(selected); } } // //This home page models the school market place and hence we need to model a market. 
//this promted the modeling of the fuel as stock and the barrels as items class show extends fuel.fuel { // constructor(records: library.Ifuel, sql: string, public host: home, offset: number) { super(records, sql, host, offset) // this.display = 'label'; } // //Overide the show method to include the bootrap class styles keeping in mind //we are only interested in the label view public paint(element: HTMLElement): void { // //Allow every barrel to paint itsself this.forEach(bar => bar.paint(element)); } // //Adds an academy with the given selection name async add_academy(selection: string):Promise<academy|false> { // //Change the sql to include that condition // //Get the original sql const sql = this.host.original_sql === undefined ? this.sql : this.host.original_sql; this.host.original_sql = sql; // //The modifying where clause const modify = ` where school.name= ${selection}`; // //Modify the sql this.host.sql = sql + modify; // //Get the ifuel for this const ifuel = await this.host.query(0, 1); // //Expand this repository with the given information. this.expand(-1, ifuel) // //Return the newly created accademy if it exists return this.exists(selection); } // //Returns the accademy with a given name from its current repository, i.e., displayed in the show room async exists(selection: string): Promise<academy | false>{ // //Get the academy with the given name const acdms: Array<academy> = [] this.forEach(acc => acdms.push(<academy>acc)); // //filter to obtain the required accademy const selected = acdms.filter(acc => acc.name!.data === selection) // //result false if none is selected if (selected.length === 0) return false; // return selected[0]; } // //Converts the static list of the records into barrels making them members of this //array coz currently this array is empty. 
This method is also called when there //is need to expand this array with more data hence the method is public and has an //optional Ifuel with it public async activate(ifuel?: library.Ifuel, start?: number): Promise<void> { // //Test if the columns are already set if (this.columns === undefined) await this.get_columns(); // //The records to be activated can either come from the constructor or as a //parameter const records = ifuel === undefined ? this.records : ifuel; const offset = start === undefined ? this.offset : start; // //Loop through the static structure of the ifuel creating barrels in each //indexing them by their offsets. records.forEach((rec, index) => { // //Evaluate the offset used to derive this barrel const map_index: number = offset + index; // //Die for now if this fuel has repeated barellels if (this.has(map_index)) { // //alert the user then die alert(`The fuel is overiding at idex ${map_index}`); throw new Error(`The fuel is overiding at idex ${map_index}`); } // //Add the activated tin into this collection. this.set(map_index, new academy(rec, this, map_index)); }); } } // //An accademy is an advert of a school as it appears in the market place once selected //the accademy becomes an institution and hence allows the users to have accces of the //various school assets. class academy extends fuel.barrel{ // // The tin that houses the name of this accademy. public name?: fuel.tin; // //The tin that houses the short code of this accademy. 
public id?: fuel.tin; // //The tin with the logo/image tag public logo?: fuel.tin; // //The school name tag that has an onclick used for openning a selected school public header?: HTMLHeadElement; // //The element used to display th public element ?:HTMLElement // //The short code for this accademy constructor( // //This is the static collection of the tins as an object items:{[index:string]:library.basic_value}, // //The bigger fuel collection parent: show, // //The offset of this barrel in the database offset: number ) { super(items, parent, offset); } // //The activation of the static tins to tins required population of its //metadata this is incase these tins were derived from a random sql. activate():void{ super.activate(); // //Set the image, name and id tins const Tins=["logo", "name", "id"].map(el => this.find(el)); // //Assign the properties for (let index = 0; index < Tins.length; index++) { switch (index) { case 0: this.logo = Tins[index]; break case 1: this.name = Tins[index]; break case 2: this.id = Tins[index]; break } } } // //Put this element into focus focus() { this.element!.scrollIntoView(); this.element!.focus(); } // //Returns the accademy property selected from the current collection of tins private find(property: string): fuel.tin{ // //convert this map into a simple array let Tins: Array<fuel.tin> = []; this.forEach(element => Tins.push(element)); // //Filter the tins using the name as a creiteria const selected= Tins.filter(Tin=>Tin.name===property) // //Ensure only one tin was filtered if(selected.length>1) throw new Error("invalid sql that returned dublicate column names") // //return the promised tin return selected[0] } // // //Converts this accademy to an institution for the user to access the various school //resources public async
() { // //The institution to be viewed const Institution= new institution(this.parent.host) // //administer the institution await Institution.administer() } // //Paints this barrel by default as a table this method can be overidden to //change the mode of display public paint(el?: HTMLElement): HTMLElement{ // //Set the anchor if (el instanceof HTMLElement) this.anchor = el; // //Get the element to attach this display const element = el === undefined ? this.anchor! : el; // //Create the div element responsible for panting this academy this.element = create_element(element, "div", { className: "accademy col-md-4", tabindex:0}); // //The div that houses the logo and the school name used for bootrap styling const container = create_element(this.element, "div", { className: "full blog_img_popular" }); // //The image tag for the logo create_element(container, "img", { src: String(this.logo?.data), className: "img-responsive" }); // //The school name as a h4 this.header= create_element(container,"h4",{onclick:()=>this.open_school(),textContent:String(this.name!.data)}) // //Return the elemnt created. return this.element; } } // //An institution is a class where the user can access the various school resources via event //listeners. //This class was designed to extend an app as a baby but since it cannot extend both an app //and a baby an institution was created that has a school. class institution extends outlook.baby<void>{ // //To create this instition we need a school an the url where this school will be //displayed constructor( // //The mother of this institution is the school mother:school, // //The id or short name for this institution public id: string, // //The name of this instutution public name: string // ) { super(mother); // //Set the pannels of this institution this.set_pannels(); } // //Both the get_results and the check are requirement of the baby which by now are doing //nothing async get_result() { } check(): boolean {return true;} }
open_school
identifier_name
main.py
from other.rnn.text_setiment_classification.data import ProcessACLData from other.rnn.text_setiment_classification.brnn import BiRNN from torch.utils.data import DataLoader from torchtext.vocab import GloVe import torch, os # 设置GPU环境 os.environ['CUDA_VISIBLE_DEVICES'] = '1' class SemanticClassification: def __init__(self): # 定义相关参数 self.batch_size = 64 # 批次大小 self.num_workers = 8 # 线程数量 self.embedded_size = 100 # 词向量大小 self.num_hiddens = 100 # 隐藏层数量 self.num_layers = 2 # 堆叠LSTM的层数 self.num_classes = 2 # 类别数量 self.name_classes = ['neg', 'pos'] # 类别名称 self.pretrained = True # 是否使用预训练的词向量 self.lr = 0.01 # 学习率 self.num_epochs = 20 # 批次数量 self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # 设备 self.checkpoints = 'text.pth' # 定义处理对象的实体类 data = ProcessACLData() # 获得词典 self.vocab_data = data.get_vocab_imdb() # 获取训练、测试数据集及其词典 self.train_set = data.get_dataset(data.train_dir, self.vocab_data) self.test_set= data.get_dataset(data.test_dir, self.vocab_data) print('训练数据大小:{}'.format(len(self.train_set))) print('测试数据大小:{}'.format(len(self.test_set))) # 获得数据加载器 self.train_loader = DataLoader(self.train_set, batch_size=self.batch_size, shuffle=True, num_workers=8) self.test_loader = DataLoader(self.test_set, batch_size=self.batch_size, num_workers=8) # 定义训练和测试网络结构 self.net = BiRNN(vocab=self.vocab_data, embed_size=self.embedded_size, num_hiddens=self.num_hiddens, num_layers=self.num_layers) # 定义GloVe对象来加载预训练词向量,维度要与网络中embedding大小一致 glove = GloVe(name='6B', dim=self.embedded_size, cache='glove') # 加载预训练的值 if self.pretrained: self.net.embedding.weight.data.copy_( self.load_pretrained_embedding(self.vocab_data.itos, glove) ) # 取消求梯度 self.net.embedding.weight.requires_grad = False def train(self, net): """ 训练 :return: """ # 定义Adam优化器和交叉熵损失函数 if self.pretrained: # 过滤掉预训练的词向量的权重,只训练没有包含的值 self.optimizer = torch.optim.Adam(filter(lambda p:p.requires_grad, net.parameters()), lr=self.lr) else: self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr) 
self.criterion = torch.nn.CrossEntropyLoss() print('training stage....') # 将网络结构调成训练模式;将网络放置到GPU上;初始化梯度张量 net.cuda(device=self.device) self.optimizer.zero_grad() # 周期遍历 for epoch in range(self.num_epochs): print('Epoch {}/{}'.format(epoch, self.num_epochs - 1)) print('-' * 20) # 调成训练模式 net.train() # 定义准确率变量,损失值,批次数量,样本总数量;最好精确率 train_acc = 0.0 train_loss = 0.0 num_batch = 0 num_samples = 0 best_acc = 0 # 进行每周期的网络的训练 for index, data in enumerate(self.train_loader, start=0): # 获取每批次的训练数据、并将训练数据放入GPU中 words, labels = data words = words.to(self.device) labels = labels.to(self.device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围;计算损失函数值 outputs = net(words) outputs = torch.nn.functional.softmax(outputs, dim=1) loss = self.criterion(outputs, labels) # 计算每个预测值概率最大的索引(下标) preds = torch.argmax(outputs, dim=1) # 计算批次的准确率,预测值中预测正确的样本占总样本的比例 # 统计准确率、损失值、批次数量 acc = torch.sum(preds == labels).item() train_acc += acc train_loss += loss num_batch += 1 num_samples += words.size(0) # 计算梯度、更新参数、重置梯度张量 loss.backward() self.optimizer.step() self.optimizer.zero_grad() # 输出一定次数的损失和精度情况 if (index + 1) % 10 == 0: # 输出损失值和精度值 print(' batch:{}, batch_loss:{:.4f}, batch_acc:{:.4f}\n'. format(index, loss, acc / words.size(0))) # 计算训练的准确率和损失值 train_acc = train_acc / num_samples train_loss = train_loss / num_batch # 进行验证 valid_acc, valid_loss = self.eval(net, self.test_loader, self.criterion) # 输出损失值和精度值 print('epoch:{} -------\n train loss:{:.4f}, train acc:{:.4f}\n ' 'valid loss:{:.4f}, valid acc:{:.4f}\n'. 
format(epoch,train_loss, train_acc, valid_loss, valid_acc)) # 选出最好的模型参数 if valid_acc > best_acc: # 更新最好精度、保存最好的模型参数 best_acc = valid_acc torch.save(net.state_dict(), self.checkpoints) print('epoch:{}, update model...'.format(epoch)) print() def eval(self, net, valid_loader, criterion): """ 验证 :param net: 网络结构 :param valid_loader: 验证集加载器 :param criterion: 损失函数 :return: """ print(' valid stage...') # 将网络结构调成验证模式;所有样本的准确率、损失值;统计批次数量; net.eval() net.cuda(device=self.device) valid_acc = 0.0 valid_loss = 0.0 num_batch = 0 num_samples = 0 # 进行测试集的测试 with torch.no_grad(): # 不使用梯度,减少内存占用 for index, dataset in enumerate(valid_loader, start=0): data, labels = dataset # 将测试数据放入GPU上 data, labels = data.to(self.device), labels.to(self.device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围 outputs = net(data) outputs = torch.nn.functional.softmax(outputs, dim=1) # 计算每个预测值概率最大的索引(下标);计算损失值 pred = torch.argmax(outputs, dim=1) loss = criterion(outputs, labels) # 统计真实标签和预测标签的对应情况;计算损失 valid_acc += torch.sum((pred == labels)).item() valid_loss += loss num_batch += 1 num_samples += data.size(0) # 计算测试精度和损失值 valid_acc = valid_acc / num_samples valid_loss = valid_loss / num_batch return valid_acc, valid_loss def test(self, net, test_loader): print('test stage...\n') # 加载模型权重、将网络放入GPU if os.path.exists(self.checkpoints): net.load_state_dict(torch.load(self.checkpoints)) print('load model argument...') net.to(self.device) # 将网络结构调成验证模式、定义准确率、标签列表和预测列表 net.eval() correct = 0 targets, preds = [], [] # 进行测试集的测试 with torch.no_grad(): # 不使用梯度,减少内存占用 for data, labels in test_loader: # 将测试数据放入GPU上 data, labels = data.to(self.device), labels.to(self.device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围 outputs = net(data) outputs = torch.nn.functional.softmax(outputs, dim=1) pred = torch.argmax(outputs, dim=1) # 计算每个预测值概率最大的索引(下标);统计真实标签和对应预测标签 correct += torch.sum((pred == labels)).item() targets += list(labels.cpu().numpy()) preds += list(pred.cpu().numpy()) # 计算测试精度和混淆矩阵 test_acc = 100. 
* correct / len(test_loader.dataset) # confusion_mat = metrics.confusion_matrix(targets, preds) # confusion_mat = confusion_matrix(targets, preds) # print('numbers samples:{}, test accuracy:{},\nconfusion matrix:\n{}'. # format(len(test_loader.data2), test_acc, confusion_mat)) print('numbers samples:{}, test accuracy:{},\n'. format(len(test_loader.dataset), test_acc)) return test_acc def predict(self, net, vocab, sentence): """ 预测句子的情感 :param net: 网络结构 :param vocab: 词典数据 :param sentence: 预测句子 :return: """ print('predict stage...\n') # 加载模型权重、将网络放入GPU if os.path.exists(self.checkpoints): net.load_state_dict(torch.load(self.checkpoints)) print('load model argument...') # 将网络结构调成验证模式 net.eval() # 将数据转化成词向量 vector = torch.tensor([vocab.stoi[word] for word in sentence]) vector = vector.view(1, -1) # 模型预测 output = net(vector) label = torch.argmax(output, dim=1).cpu().tolist() # 输出 print('data2:{}, label:{}'.format(sentence, self.name_classes[label[0]])) def load_pretrained_embedding(self, word_vocab, pretrained_vocab): """ 从GloVe中预训练好的pretrained_vocab中提取当前vocab对应的词向量
:return: """ # 初始当前数据的词向量 embedding = torch.zeros(len(word_vocab), pretrained_vocab.vectors[0].shape[0]) # 统计不包含的单词 num_out = 0 # 遍历当前词典 for i, word in enumerate(word_vocab): # 若单词不在GloVe中,则报出异常 try: # word对应于GloVe中word的索引,并根据索引替换掉向量 idx = pretrained_vocab.stoi[word] embedding[i, :] = pretrained_vocab.vectors[idx] except KeyError: num_out += 1 print('\r{}'.format(i), end=' ') # 输出不包含的单词数量 if num_out > 0: print('有{}单词不包含'.format(num_out)) return embedding if __name__ == '__main__': data_1 = ['this', 'movie', 'is', 'so', 'great'] data_2 = ['this', 'movie', 'is', 'so', 'bad'] text = SemanticClassification() # text.train(text.net) text.predict(text.net, text.vocab_data, data_2)
:param word_vocab: 当前数据集的词典,['ship',...] :param pretrained_vocab:GloVe中预训练的词向量
random_line_split
main.py
from other.rnn.text_setiment_classification.data import ProcessACLData from other.rnn.text_setiment_classification.brnn import BiRNN from torch.utils.data import DataLoader from torchtext.vocab import GloVe import torch, os # 设置GPU环境 os.environ['CUDA_VISIBLE_DEVICES'] = '1' class SemanticClassification: def __in
it__(self): # 定义相关参数 self.batch_size = 64 # 批次大小 self.num_workers = 8 # 线程数量 self.embedded_size = 100 # 词向量大小 self.num_hiddens = 100 # 隐藏层数量 self.num_layers = 2 # 堆叠LSTM的层数 self.num_classes = 2 # 类别数量 self.name_classes = ['neg', 'pos'] # 类别名称 self.pretrained = True # 是否使用预训练的词向量 self.lr = 0.01 # 学习率 self.num_epochs = 20 # 批次数量 self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # 设备 self.checkpoints = 'text.pth' # 定义处理对象的实体类 data = ProcessACLData() # 获得词典 self.vocab_data = data.get_vocab_imdb() # 获取训练、测试数据集及其词典 self.train_set = data.get_dataset(data.train_dir, self.vocab_data) self.test_set= data.get_dataset(data.test_dir, self.vocab_data) print('训练数据大小:{}'.format(len(self.train_set))) print('测试数据大小:{}'.format(len(self.test_set))) # 获得数据加载器 self.train_loader = DataLoader(self.train_set, batch_size=self.batch_size, shuffle=True, num_workers=8) self.test_loader = DataLoader(self.test_set, batch_size=self.batch_size, num_workers=8) # 定义训练和测试网络结构 self.net = BiRNN(vocab=self.vocab_data, embed_size=self.embedded_size, num_hiddens=self.num_hiddens, num_layers=self.num_layers) # 定义GloVe对象来加载预训练词向量,维度要与网络中embedding大小一致 glove = GloVe(name='6B', dim=self.embedded_size, cache='glove') # 加载预训练的值 if self.pretrained: self.net.embedding.weight.data.copy_( self.load_pretrained_embedding(self.vocab_data.itos, glove) ) # 取消求梯度 self.net.embedding.weight.requires_grad = False def train(self, net): """ 训练 :return: """ # 定义Adam优化器和交叉熵损失函数 if self.pretrained: # 过滤掉预训练的词向量的权重,只训练没有包含的值 self.optimizer = torch.optim.Adam(filter(lambda p:p.requires_grad, net.parameters()), lr=self.lr) else: self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr) self.criterion = torch.nn.CrossEntropyLoss() print('training stage....') # 将网络结构调成训练模式;将网络放置到GPU上;初始化梯度张量 net.cuda(device=self.device) self.optimizer.zero_grad() # 周期遍历 for epoch in range(self.num_epochs): print('Epoch {}/{}'.format(epoch, self.num_epochs - 1)) print('-' * 20) # 调成训练模式 net.train() # 
定义准确率变量,损失值,批次数量,样本总数量;最好精确率 train_acc = 0.0 train_loss = 0.0 num_batch = 0 num_samples = 0 best_acc = 0 # 进行每周期的网络的训练 for index, data in enumerate(self.train_loader, start=0): # 获取每批次的训练数据、并将训练数据放入GPU中 words, labels = data words = words.to(self.device) labels = labels.to(self.device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围;计算损失函数值 outputs = net(words) outputs = torch.nn.functional.softmax(outputs, dim=1) loss = self.criterion(outputs, labels) # 计算每个预测值概率最大的索引(下标) preds = torch.argmax(outputs, dim=1) # 计算批次的准确率,预测值中预测正确的样本占总样本的比例 # 统计准确率、损失值、批次数量 acc = torch.sum(preds == labels).item() train_acc += acc train_loss += loss num_batch += 1 num_samples += words.size(0) # 计算梯度、更新参数、重置梯度张量 loss.backward() self.optimizer.step() self.optimizer.zero_grad() # 输出一定次数的损失和精度情况 if (index + 1) % 10 == 0: # 输出损失值和精度值 print(' batch:{}, batch_loss:{:.4f}, batch_acc:{:.4f}\n'. format(index, loss, acc / words.size(0))) # 计算训练的准确率和损失值 train_acc = train_acc / num_samples train_loss = train_loss / num_batch # 进行验证 valid_acc, valid_loss = self.eval(net, self.test_loader, self.criterion) # 输出损失值和精度值 print('epoch:{} -------\n train loss:{:.4f}, train acc:{:.4f}\n ' 'valid loss:{:.4f}, valid acc:{:.4f}\n'. 
format(epoch,train_loss, train_acc, valid_loss, valid_acc)) # 选出最好的模型参数 if valid_acc > best_acc: # 更新最好精度、保存最好的模型参数 best_acc = valid_acc torch.save(net.state_dict(), self.checkpoints) print('epoch:{}, update model...'.format(epoch)) print() def eval(self, net, valid_loader, criterion): """ 验证 :param net: 网络结构 :param valid_loader: 验证集加载器 :param criterion: 损失函数 :return: """ print(' valid stage...') # 将网络结构调成验证模式;所有样本的准确率、损失值;统计批次数量; net.eval() net.cuda(device=self.device) valid_acc = 0.0 valid_loss = 0.0 num_batch = 0 num_samples = 0 # 进行测试集的测试 with torch.no_grad(): # 不使用梯度,减少内存占用 for index, dataset in enumerate(valid_loader, start=0): data, labels = dataset # 将测试数据放入GPU上 data, labels = data.to(self.device), labels.to(self.device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围 outputs = net(data) outputs = torch.nn.functional.softmax(outputs, dim=1) # 计算每个预测值概率最大的索引(下标);计算损失值 pred = torch.argmax(outputs, dim=1) loss = criterion(outputs, labels) # 统计真实标签和预测标签的对应情况;计算损失 valid_acc += torch.sum((pred == labels)).item() valid_loss += loss num_batch += 1 num_samples += data.size(0) # 计算测试精度和损失值 valid_acc = valid_acc / num_samples valid_loss = valid_loss / num_batch return valid_acc, valid_loss def test(self, net, test_loader): print('test stage...\n') # 加载模型权重、将网络放入GPU if os.path.exists(self.checkpoints): net.load_state_dict(torch.load(self.checkpoints)) print('load model argument...') net.to(self.device) # 将网络结构调成验证模式、定义准确率、标签列表和预测列表 net.eval() correct = 0 targets, preds = [], [] # 进行测试集的测试 with torch.no_grad(): # 不使用梯度,减少内存占用 for data, labels in test_loader: # 将测试数据放入GPU上 data, labels = data.to(self.device), labels.to(self.device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围 outputs = net(data) outputs = torch.nn.functional.softmax(outputs, dim=1) pred = torch.argmax(outputs, dim=1) # 计算每个预测值概率最大的索引(下标);统计真实标签和对应预测标签 correct += torch.sum((pred == labels)).item() targets += list(labels.cpu().numpy()) preds += list(pred.cpu().numpy()) # 计算测试精度和混淆矩阵 test_acc = 100. 
* correct / len(test_loader.dataset) # confusion_mat = metrics.confusion_matrix(targets, preds) # confusion_mat = confusion_matrix(targets, preds) # print('numbers samples:{}, test accuracy:{},\nconfusion matrix:\n{}'. # format(len(test_loader.data2), test_acc, confusion_mat)) print('numbers samples:{}, test accuracy:{},\n'. format(len(test_loader.dataset), test_acc)) return test_acc def predict(self, net, vocab, sentence): """ 预测句子的情感 :param net: 网络结构 :param vocab: 词典数据 :param sentence: 预测句子 :return: """ print('predict stage...\n') # 加载模型权重、将网络放入GPU if os.path.exists(self.checkpoints): net.load_state_dict(torch.load(self.checkpoints)) print('load model argument...') # 将网络结构调成验证模式 net.eval() # 将数据转化成词向量 vector = torch.tensor([vocab.stoi[word] for word in sentence]) vector = vector.view(1, -1) # 模型预测 output = net(vector) label = torch.argmax(output, dim=1).cpu().tolist() # 输出 print('data2:{}, label:{}'.format(sentence, self.name_classes[label[0]])) def load_pretrained_embedding(self, word_vocab, pretrained_vocab): """ 从GloVe中预训练好的pretrained_vocab中提取当前vocab对应的词向量 :param word_vocab: 当前数据集的词典,['ship',...] :param pretrained_vocab:GloVe中预训练的词向量 :return: """ # 初始当前数据的词向量 embedding = torch.zeros(len(word_vocab), pretrained_vocab.vectors[0].shape[0]) # 统计不包含的单词 num_out = 0 # 遍历当前词典 for i, word in enumerate(word_vocab): # 若单词不在GloVe中,则报出异常 try: # word对应于GloVe中word的索引,并根据索引替换掉向量 idx = pretrained_vocab.stoi[word] embedding[i, :] = pretrained_vocab.vectors[idx] except KeyError: num_out += 1 print('\r{}'.format(i), end=' ') # 输出不包含的单词数量 if num_out > 0: print('有{}单词不包含'.format(num_out)) return embedding if __name__ == '__main__': data_1 = ['this', 'movie', 'is', 'so', 'great'] data_2 = ['this', 'movie', 'is', 'so', 'bad'] text = SemanticClassification() # text.train(text.net) text.predict(text.net, text.vocab_data, data_2)
identifier_body
main.py
from other.rnn.text_setiment_classification.data import ProcessACLData from other.rnn.text_setiment_classification.brnn import BiRNN from torch.utils.data import DataLoader from torchtext.vocab import GloVe import torch, os # 设置GPU环境 os.environ['CUDA_VISIBLE_DEVICES'] = '1' class SemanticClassification: def __init__(self): # 定义相关参数 self.batch_size = 64 # 批次大小 self.num_workers = 8 # 线程数量 self.embedded_size = 100 # 词向量大小 self.num_hiddens = 100 # 隐藏层数量 self.num_layers = 2 # 堆叠LSTM的层数 self.num_classes = 2 # 类别数量 self.name_classes = ['neg', 'pos'] # 类别名称 self.pretrained = True # 是否使用预训练的词向量 self.lr = 0.01 # 学习率 self.num_epochs = 20 # 批次数量 self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # 设备 self.checkpoints = 'text.pth' # 定义处理对象的实体类 data = ProcessACLData() # 获得词典 self.vocab_data = data.get_vocab_imdb() # 获取训练、测试数据集及其词典 self.train_set = data.get_dataset(data.train_dir, self.vocab_data) self.test_set= data.get_dataset(data.test_dir, self.vocab_data) print('训练数据大小:{}'.format(len(self.train_set))) print('测试数据大小:{}'.format(len(self.test_set))) # 获得数据加载器 self.train_loader = DataLoader(self.train_set, batch_size=self.batch_size, shuffle=True, num_workers=8) self.test_loader = DataLoader(self.test_set, batch_size=self.batch_size, num_workers=8) # 定义训练和测试网络结构 self.net = BiRNN(vocab=self.vocab_data, embed_size=self.embedded_size, num_hiddens=self.num_hiddens, num_layers=self.num_layers) # 定义GloVe对象来加载预训练词向量,维度要与网络中embedding大小一致 glove = GloVe(name='6B', dim=self.embedded_size, cache='glove') # 加载预训练的值 if self.pretrained: self.net.embedding.weight.data.copy_( self.load_pretrained_embedding(self.vocab_data.itos, glove) ) # 取消求梯度 self.net.embedding.weight.requires_grad = False def train(self, net): """ 训练 :return: """ # 定义Adam优化器和交叉熵损失函数 if self.pretrained: # 过滤掉预训练的词向量的权重,只训练没有包含的值 self.optimizer = torch.optim.Adam(filter(lambda p:p.requires_grad, net.parameters()), lr=self.lr) else: self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr) 
self.criterion = torch.nn.CrossEntropyLoss() print('training stage....') # 将网络结构调成训练模式;将网络放置到GPU上;初始化梯度张量 net.cuda(d
rmat(epoch, self.num_epochs - 1)) print('-' * 20) # 调成训练模式 net.train() # 定义准确率变量,损失值,批次数量,样本总数量;最好精确率 train_acc = 0.0 train_loss = 0.0 num_batch = 0 num_samples = 0 best_acc = 0 # 进行每周期的网络的训练 for index, data in enumerate(self.train_loader, start=0): # 获取每批次的训练数据、并将训练数据放入GPU中 words, labels = data words = words.to(self.device) labels = labels.to(self.device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围;计算损失函数值 outputs = net(words) outputs = torch.nn.functional.softmax(outputs, dim=1) loss = self.criterion(outputs, labels) # 计算每个预测值概率最大的索引(下标) preds = torch.argmax(outputs, dim=1) # 计算批次的准确率,预测值中预测正确的样本占总样本的比例 # 统计准确率、损失值、批次数量 acc = torch.sum(preds == labels).item() train_acc += acc train_loss += loss num_batch += 1 num_samples += words.size(0) # 计算梯度、更新参数、重置梯度张量 loss.backward() self.optimizer.step() self.optimizer.zero_grad() # 输出一定次数的损失和精度情况 if (index + 1) % 10 == 0: # 输出损失值和精度值 print(' batch:{}, batch_loss:{:.4f}, batch_acc:{:.4f}\n'. format(index, loss, acc / words.size(0))) # 计算训练的准确率和损失值 train_acc = train_acc / num_samples train_loss = train_loss / num_batch # 进行验证 valid_acc, valid_loss = self.eval(net, self.test_loader, self.criterion) # 输出损失值和精度值 print('epoch:{} -------\n train loss:{:.4f}, train acc:{:.4f}\n ' 'valid loss:{:.4f}, valid acc:{:.4f}\n'. 
format(epoch,train_loss, train_acc, valid_loss, valid_acc)) # 选出最好的模型参数 if valid_acc > best_acc: # 更新最好精度、保存最好的模型参数 best_acc = valid_acc torch.save(net.state_dict(), self.checkpoints) print('epoch:{}, update model...'.format(epoch)) print() def eval(self, net, valid_loader, criterion): """ 验证 :param net: 网络结构 :param valid_loader: 验证集加载器 :param criterion: 损失函数 :return: """ print(' valid stage...') # 将网络结构调成验证模式;所有样本的准确率、损失值;统计批次数量; net.eval() net.cuda(device=self.device) valid_acc = 0.0 valid_loss = 0.0 num_batch = 0 num_samples = 0 # 进行测试集的测试 with torch.no_grad(): # 不使用梯度,减少内存占用 for index, dataset in enumerate(valid_loader, start=0): data, labels = dataset # 将测试数据放入GPU上 data, labels = data.to(self.device), labels.to(self.device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围 outputs = net(data) outputs = torch.nn.functional.softmax(outputs, dim=1) # 计算每个预测值概率最大的索引(下标);计算损失值 pred = torch.argmax(outputs, dim=1) loss = criterion(outputs, labels) # 统计真实标签和预测标签的对应情况;计算损失 valid_acc += torch.sum((pred == labels)).item() valid_loss += loss num_batch += 1 num_samples += data.size(0) # 计算测试精度和损失值 valid_acc = valid_acc / num_samples valid_loss = valid_loss / num_batch return valid_acc, valid_loss def test(self, net, test_loader): print('test stage...\n') # 加载模型权重、将网络放入GPU if os.path.exists(self.checkpoints): net.load_state_dict(torch.load(self.checkpoints)) print('load model argument...') net.to(self.device) # 将网络结构调成验证模式、定义准确率、标签列表和预测列表 net.eval() correct = 0 targets, preds = [], [] # 进行测试集的测试 with torch.no_grad(): # 不使用梯度,减少内存占用 for data, labels in test_loader: # 将测试数据放入GPU上 data, labels = data.to(self.device), labels.to(self.device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围 outputs = net(data) outputs = torch.nn.functional.softmax(outputs, dim=1) pred = torch.argmax(outputs, dim=1) # 计算每个预测值概率最大的索引(下标);统计真实标签和对应预测标签 correct += torch.sum((pred == labels)).item() targets += list(labels.cpu().numpy()) preds += list(pred.cpu().numpy()) # 计算测试精度和混淆矩阵 test_acc = 100. 
* correct / len(test_loader.dataset) # confusion_mat = metrics.confusion_matrix(targets, preds) # confusion_mat = confusion_matrix(targets, preds) # print('numbers samples:{}, test accuracy:{},\nconfusion matrix:\n{}'. # format(len(test_loader.data2), test_acc, confusion_mat)) print('numbers samples:{}, test accuracy:{},\n'. format(len(test_loader.dataset), test_acc)) return test_acc def predict(self, net, vocab, sentence): """ 预测句子的情感 :param net: 网络结构 :param vocab: 词典数据 :param sentence: 预测句子 :return: """ print('predict stage...\n') # 加载模型权重、将网络放入GPU if os.path.exists(self.checkpoints): net.load_state_dict(torch.load(self.checkpoints)) print('load model argument...') # 将网络结构调成验证模式 net.eval() # 将数据转化成词向量 vector = torch.tensor([vocab.stoi[word] for word in sentence]) vector = vector.view(1, -1) # 模型预测 output = net(vector) label = torch.argmax(output, dim=1).cpu().tolist() # 输出 print('data2:{}, label:{}'.format(sentence, self.name_classes[label[0]])) def load_pretrained_embedding(self, word_vocab, pretrained_vocab): """ 从GloVe中预训练好的pretrained_vocab中提取当前vocab对应的词向量 :param word_vocab: 当前数据集的词典,['ship',...] :param pretrained_vocab:GloVe中预训练的词向量 :return: """ # 初始当前数据的词向量 embedding = torch.zeros(len(word_vocab), pretrained_vocab.vectors[0].shape[0]) # 统计不包含的单词 num_out = 0 # 遍历当前词典 for i, word in enumerate(word_vocab): # 若单词不在GloVe中,则报出异常 try: # word对应于GloVe中word的索引,并根据索引替换掉向量 idx = pretrained_vocab.stoi[word] embedding[i, :] = pretrained_vocab.vectors[idx] except KeyError: num_out += 1 print('\r{}'.format(i), end=' ') # 输出不包含的单词数量 if num_out > 0: print('有{}单词不包含'.format(num_out)) return embedding if __name__ == '__main__': data_1 = ['this', 'movie', 'is', 'so', 'great'] data_2 = ['this', 'movie', 'is', 'so', 'bad'] text = SemanticClassification() # text.train(text.net) text.predict(text.net, text.vocab_data, data_2)
evice=self.device) self.optimizer.zero_grad() # 周期遍历 for epoch in range(self.num_epochs): print('Epoch {}/{}'.fo
conditional_block
main.py
from other.rnn.text_setiment_classification.data import ProcessACLData from other.rnn.text_setiment_classification.brnn import BiRNN from torch.utils.data import DataLoader from torchtext.vocab import GloVe import torch, os # 设置GPU环境 os.environ['CUDA_VISIBLE_DEVICES'] = '1' class SemanticClassification: def __init__(self): # 定义相关参数 self.batch_size = 64 # 批次大小 self.num_workers = 8 # 线程数量 self.embedded_size = 100 # 词向量大小 self.num_hiddens = 100 # 隐藏层数量 self.num_layers = 2 # 堆叠LSTM的层数 self.num_classes = 2 # 类别数量 self.name_classes = ['neg', 'pos'] # 类别名称 self.pretrained = True # 是否使用预训练的词向量 self.lr = 0.01 # 学习率 self.num_epochs = 20 # 批次数量 self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # 设备 self.checkpoints = 'text.pth' # 定义处理对象的实体类 data = ProcessACLData() # 获得词典 self.vocab_data = data.get_vocab_imdb() # 获取训练、测试数据集及其词典 self.train_set = data.get_dataset(data.train_dir, self.vocab_data) self.test_set= data.get_dataset(data.test_dir, self.vocab_data) print('训练数据大小:{}'.format(len(self.train_set))) print('测试数据大小:{}'.format(len(self.test_set))) # 获得数据加载器 self.train_loader = DataLoader(self.train_set, batch_size=self.batch_size, shuffle=True, num_workers=8) self.test_loader = DataLoader(self.test_set, batch_size=self.batch_size, num_workers=8) # 定义训练和测试网络结构 self.net = BiRNN(vocab=self.vocab_data, embed_size=self.embedded_size, num_hiddens=self.num_hiddens, num_layers=self.num_layers) # 定义GloVe对象来加载预训练词向量,维度要与网络中embedding大小一致 glove = GloVe(name='6B', dim=self.embedded_size, cache='glove') # 加载预训练的值 if self.pretrained: self.net.embedding.weight.data.copy_( self.load_pretrained_embedding(self.vocab_data.itos, glove) ) # 取消求梯度 self.net.embedding.weight.requires_grad = False def train(self, net): """ 训练 :return: """ # 定义Adam优化器和交叉熵损失函数 if self.pretrained: # 过滤掉预训练的词向量的权重,只训练没有包含的值 self.optimizer = torch.optim.Adam(filter(lambda p:p.requires_grad, net.parameters()), lr=self.lr) else: self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr) 
self.criterion = torch.nn.CrossEntropyLoss() print('training stage....') # 将网络结构调成训练模式;将网络放置到GPU上;初始化梯度张量 net.cuda(device=self.device) self.optimizer.zero_grad() # 周期遍历 for epoch in range(self.num_epochs): print('Epoch {}/{}'.format(epoch, self.num_epochs - 1)) print('-' * 20) # 调成训练模式 net.train() # 定义准确率变量,损失值,批次数量,样本总数量;最好精确率 train_acc = 0.0 train_loss = 0.0 num_batch = 0 num_samples = 0 best_acc = 0 # 进行每周期的网络的训练 for index, data in enumerate(self.train_loader, start=0): # 获取每批次的训练数据、并将训练数据放入GPU中 words, labels = data words = words.to(self.device) labels = labels.to(self.device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围;计算损失函数值 outputs = net(words) outputs = torch.nn.functional.softmax(outputs, dim=1) loss = self.criterion(outputs, labels) # 计算每个预测值概率最大的索引(下标) preds = torch.argmax(outputs, dim=1) # 计算批次的准确率,预测值中预测正确的样本占总样本的比例 # 统计准确率、损失值、批次数量 acc = torch.sum(preds == labels).item() train_acc += acc train_loss += loss num_batch += 1 num_samples += words.size(0) # 计算梯度、更新参数、重置梯度张量 loss.backward() self.optimizer.step() self.optimizer.zero_grad() # 输出一定次数的损失和精度情况 if (index + 1) % 10 == 0: # 输出损失值和精度值 print(' batch:{}, batch_loss:{:.4f}, batch_acc:{:.4f}\n'. format(index, loss, acc / words.size(0))) # 计算训练的准确率和损失值 train_acc = train_acc / num_samples train_loss = train_loss / num_batch # 进行验证 valid_acc, valid_loss = self.eval(net, self.test_loader, self.criterion) # 输出损失值和精度值 print('epoch:{} -------\n train loss:{:.4f}, train acc:{:.4f}\n ' 'valid loss:{:.4f}, valid acc:{:.4f}\n'. 
format(epoch,train_loss, train_acc, valid_loss, valid_acc)) # 选出最好的模型参数 if valid_acc > best_acc: # 更新最好精度、保存最好的模型参数 best_acc = valid_acc torch.save(net.state_dict(), self.checkpoints) print('epoch:{}, update model...'.format(epoch)) print() def eval(self, net, valid_loader, criterion): """ 验证 :param net: 网络结构 :param valid_loader: 验证集加载器 :param criterion: 损失函数 :return: """ print(' valid stage...') # 将网络结构调成验证模式;所有样本的准确率、损失值;统计批次数量; net.eval() net.cuda(device=self.device) valid_acc = 0.0 valid_loss = 0.0 num_batch = 0 num_samples = 0 # 进行测试集的测试 with torch.no_grad(): # 不使用梯度,减少内存占用 for index, dataset in enumerate(valid_loader, start=0): data, labels = dataset # 将测试数据放入GPU上 data, labels = data.to(self.device), labels.to(self.device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围 outputs = net(data) outputs = torch.nn.functional.softmax(outputs, dim=1) # 计算每个预测值概率最大的索引(下标);计算损失值 pred = torch.argmax(outputs, dim=1) loss = criterion(outputs, labels) # 统计真实标签和预测标签的对应情况;计算损失 valid_acc += torch.sum((pred == labels)).item() valid_loss += loss num_batch += 1 num_samples += data.size(0) # 计算测试精度和损失值 valid_acc = valid_acc / num_samples valid_loss = valid_loss / num_batch return valid_acc, valid_loss def test(self, net, test_loader): print('test stage...\n') # 加载模型权重、将网络放入GPU if os.path.exists(self.checkpoints): net.load_state_dict(torch.load(self.checkpoints)) print('load model argument...') net.to(self.device) # 将网络结构调成验证模式、定义准确率、标签列表和预测列表 net.eval() correct = 0 targets, preds = [], [] # 进行测试集的测试 with torch.no_grad(): # 不使用梯度,减少内存占用 for data, labels in test_loader: # 将测试数据放入GPU上 data, labels = data.to(self.device), labels.to(self.device) # 推理输出网络预测值,并使用softmax使预测值满足0-1概率范围 outputs = net(data) outputs = torch.nn.functional.softmax(outputs, dim=1) pred = torch.argmax(outputs, dim=1) # 计算每个预测值概率最大的索引(下标);统计真实标签和对应预测标签 correct += torch.sum((pred == labels)).item() targets += list(labels.cpu().numpy()) preds += list(pred.cpu().numpy()) # 计算测试精度和混淆矩阵 test_acc = 100. 
* correct / len(test_loader.dataset) # confusion_mat = metrics.confusion_matrix(targets, preds) # confusion_mat = confusion_matrix(targets, preds) # print('numbers samples:{}, test accuracy:{},\nconfusion matrix:\n{}'. # format(len(test_loader.data2), test_acc, confusion_mat)) print('numbers samples:{}, test accuracy:{},\n'. format(len(test_loader.dataset), test_acc)) return test_acc def predict(self, net, vocab, sentence): """ 预测句子的情感 :param net: 网络结构 :param vocab: 词典数据 :param sentence: 预测句子 :return: """ print('predict stage...\n') # 加载模型权重、将网络放入GPU if os.path.exists(self.checkpoints): net.load_state_dict(torch.load(self.checkpoints)) print('load model argument...') # 将网络结构调成验证模式 net.eval() # 将数据转化成词向量 vector = torch.tensor([vocab.stoi[word] for word in sentence]) vector = vector.view(1, -1) # 模型预测 output = net(vector) label = torch.argmax(output, dim=1).cpu().tolist() # 输出 print('data2:{}, label:{}'.format(sentence, self.name_classes[label[0]])) def load_pretrained_embedding(self, word_vocab, pretrained_vocab): """ 从GloVe中预训练好的pretrained_vocab中提取当前vocab对应的词向量 :param word_vocab: 当前数据集的词典,['ship',...] :param pretrained_vocab:GloVe中预训练的词向量 :return: """ # 初始当前数据的词向量 embedding = torch.zeros(len(word_vocab), pretrained_vocab.vectors[0].shape[0]) # 统计不包含的单词 num_out = 0 # 遍历当前词典 for i, word in enumerate(word_vocab): # 若单词不在GloVe中,则报出异常 try: # word对应于GloVe中word的索引,并根据索引替换掉向量 idx = pretrained_vocab.stoi[word] embedding[i, :] = pretrained_vocab.vectors[idx] except KeyError: num_out += 1 print('\r{}'.format(i), end=' ') # 输出不包含的单词数量 if num_out > 0: print('有{}单词不包含'.format(num_out)) return embedding if __name__ == '__main__': data_1 = ['this', 'movie', 'is', 'so', 'great'] data_2 = ['this', 'movie', 'is', 'so', 'bad'] text = SemanticClassification() # text.train(text.net) text.predict(text.net, text.vocab_data, data_2)
identifier_name
list.go
package cmd import ( "context" "errors" "flag" "fmt" "strings" "github.com/spf13/cobra" "github.com/spf13/pflag" core "k8s.io/api/core/v1" rbac "k8s.io/api/rbac/v1" apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" clioptions "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/client-go/kubernetes" clientcore "k8s.io/client-go/kubernetes/typed/core/v1" clientrbac "k8s.io/client-go/kubernetes/typed/rbac/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" ) const ( whoCanUsage = `kubectl who-can VERB (TYPE | TYPE/NAME | NONRESOURCEURL)` whoCanLong = `Shows which users, groups and service accounts can perform a given verb on a given resource type. VERB is a logical Kubernetes API verb like 'get', 'list', 'watch', 'delete', etc. TYPE is a Kubernetes resource. Shortcuts and API groups will be resolved, e.g. 'po' or 'pods.metrics.k8s.io'. NAME is the name of a particular Kubernetes resource. NONRESOURCEURL is a partial URL that starts with "/".` whoCanExample = ` # List who can get pods from any of the available namespaces kubectl who-can get pods --all-namespaces # List who can create pods in the current namespace kubectl who-can create pods # List who can get pods specifying the API group kubectl who-can get pods.metrics.k8s.io # List who can create services in namespace "foo" kubectl who-can create services -n foo # List who can get the service named "mongodb" in namespace "bar" kubectl who-can get svc/mongodb --namespace bar # List who can do everything with pods in the current namespace kubectl who-can '*' pods # List who can list every resource in the namespace "baz" kubectl who-can list '*' -n baz # List who can read pod logs kubectl who-can get pods --subresource=log # List who can access the URL /logs/ kubectl who-can get /logs` ) const ( // RoleKind is the RoleRef's Kind referencing a Role. 
RoleKind = "Role" // ClusterRoleKind is the RoleRef's Kind referencing a ClusterRole. ClusterRoleKind = "ClusterRole" ) const ( subResourceFlag = "subresource" allNamespacesFlag = "all-namespaces" namespaceFlag = "namespace" outputFlag = "output" outputWide = "wide" outputJson = "json" ) // Action represents an action a subject can be given permission to. type Action struct { Verb string Resource string ResourceName string SubResource string NonResourceURL string Namespace string AllNamespaces bool } type resolvedAction struct { Action gr schema.GroupResource } // roles is a set of Role names matching the specified Action. type roles map[string]struct{} // clusterRoles is a set of ClusterRole names matching the specified Action. type clusterRoles map[string]struct{} type WhoCan struct { clientNamespace clientcore.NamespaceInterface clientRBAC clientrbac.RbacV1Interface namespaceValidator NamespaceValidator resourceResolver ResourceResolver accessChecker AccessChecker policyRuleMatcher PolicyRuleMatcher } // NewWhoCan constructs a new WhoCan checker with the specified rest.Config and RESTMapper. func NewWhoCan(restConfig *rest.Config, mapper apimeta.RESTMapper) (*WhoCan, error) { client, err := kubernetes.NewForConfig(restConfig) if err != nil { return nil, err } clientNamespace := client.CoreV1().Namespaces() return &WhoCan{ clientNamespace: clientNamespace, clientRBAC: client.RbacV1(), namespaceValidator: NewNamespaceValidator(clientNamespace), resourceResolver: NewResourceResolver(client.Discovery(), mapper), accessChecker: NewAccessChecker(client.AuthorizationV1().SelfSubjectAccessReviews()), policyRuleMatcher: NewPolicyRuleMatcher(), }, nil } // NewWhoCanCommand constructs the WhoCan command with the specified IOStreams. 
func NewWhoCanCommand(streams clioptions.IOStreams) (*cobra.Command, error) { var configFlags *clioptions.ConfigFlags cmd := &cobra.Command{ Use: whoCanUsage, Long: whoCanLong, Example: whoCanExample, SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { clientConfig := configFlags.ToRawKubeConfigLoader() restConfig, err := clientConfig.ClientConfig() if err != nil { return fmt.Errorf("getting rest config: %v", err) } mapper, err := configFlags.ToRESTMapper() if err != nil { return fmt.Errorf("getting mapper: %v", err) } action, err := ActionFrom(clientConfig, cmd.Flags(), args) if err != nil { return err } o, err := NewWhoCan(restConfig, mapper) if err != nil { return err } warnings, err := o.CheckAPIAccess(action) if err != nil { return err } output, err := cmd.Flags().GetString(outputFlag) if err != nil { return err } printer := NewPrinter(streams.Out, output == outputWide) // Output warnings printer.PrintWarnings(warnings) roleBindings, clusterRoleBindings, err := o.Check(action) if err != nil { return err } // Output check results output = strings.ToLower(output) if output == outputJson { printer.ExportData(action, roleBindings, clusterRoleBindings) } else if output == outputWide || output == "" { printer.PrintChecks(action, roleBindings, clusterRoleBindings) } else { return fmt.Errorf("invalid output format: %v", output) } return nil }, } cmd.Flags().String(subResourceFlag, "", "SubResource such as pod/log or deployment/scale") cmd.Flags().BoolP(allNamespacesFlag, "A", false, "If true, check for users that can do the specified action in any of the available namespaces") cmd.Flags().StringP(outputFlag, "o", "", "Output format. 
Currently the only supported output format is wide or JSON.") flag.CommandLine.VisitAll(func(gf *flag.Flag) { cmd.Flags().AddGoFlag(gf) }) configFlags = clioptions.NewConfigFlags(true) configFlags.AddFlags(cmd.Flags()) return cmd, nil } // ActionFrom sets all information required to check who can perform the specified action. func ActionFrom(clientConfig clientcmd.ClientConfig, flags *pflag.FlagSet, args []string) (action Action, err error) { if len(args) < 2 { err = errors.New("you must specify two or three arguments: verb, resource, and optional resourceName") return } action.Verb = args[0] if strings.HasPrefix(args[1], "/") { action.NonResourceURL = args[1] klog.V(3).Infof("Resolved nonResourceURL `%s`", action.NonResourceURL) } else { resourceTokens := strings.SplitN(args[1], "/", 2) action.Resource = resourceTokens[0] if len(resourceTokens) > 1 { action.ResourceName = resourceTokens[1] klog.V(3).Infof("Resolved resourceName `%s`", action.ResourceName) } } action.SubResource, err = flags.GetString(subResourceFlag) if err != nil { return } action.AllNamespaces, err = flags.GetBool(allNamespacesFlag) if err != nil { return } if action.AllNamespaces { action.Namespace = core.NamespaceAll klog.V(3).Infof("Resolved namespace `%s` from --all-namespaces flag", action.Namespace) return } action.Namespace, err = flags.GetString(namespaceFlag) if err != nil { return } if action.Namespace != "" { klog.V(3).Infof("Resolved namespace `%s` from --namespace flag", action.Namespace) return } // Neither --all-namespaces nor --namespace flag was specified action.Namespace, _, err = clientConfig.Namespace() if err != nil { err = fmt.Errorf("getting namespace from current context: %v", err) } klog.V(3).Infof("Resolved namespace `%s` from current context", action.Namespace) return } // Validate makes sure that the specified action is valid. 
func (w *WhoCan) validate(action Action) error { if action.NonResourceURL != "" && action.SubResource != "" { return fmt.Errorf("--subresource cannot be used with NONRESOURCEURL") } err := w.namespaceValidator.Validate(action.Namespace) if err != nil { return fmt.Errorf("validating namespace: %v", err) } return nil } // Check checks who can perform the action specified by WhoCanOptions and returns the role bindings that allows the // action to be performed. func (w *WhoCan) Check(action Action) (roleBindings []rbac.RoleBinding, clusterRoleBindings []rbac.ClusterRoleBinding, err error) { err = w.validate(action) if err != nil { err = fmt.Errorf("validation: %v", err) return } resolvedAction := resolvedAction{Action: action} if action.Resource != "" { resolvedAction.gr, err = w.resourceResolver.Resolve(action.Verb, action.Resource, action.SubResource) if err != nil { err = fmt.Errorf("resolving resource: %v", err) return } klog.V(3).Infof("Resolved resource `%s`", resolvedAction.gr.String()) } // Get the Roles that relate to the Verbs and Resources we are interested in roleNames, err := w.getRolesFor(resolvedAction) if err != nil { return []rbac.RoleBinding{}, []rbac.ClusterRoleBinding{}, fmt.Errorf("getting Roles: %v", err) } // Get the ClusterRoles that relate to the verbs and resources we are interested in clusterRoleNames, err := w.getClusterRolesFor(resolvedAction) if err != nil { return []rbac.RoleBinding{}, []rbac.ClusterRoleBinding{}, fmt.Errorf("getting ClusterRoles: %v", err) } // Get the RoleBindings that relate to this set of Roles or ClusterRoles roleBindings, err = w.getRoleBindings(resolvedAction, roleNames, clusterRoleNames) if err != nil { err = fmt.Errorf("getting RoleBindings: %v", err) return } // Get the ClusterRoleBindings that relate to this set of ClusterRoles clusterRoleBindings, err = w.getClusterRoleBindings(clusterRoleNames) if err != nil { err = fmt.Errorf("getting ClusterRoleBindings: %v", err) return } return } // CheckAPIAccess checks 
whether the subject in the current context has enough privileges to query Kubernetes API // server to perform Check. func (w *WhoCan) CheckAPIAccess(action Action) ([]string, error) { type check struct { verb string resource string namespace string } var checks []check var warnings []string ctx := context.Background() // Determine which checks need to be executed. if action.Namespace == "" { checks = append(checks, check{"list", "namespaces", ""}) nsList, err := w.clientNamespace.List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("listing namespaces: %v", err) } for _, ns := range nsList.Items { checks = append(checks, check{"list", "roles", ns.Name}) checks = append(checks, check{"list", "rolebindings", ns.Name}) } } else { checks = append(checks, check{"list", "roles", action.Namespace}) checks = append(checks, check{"list", "rolebindings", action.Namespace}) } // Actually run the checks and collect warnings. for _, check := range checks { allowed, err := w.accessChecker.IsAllowedTo(check.verb, check.resource, check.namespace) if err != nil { return nil, err } if !allowed { var msg string if check.namespace == "" { msg = fmt.Sprintf("The user is not allowed to %s %s", check.verb, check.resource) } else { msg = fmt.Sprintf("The user is not allowed to %s %s in the %s namespace", check.verb, check.resource, check.namespace) } warnings = append(warnings, msg) } } return warnings, nil } // GetRolesFor returns a set of names of Roles matching the specified Action. 
func (w *WhoCan) getRolesFor(action resolvedAction) (roles, error) { ctx := context.Background() rl, err := w.clientRBAC.Roles(action.Namespace).List(ctx, metav1.ListOptions{}) if err != nil { return nil, err } roleNames := make(map[string]struct{}, 10) for _, item := range rl.Items { if w.policyRuleMatcher.MatchesRole(item, action) { if _, ok := roleNames[item.Name]; !ok { roleNames[item.Name] = struct{}{} } } } return roleNames, nil } // GetClusterRolesFor returns a set of names of ClusterRoles matching the specified Action. func (w *WhoCan)
(action resolvedAction) (clusterRoles, error) { ctx := context.Background() crl, err := w.clientRBAC.ClusterRoles().List(ctx, metav1.ListOptions{}) if err != nil { return nil, err } cr := make(map[string]struct{}, 10) for _, item := range crl.Items { if w.policyRuleMatcher.MatchesClusterRole(item, action) { if _, ok := cr[item.Name]; !ok { cr[item.Name] = struct{}{} } } } return cr, nil } // GetRoleBindings returns the RoleBindings that refer to the given set of Role names or ClusterRole names. func (w *WhoCan) getRoleBindings(action resolvedAction, roleNames roles, clusterRoleNames clusterRoles) (roleBindings []rbac.RoleBinding, err error) { // TODO I'm wondering if GetRoleBindings should be invoked at all when the --all-namespaces flag is specified? if action.Namespace == core.NamespaceAll { return } ctx := context.Background() list, err := w.clientRBAC.RoleBindings(action.Namespace).List(ctx, metav1.ListOptions{}) if err != nil { return } for _, roleBinding := range list.Items { if roleBinding.RoleRef.Kind == RoleKind { if _, ok := roleNames[roleBinding.RoleRef.Name]; ok { roleBindings = append(roleBindings, roleBinding) } } else if roleBinding.RoleRef.Kind == ClusterRoleKind { if _, ok := clusterRoleNames[roleBinding.RoleRef.Name]; ok { roleBindings = append(roleBindings, roleBinding) } } } return } // GetClusterRoleBindings returns the ClusterRoleBindings that refer to the given sef of ClusterRole names. 
func (w *WhoCan) getClusterRoleBindings(clusterRoleNames clusterRoles) (clusterRoleBindings []rbac.ClusterRoleBinding, err error) { ctx := context.Background() list, err := w.clientRBAC.ClusterRoleBindings().List(ctx, metav1.ListOptions{}) if err != nil { return } for _, roleBinding := range list.Items { if _, ok := clusterRoleNames[roleBinding.RoleRef.Name]; ok { clusterRoleBindings = append(clusterRoleBindings, roleBinding) } } return } func (w Action) String() string { if w.NonResourceURL != "" { return fmt.Sprintf("%s %s", w.Verb, w.NonResourceURL) } name := w.ResourceName if name != "" { name = "/" + name } return fmt.Sprintf("%s %s%s", w.Verb, w.Resource, name) }
getClusterRolesFor
identifier_name
list.go
package cmd import ( "context" "errors" "flag" "fmt" "strings" "github.com/spf13/cobra" "github.com/spf13/pflag" core "k8s.io/api/core/v1" rbac "k8s.io/api/rbac/v1" apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" clioptions "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/client-go/kubernetes" clientcore "k8s.io/client-go/kubernetes/typed/core/v1" clientrbac "k8s.io/client-go/kubernetes/typed/rbac/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" ) const ( whoCanUsage = `kubectl who-can VERB (TYPE | TYPE/NAME | NONRESOURCEURL)` whoCanLong = `Shows which users, groups and service accounts can perform a given verb on a given resource type. VERB is a logical Kubernetes API verb like 'get', 'list', 'watch', 'delete', etc. TYPE is a Kubernetes resource. Shortcuts and API groups will be resolved, e.g. 'po' or 'pods.metrics.k8s.io'. NAME is the name of a particular Kubernetes resource. NONRESOURCEURL is a partial URL that starts with "/".` whoCanExample = ` # List who can get pods from any of the available namespaces kubectl who-can get pods --all-namespaces # List who can create pods in the current namespace kubectl who-can create pods # List who can get pods specifying the API group kubectl who-can get pods.metrics.k8s.io # List who can create services in namespace "foo" kubectl who-can create services -n foo # List who can get the service named "mongodb" in namespace "bar" kubectl who-can get svc/mongodb --namespace bar # List who can do everything with pods in the current namespace kubectl who-can '*' pods # List who can list every resource in the namespace "baz" kubectl who-can list '*' -n baz # List who can read pod logs kubectl who-can get pods --subresource=log # List who can access the URL /logs/ kubectl who-can get /logs` ) const ( // RoleKind is the RoleRef's Kind referencing a Role. 
RoleKind = "Role" // ClusterRoleKind is the RoleRef's Kind referencing a ClusterRole. ClusterRoleKind = "ClusterRole" ) const ( subResourceFlag = "subresource" allNamespacesFlag = "all-namespaces" namespaceFlag = "namespace" outputFlag = "output" outputWide = "wide" outputJson = "json" ) // Action represents an action a subject can be given permission to. type Action struct { Verb string Resource string ResourceName string SubResource string NonResourceURL string Namespace string AllNamespaces bool } type resolvedAction struct { Action gr schema.GroupResource } // roles is a set of Role names matching the specified Action. type roles map[string]struct{} // clusterRoles is a set of ClusterRole names matching the specified Action. type clusterRoles map[string]struct{} type WhoCan struct { clientNamespace clientcore.NamespaceInterface clientRBAC clientrbac.RbacV1Interface namespaceValidator NamespaceValidator resourceResolver ResourceResolver accessChecker AccessChecker policyRuleMatcher PolicyRuleMatcher } // NewWhoCan constructs a new WhoCan checker with the specified rest.Config and RESTMapper. func NewWhoCan(restConfig *rest.Config, mapper apimeta.RESTMapper) (*WhoCan, error) { client, err := kubernetes.NewForConfig(restConfig) if err != nil { return nil, err } clientNamespace := client.CoreV1().Namespaces() return &WhoCan{ clientNamespace: clientNamespace, clientRBAC: client.RbacV1(), namespaceValidator: NewNamespaceValidator(clientNamespace), resourceResolver: NewResourceResolver(client.Discovery(), mapper), accessChecker: NewAccessChecker(client.AuthorizationV1().SelfSubjectAccessReviews()), policyRuleMatcher: NewPolicyRuleMatcher(), }, nil } // NewWhoCanCommand constructs the WhoCan command with the specified IOStreams. func NewWhoCanCommand(streams clioptions.IOStreams) (*cobra.Command, error)
// ActionFrom sets all information required to check who can perform the specified action. func ActionFrom(clientConfig clientcmd.ClientConfig, flags *pflag.FlagSet, args []string) (action Action, err error) { if len(args) < 2 { err = errors.New("you must specify two or three arguments: verb, resource, and optional resourceName") return } action.Verb = args[0] if strings.HasPrefix(args[1], "/") { action.NonResourceURL = args[1] klog.V(3).Infof("Resolved nonResourceURL `%s`", action.NonResourceURL) } else { resourceTokens := strings.SplitN(args[1], "/", 2) action.Resource = resourceTokens[0] if len(resourceTokens) > 1 { action.ResourceName = resourceTokens[1] klog.V(3).Infof("Resolved resourceName `%s`", action.ResourceName) } } action.SubResource, err = flags.GetString(subResourceFlag) if err != nil { return } action.AllNamespaces, err = flags.GetBool(allNamespacesFlag) if err != nil { return } if action.AllNamespaces { action.Namespace = core.NamespaceAll klog.V(3).Infof("Resolved namespace `%s` from --all-namespaces flag", action.Namespace) return } action.Namespace, err = flags.GetString(namespaceFlag) if err != nil { return } if action.Namespace != "" { klog.V(3).Infof("Resolved namespace `%s` from --namespace flag", action.Namespace) return } // Neither --all-namespaces nor --namespace flag was specified action.Namespace, _, err = clientConfig.Namespace() if err != nil { err = fmt.Errorf("getting namespace from current context: %v", err) } klog.V(3).Infof("Resolved namespace `%s` from current context", action.Namespace) return } // Validate makes sure that the specified action is valid. 
func (w *WhoCan) validate(action Action) error { if action.NonResourceURL != "" && action.SubResource != "" { return fmt.Errorf("--subresource cannot be used with NONRESOURCEURL") } err := w.namespaceValidator.Validate(action.Namespace) if err != nil { return fmt.Errorf("validating namespace: %v", err) } return nil } // Check checks who can perform the action specified by WhoCanOptions and returns the role bindings that allows the // action to be performed. func (w *WhoCan) Check(action Action) (roleBindings []rbac.RoleBinding, clusterRoleBindings []rbac.ClusterRoleBinding, err error) { err = w.validate(action) if err != nil { err = fmt.Errorf("validation: %v", err) return } resolvedAction := resolvedAction{Action: action} if action.Resource != "" { resolvedAction.gr, err = w.resourceResolver.Resolve(action.Verb, action.Resource, action.SubResource) if err != nil { err = fmt.Errorf("resolving resource: %v", err) return } klog.V(3).Infof("Resolved resource `%s`", resolvedAction.gr.String()) } // Get the Roles that relate to the Verbs and Resources we are interested in roleNames, err := w.getRolesFor(resolvedAction) if err != nil { return []rbac.RoleBinding{}, []rbac.ClusterRoleBinding{}, fmt.Errorf("getting Roles: %v", err) } // Get the ClusterRoles that relate to the verbs and resources we are interested in clusterRoleNames, err := w.getClusterRolesFor(resolvedAction) if err != nil { return []rbac.RoleBinding{}, []rbac.ClusterRoleBinding{}, fmt.Errorf("getting ClusterRoles: %v", err) } // Get the RoleBindings that relate to this set of Roles or ClusterRoles roleBindings, err = w.getRoleBindings(resolvedAction, roleNames, clusterRoleNames) if err != nil { err = fmt.Errorf("getting RoleBindings: %v", err) return } // Get the ClusterRoleBindings that relate to this set of ClusterRoles clusterRoleBindings, err = w.getClusterRoleBindings(clusterRoleNames) if err != nil { err = fmt.Errorf("getting ClusterRoleBindings: %v", err) return } return } // CheckAPIAccess checks 
whether the subject in the current context has enough privileges to query Kubernetes API // server to perform Check. func (w *WhoCan) CheckAPIAccess(action Action) ([]string, error) { type check struct { verb string resource string namespace string } var checks []check var warnings []string ctx := context.Background() // Determine which checks need to be executed. if action.Namespace == "" { checks = append(checks, check{"list", "namespaces", ""}) nsList, err := w.clientNamespace.List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("listing namespaces: %v", err) } for _, ns := range nsList.Items { checks = append(checks, check{"list", "roles", ns.Name}) checks = append(checks, check{"list", "rolebindings", ns.Name}) } } else { checks = append(checks, check{"list", "roles", action.Namespace}) checks = append(checks, check{"list", "rolebindings", action.Namespace}) } // Actually run the checks and collect warnings. for _, check := range checks { allowed, err := w.accessChecker.IsAllowedTo(check.verb, check.resource, check.namespace) if err != nil { return nil, err } if !allowed { var msg string if check.namespace == "" { msg = fmt.Sprintf("The user is not allowed to %s %s", check.verb, check.resource) } else { msg = fmt.Sprintf("The user is not allowed to %s %s in the %s namespace", check.verb, check.resource, check.namespace) } warnings = append(warnings, msg) } } return warnings, nil } // GetRolesFor returns a set of names of Roles matching the specified Action. 
func (w *WhoCan) getRolesFor(action resolvedAction) (roles, error) { ctx := context.Background() rl, err := w.clientRBAC.Roles(action.Namespace).List(ctx, metav1.ListOptions{}) if err != nil { return nil, err } roleNames := make(map[string]struct{}, 10) for _, item := range rl.Items { if w.policyRuleMatcher.MatchesRole(item, action) { if _, ok := roleNames[item.Name]; !ok { roleNames[item.Name] = struct{}{} } } } return roleNames, nil } // GetClusterRolesFor returns a set of names of ClusterRoles matching the specified Action. func (w *WhoCan) getClusterRolesFor(action resolvedAction) (clusterRoles, error) { ctx := context.Background() crl, err := w.clientRBAC.ClusterRoles().List(ctx, metav1.ListOptions{}) if err != nil { return nil, err } cr := make(map[string]struct{}, 10) for _, item := range crl.Items { if w.policyRuleMatcher.MatchesClusterRole(item, action) { if _, ok := cr[item.Name]; !ok { cr[item.Name] = struct{}{} } } } return cr, nil } // GetRoleBindings returns the RoleBindings that refer to the given set of Role names or ClusterRole names. func (w *WhoCan) getRoleBindings(action resolvedAction, roleNames roles, clusterRoleNames clusterRoles) (roleBindings []rbac.RoleBinding, err error) { // TODO I'm wondering if GetRoleBindings should be invoked at all when the --all-namespaces flag is specified? 
if action.Namespace == core.NamespaceAll { return } ctx := context.Background() list, err := w.clientRBAC.RoleBindings(action.Namespace).List(ctx, metav1.ListOptions{}) if err != nil { return } for _, roleBinding := range list.Items { if roleBinding.RoleRef.Kind == RoleKind { if _, ok := roleNames[roleBinding.RoleRef.Name]; ok { roleBindings = append(roleBindings, roleBinding) } } else if roleBinding.RoleRef.Kind == ClusterRoleKind { if _, ok := clusterRoleNames[roleBinding.RoleRef.Name]; ok { roleBindings = append(roleBindings, roleBinding) } } } return } // GetClusterRoleBindings returns the ClusterRoleBindings that refer to the given sef of ClusterRole names. func (w *WhoCan) getClusterRoleBindings(clusterRoleNames clusterRoles) (clusterRoleBindings []rbac.ClusterRoleBinding, err error) { ctx := context.Background() list, err := w.clientRBAC.ClusterRoleBindings().List(ctx, metav1.ListOptions{}) if err != nil { return } for _, roleBinding := range list.Items { if _, ok := clusterRoleNames[roleBinding.RoleRef.Name]; ok { clusterRoleBindings = append(clusterRoleBindings, roleBinding) } } return } func (w Action) String() string { if w.NonResourceURL != "" { return fmt.Sprintf("%s %s", w.Verb, w.NonResourceURL) } name := w.ResourceName if name != "" { name = "/" + name } return fmt.Sprintf("%s %s%s", w.Verb, w.Resource, name) }
{ var configFlags *clioptions.ConfigFlags cmd := &cobra.Command{ Use: whoCanUsage, Long: whoCanLong, Example: whoCanExample, SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { clientConfig := configFlags.ToRawKubeConfigLoader() restConfig, err := clientConfig.ClientConfig() if err != nil { return fmt.Errorf("getting rest config: %v", err) } mapper, err := configFlags.ToRESTMapper() if err != nil { return fmt.Errorf("getting mapper: %v", err) } action, err := ActionFrom(clientConfig, cmd.Flags(), args) if err != nil { return err } o, err := NewWhoCan(restConfig, mapper) if err != nil { return err } warnings, err := o.CheckAPIAccess(action) if err != nil { return err } output, err := cmd.Flags().GetString(outputFlag) if err != nil { return err } printer := NewPrinter(streams.Out, output == outputWide) // Output warnings printer.PrintWarnings(warnings) roleBindings, clusterRoleBindings, err := o.Check(action) if err != nil { return err } // Output check results output = strings.ToLower(output) if output == outputJson { printer.ExportData(action, roleBindings, clusterRoleBindings) } else if output == outputWide || output == "" { printer.PrintChecks(action, roleBindings, clusterRoleBindings) } else { return fmt.Errorf("invalid output format: %v", output) } return nil }, } cmd.Flags().String(subResourceFlag, "", "SubResource such as pod/log or deployment/scale") cmd.Flags().BoolP(allNamespacesFlag, "A", false, "If true, check for users that can do the specified action in any of the available namespaces") cmd.Flags().StringP(outputFlag, "o", "", "Output format. Currently the only supported output format is wide or JSON.") flag.CommandLine.VisitAll(func(gf *flag.Flag) { cmd.Flags().AddGoFlag(gf) }) configFlags = clioptions.NewConfigFlags(true) configFlags.AddFlags(cmd.Flags()) return cmd, nil }
identifier_body
list.go
package cmd import ( "context" "errors" "flag" "fmt" "strings" "github.com/spf13/cobra" "github.com/spf13/pflag" core "k8s.io/api/core/v1" rbac "k8s.io/api/rbac/v1" apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" clioptions "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/client-go/kubernetes" clientcore "k8s.io/client-go/kubernetes/typed/core/v1" clientrbac "k8s.io/client-go/kubernetes/typed/rbac/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" ) const ( whoCanUsage = `kubectl who-can VERB (TYPE | TYPE/NAME | NONRESOURCEURL)` whoCanLong = `Shows which users, groups and service accounts can perform a given verb on a given resource type. VERB is a logical Kubernetes API verb like 'get', 'list', 'watch', 'delete', etc. TYPE is a Kubernetes resource. Shortcuts and API groups will be resolved, e.g. 'po' or 'pods.metrics.k8s.io'. NAME is the name of a particular Kubernetes resource. NONRESOURCEURL is a partial URL that starts with "/".` whoCanExample = ` # List who can get pods from any of the available namespaces kubectl who-can get pods --all-namespaces # List who can create pods in the current namespace kubectl who-can create pods # List who can get pods specifying the API group kubectl who-can get pods.metrics.k8s.io # List who can create services in namespace "foo" kubectl who-can create services -n foo # List who can get the service named "mongodb" in namespace "bar" kubectl who-can get svc/mongodb --namespace bar # List who can do everything with pods in the current namespace kubectl who-can '*' pods # List who can list every resource in the namespace "baz" kubectl who-can list '*' -n baz # List who can read pod logs kubectl who-can get pods --subresource=log # List who can access the URL /logs/ kubectl who-can get /logs` ) const ( // RoleKind is the RoleRef's Kind referencing a Role. 
RoleKind = "Role" // ClusterRoleKind is the RoleRef's Kind referencing a ClusterRole. ClusterRoleKind = "ClusterRole" ) const ( subResourceFlag = "subresource" allNamespacesFlag = "all-namespaces" namespaceFlag = "namespace" outputFlag = "output" outputWide = "wide" outputJson = "json" ) // Action represents an action a subject can be given permission to. type Action struct { Verb string Resource string ResourceName string SubResource string NonResourceURL string Namespace string AllNamespaces bool } type resolvedAction struct { Action gr schema.GroupResource } // roles is a set of Role names matching the specified Action. type roles map[string]struct{} // clusterRoles is a set of ClusterRole names matching the specified Action. type clusterRoles map[string]struct{} type WhoCan struct { clientNamespace clientcore.NamespaceInterface clientRBAC clientrbac.RbacV1Interface namespaceValidator NamespaceValidator resourceResolver ResourceResolver accessChecker AccessChecker policyRuleMatcher PolicyRuleMatcher } // NewWhoCan constructs a new WhoCan checker with the specified rest.Config and RESTMapper. func NewWhoCan(restConfig *rest.Config, mapper apimeta.RESTMapper) (*WhoCan, error) { client, err := kubernetes.NewForConfig(restConfig) if err != nil { return nil, err } clientNamespace := client.CoreV1().Namespaces() return &WhoCan{ clientNamespace: clientNamespace, clientRBAC: client.RbacV1(), namespaceValidator: NewNamespaceValidator(clientNamespace), resourceResolver: NewResourceResolver(client.Discovery(), mapper), accessChecker: NewAccessChecker(client.AuthorizationV1().SelfSubjectAccessReviews()), policyRuleMatcher: NewPolicyRuleMatcher(), }, nil } // NewWhoCanCommand constructs the WhoCan command with the specified IOStreams. 
func NewWhoCanCommand(streams clioptions.IOStreams) (*cobra.Command, error) { var configFlags *clioptions.ConfigFlags cmd := &cobra.Command{ Use: whoCanUsage, Long: whoCanLong, Example: whoCanExample, SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { clientConfig := configFlags.ToRawKubeConfigLoader() restConfig, err := clientConfig.ClientConfig() if err != nil { return fmt.Errorf("getting rest config: %v", err) } mapper, err := configFlags.ToRESTMapper() if err != nil { return fmt.Errorf("getting mapper: %v", err) } action, err := ActionFrom(clientConfig, cmd.Flags(), args) if err != nil { return err } o, err := NewWhoCan(restConfig, mapper) if err != nil { return err } warnings, err := o.CheckAPIAccess(action) if err != nil { return err } output, err := cmd.Flags().GetString(outputFlag) if err != nil { return err } printer := NewPrinter(streams.Out, output == outputWide) // Output warnings printer.PrintWarnings(warnings) roleBindings, clusterRoleBindings, err := o.Check(action) if err != nil { return err } // Output check results output = strings.ToLower(output) if output == outputJson { printer.ExportData(action, roleBindings, clusterRoleBindings) } else if output == outputWide || output == "" { printer.PrintChecks(action, roleBindings, clusterRoleBindings) } else { return fmt.Errorf("invalid output format: %v", output) } return nil }, } cmd.Flags().String(subResourceFlag, "", "SubResource such as pod/log or deployment/scale") cmd.Flags().BoolP(allNamespacesFlag, "A", false, "If true, check for users that can do the specified action in any of the available namespaces") cmd.Flags().StringP(outputFlag, "o", "", "Output format. 
Currently the only supported output format is wide or JSON.") flag.CommandLine.VisitAll(func(gf *flag.Flag) { cmd.Flags().AddGoFlag(gf) }) configFlags = clioptions.NewConfigFlags(true) configFlags.AddFlags(cmd.Flags()) return cmd, nil } // ActionFrom sets all information required to check who can perform the specified action. func ActionFrom(clientConfig clientcmd.ClientConfig, flags *pflag.FlagSet, args []string) (action Action, err error) { if len(args) < 2 { err = errors.New("you must specify two or three arguments: verb, resource, and optional resourceName") return } action.Verb = args[0] if strings.HasPrefix(args[1], "/") { action.NonResourceURL = args[1] klog.V(3).Infof("Resolved nonResourceURL `%s`", action.NonResourceURL) } else { resourceTokens := strings.SplitN(args[1], "/", 2) action.Resource = resourceTokens[0] if len(resourceTokens) > 1 { action.ResourceName = resourceTokens[1] klog.V(3).Infof("Resolved resourceName `%s`", action.ResourceName) } } action.SubResource, err = flags.GetString(subResourceFlag) if err != nil { return } action.AllNamespaces, err = flags.GetBool(allNamespacesFlag) if err != nil { return } if action.AllNamespaces { action.Namespace = core.NamespaceAll klog.V(3).Infof("Resolved namespace `%s` from --all-namespaces flag", action.Namespace) return } action.Namespace, err = flags.GetString(namespaceFlag) if err != nil { return } if action.Namespace != "" { klog.V(3).Infof("Resolved namespace `%s` from --namespace flag", action.Namespace) return } // Neither --all-namespaces nor --namespace flag was specified action.Namespace, _, err = clientConfig.Namespace() if err != nil { err = fmt.Errorf("getting namespace from current context: %v", err) } klog.V(3).Infof("Resolved namespace `%s` from current context", action.Namespace) return } // Validate makes sure that the specified action is valid. 
func (w *WhoCan) validate(action Action) error { if action.NonResourceURL != "" && action.SubResource != "" { return fmt.Errorf("--subresource cannot be used with NONRESOURCEURL") } err := w.namespaceValidator.Validate(action.Namespace) if err != nil { return fmt.Errorf("validating namespace: %v", err) } return nil } // Check checks who can perform the action specified by WhoCanOptions and returns the role bindings that allows the // action to be performed. func (w *WhoCan) Check(action Action) (roleBindings []rbac.RoleBinding, clusterRoleBindings []rbac.ClusterRoleBinding, err error) { err = w.validate(action) if err != nil { err = fmt.Errorf("validation: %v", err) return } resolvedAction := resolvedAction{Action: action} if action.Resource != "" { resolvedAction.gr, err = w.resourceResolver.Resolve(action.Verb, action.Resource, action.SubResource) if err != nil { err = fmt.Errorf("resolving resource: %v", err) return } klog.V(3).Infof("Resolved resource `%s`", resolvedAction.gr.String()) } // Get the Roles that relate to the Verbs and Resources we are interested in roleNames, err := w.getRolesFor(resolvedAction) if err != nil { return []rbac.RoleBinding{}, []rbac.ClusterRoleBinding{}, fmt.Errorf("getting Roles: %v", err) } // Get the ClusterRoles that relate to the verbs and resources we are interested in clusterRoleNames, err := w.getClusterRolesFor(resolvedAction) if err != nil { return []rbac.RoleBinding{}, []rbac.ClusterRoleBinding{}, fmt.Errorf("getting ClusterRoles: %v", err) } // Get the RoleBindings that relate to this set of Roles or ClusterRoles roleBindings, err = w.getRoleBindings(resolvedAction, roleNames, clusterRoleNames) if err != nil { err = fmt.Errorf("getting RoleBindings: %v", err) return } // Get the ClusterRoleBindings that relate to this set of ClusterRoles clusterRoleBindings, err = w.getClusterRoleBindings(clusterRoleNames) if err != nil { err = fmt.Errorf("getting ClusterRoleBindings: %v", err) return } return } // CheckAPIAccess checks 
whether the subject in the current context has enough privileges to query Kubernetes API // server to perform Check. func (w *WhoCan) CheckAPIAccess(action Action) ([]string, error) { type check struct { verb string resource string namespace string } var checks []check var warnings []string ctx := context.Background() // Determine which checks need to be executed. if action.Namespace == "" { checks = append(checks, check{"list", "namespaces", ""}) nsList, err := w.clientNamespace.List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("listing namespaces: %v", err) } for _, ns := range nsList.Items { checks = append(checks, check{"list", "roles", ns.Name}) checks = append(checks, check{"list", "rolebindings", ns.Name}) } } else { checks = append(checks, check{"list", "roles", action.Namespace}) checks = append(checks, check{"list", "rolebindings", action.Namespace}) } // Actually run the checks and collect warnings. for _, check := range checks { allowed, err := w.accessChecker.IsAllowedTo(check.verb, check.resource, check.namespace) if err != nil { return nil, err } if !allowed { var msg string if check.namespace == "" { msg = fmt.Sprintf("The user is not allowed to %s %s", check.verb, check.resource) } else { msg = fmt.Sprintf("The user is not allowed to %s %s in the %s namespace", check.verb, check.resource, check.namespace) } warnings = append(warnings, msg) } } return warnings, nil } // GetRolesFor returns a set of names of Roles matching the specified Action. func (w *WhoCan) getRolesFor(action resolvedAction) (roles, error) { ctx := context.Background() rl, err := w.clientRBAC.Roles(action.Namespace).List(ctx, metav1.ListOptions{}) if err != nil {
for _, item := range rl.Items { if w.policyRuleMatcher.MatchesRole(item, action) { if _, ok := roleNames[item.Name]; !ok { roleNames[item.Name] = struct{}{} } } } return roleNames, nil } // GetClusterRolesFor returns a set of names of ClusterRoles matching the specified Action. func (w *WhoCan) getClusterRolesFor(action resolvedAction) (clusterRoles, error) { ctx := context.Background() crl, err := w.clientRBAC.ClusterRoles().List(ctx, metav1.ListOptions{}) if err != nil { return nil, err } cr := make(map[string]struct{}, 10) for _, item := range crl.Items { if w.policyRuleMatcher.MatchesClusterRole(item, action) { if _, ok := cr[item.Name]; !ok { cr[item.Name] = struct{}{} } } } return cr, nil } // GetRoleBindings returns the RoleBindings that refer to the given set of Role names or ClusterRole names. func (w *WhoCan) getRoleBindings(action resolvedAction, roleNames roles, clusterRoleNames clusterRoles) (roleBindings []rbac.RoleBinding, err error) { // TODO I'm wondering if GetRoleBindings should be invoked at all when the --all-namespaces flag is specified? if action.Namespace == core.NamespaceAll { return } ctx := context.Background() list, err := w.clientRBAC.RoleBindings(action.Namespace).List(ctx, metav1.ListOptions{}) if err != nil { return } for _, roleBinding := range list.Items { if roleBinding.RoleRef.Kind == RoleKind { if _, ok := roleNames[roleBinding.RoleRef.Name]; ok { roleBindings = append(roleBindings, roleBinding) } } else if roleBinding.RoleRef.Kind == ClusterRoleKind { if _, ok := clusterRoleNames[roleBinding.RoleRef.Name]; ok { roleBindings = append(roleBindings, roleBinding) } } } return } // GetClusterRoleBindings returns the ClusterRoleBindings that refer to the given sef of ClusterRole names. 
func (w *WhoCan) getClusterRoleBindings(clusterRoleNames clusterRoles) (clusterRoleBindings []rbac.ClusterRoleBinding, err error) { ctx := context.Background() list, err := w.clientRBAC.ClusterRoleBindings().List(ctx, metav1.ListOptions{}) if err != nil { return } for _, roleBinding := range list.Items { if _, ok := clusterRoleNames[roleBinding.RoleRef.Name]; ok { clusterRoleBindings = append(clusterRoleBindings, roleBinding) } } return } func (w Action) String() string { if w.NonResourceURL != "" { return fmt.Sprintf("%s %s", w.Verb, w.NonResourceURL) } name := w.ResourceName if name != "" { name = "/" + name } return fmt.Sprintf("%s %s%s", w.Verb, w.Resource, name) }
return nil, err } roleNames := make(map[string]struct{}, 10)
random_line_split
list.go
package cmd import ( "context" "errors" "flag" "fmt" "strings" "github.com/spf13/cobra" "github.com/spf13/pflag" core "k8s.io/api/core/v1" rbac "k8s.io/api/rbac/v1" apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" clioptions "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/client-go/kubernetes" clientcore "k8s.io/client-go/kubernetes/typed/core/v1" clientrbac "k8s.io/client-go/kubernetes/typed/rbac/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" ) const ( whoCanUsage = `kubectl who-can VERB (TYPE | TYPE/NAME | NONRESOURCEURL)` whoCanLong = `Shows which users, groups and service accounts can perform a given verb on a given resource type. VERB is a logical Kubernetes API verb like 'get', 'list', 'watch', 'delete', etc. TYPE is a Kubernetes resource. Shortcuts and API groups will be resolved, e.g. 'po' or 'pods.metrics.k8s.io'. NAME is the name of a particular Kubernetes resource. NONRESOURCEURL is a partial URL that starts with "/".` whoCanExample = ` # List who can get pods from any of the available namespaces kubectl who-can get pods --all-namespaces # List who can create pods in the current namespace kubectl who-can create pods # List who can get pods specifying the API group kubectl who-can get pods.metrics.k8s.io # List who can create services in namespace "foo" kubectl who-can create services -n foo # List who can get the service named "mongodb" in namespace "bar" kubectl who-can get svc/mongodb --namespace bar # List who can do everything with pods in the current namespace kubectl who-can '*' pods # List who can list every resource in the namespace "baz" kubectl who-can list '*' -n baz # List who can read pod logs kubectl who-can get pods --subresource=log # List who can access the URL /logs/ kubectl who-can get /logs` ) const ( // RoleKind is the RoleRef's Kind referencing a Role. 
RoleKind = "Role" // ClusterRoleKind is the RoleRef's Kind referencing a ClusterRole. ClusterRoleKind = "ClusterRole" ) const ( subResourceFlag = "subresource" allNamespacesFlag = "all-namespaces" namespaceFlag = "namespace" outputFlag = "output" outputWide = "wide" outputJson = "json" ) // Action represents an action a subject can be given permission to. type Action struct { Verb string Resource string ResourceName string SubResource string NonResourceURL string Namespace string AllNamespaces bool } type resolvedAction struct { Action gr schema.GroupResource } // roles is a set of Role names matching the specified Action. type roles map[string]struct{} // clusterRoles is a set of ClusterRole names matching the specified Action. type clusterRoles map[string]struct{} type WhoCan struct { clientNamespace clientcore.NamespaceInterface clientRBAC clientrbac.RbacV1Interface namespaceValidator NamespaceValidator resourceResolver ResourceResolver accessChecker AccessChecker policyRuleMatcher PolicyRuleMatcher } // NewWhoCan constructs a new WhoCan checker with the specified rest.Config and RESTMapper. func NewWhoCan(restConfig *rest.Config, mapper apimeta.RESTMapper) (*WhoCan, error) { client, err := kubernetes.NewForConfig(restConfig) if err != nil { return nil, err } clientNamespace := client.CoreV1().Namespaces() return &WhoCan{ clientNamespace: clientNamespace, clientRBAC: client.RbacV1(), namespaceValidator: NewNamespaceValidator(clientNamespace), resourceResolver: NewResourceResolver(client.Discovery(), mapper), accessChecker: NewAccessChecker(client.AuthorizationV1().SelfSubjectAccessReviews()), policyRuleMatcher: NewPolicyRuleMatcher(), }, nil } // NewWhoCanCommand constructs the WhoCan command with the specified IOStreams. 
func NewWhoCanCommand(streams clioptions.IOStreams) (*cobra.Command, error) { var configFlags *clioptions.ConfigFlags cmd := &cobra.Command{ Use: whoCanUsage, Long: whoCanLong, Example: whoCanExample, SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { clientConfig := configFlags.ToRawKubeConfigLoader() restConfig, err := clientConfig.ClientConfig() if err != nil { return fmt.Errorf("getting rest config: %v", err) } mapper, err := configFlags.ToRESTMapper() if err != nil { return fmt.Errorf("getting mapper: %v", err) } action, err := ActionFrom(clientConfig, cmd.Flags(), args) if err != nil { return err } o, err := NewWhoCan(restConfig, mapper) if err != nil { return err } warnings, err := o.CheckAPIAccess(action) if err != nil { return err } output, err := cmd.Flags().GetString(outputFlag) if err != nil { return err } printer := NewPrinter(streams.Out, output == outputWide) // Output warnings printer.PrintWarnings(warnings) roleBindings, clusterRoleBindings, err := o.Check(action) if err != nil { return err } // Output check results output = strings.ToLower(output) if output == outputJson { printer.ExportData(action, roleBindings, clusterRoleBindings) } else if output == outputWide || output == "" { printer.PrintChecks(action, roleBindings, clusterRoleBindings) } else { return fmt.Errorf("invalid output format: %v", output) } return nil }, } cmd.Flags().String(subResourceFlag, "", "SubResource such as pod/log or deployment/scale") cmd.Flags().BoolP(allNamespacesFlag, "A", false, "If true, check for users that can do the specified action in any of the available namespaces") cmd.Flags().StringP(outputFlag, "o", "", "Output format. 
Currently the only supported output format is wide or JSON.") flag.CommandLine.VisitAll(func(gf *flag.Flag) { cmd.Flags().AddGoFlag(gf) }) configFlags = clioptions.NewConfigFlags(true) configFlags.AddFlags(cmd.Flags()) return cmd, nil } // ActionFrom sets all information required to check who can perform the specified action. func ActionFrom(clientConfig clientcmd.ClientConfig, flags *pflag.FlagSet, args []string) (action Action, err error) { if len(args) < 2 { err = errors.New("you must specify two or three arguments: verb, resource, and optional resourceName") return } action.Verb = args[0] if strings.HasPrefix(args[1], "/") { action.NonResourceURL = args[1] klog.V(3).Infof("Resolved nonResourceURL `%s`", action.NonResourceURL) } else { resourceTokens := strings.SplitN(args[1], "/", 2) action.Resource = resourceTokens[0] if len(resourceTokens) > 1 { action.ResourceName = resourceTokens[1] klog.V(3).Infof("Resolved resourceName `%s`", action.ResourceName) } } action.SubResource, err = flags.GetString(subResourceFlag) if err != nil { return } action.AllNamespaces, err = flags.GetBool(allNamespacesFlag) if err != nil { return } if action.AllNamespaces { action.Namespace = core.NamespaceAll klog.V(3).Infof("Resolved namespace `%s` from --all-namespaces flag", action.Namespace) return } action.Namespace, err = flags.GetString(namespaceFlag) if err != nil { return } if action.Namespace != "" { klog.V(3).Infof("Resolved namespace `%s` from --namespace flag", action.Namespace) return } // Neither --all-namespaces nor --namespace flag was specified action.Namespace, _, err = clientConfig.Namespace() if err != nil { err = fmt.Errorf("getting namespace from current context: %v", err) } klog.V(3).Infof("Resolved namespace `%s` from current context", action.Namespace) return } // Validate makes sure that the specified action is valid. 
func (w *WhoCan) validate(action Action) error { if action.NonResourceURL != "" && action.SubResource != "" { return fmt.Errorf("--subresource cannot be used with NONRESOURCEURL") } err := w.namespaceValidator.Validate(action.Namespace) if err != nil { return fmt.Errorf("validating namespace: %v", err) } return nil } // Check checks who can perform the action specified by WhoCanOptions and returns the role bindings that allows the // action to be performed. func (w *WhoCan) Check(action Action) (roleBindings []rbac.RoleBinding, clusterRoleBindings []rbac.ClusterRoleBinding, err error) { err = w.validate(action) if err != nil { err = fmt.Errorf("validation: %v", err) return } resolvedAction := resolvedAction{Action: action} if action.Resource != "" { resolvedAction.gr, err = w.resourceResolver.Resolve(action.Verb, action.Resource, action.SubResource) if err != nil { err = fmt.Errorf("resolving resource: %v", err) return } klog.V(3).Infof("Resolved resource `%s`", resolvedAction.gr.String()) } // Get the Roles that relate to the Verbs and Resources we are interested in roleNames, err := w.getRolesFor(resolvedAction) if err != nil { return []rbac.RoleBinding{}, []rbac.ClusterRoleBinding{}, fmt.Errorf("getting Roles: %v", err) } // Get the ClusterRoles that relate to the verbs and resources we are interested in clusterRoleNames, err := w.getClusterRolesFor(resolvedAction) if err != nil { return []rbac.RoleBinding{}, []rbac.ClusterRoleBinding{}, fmt.Errorf("getting ClusterRoles: %v", err) } // Get the RoleBindings that relate to this set of Roles or ClusterRoles roleBindings, err = w.getRoleBindings(resolvedAction, roleNames, clusterRoleNames) if err != nil { err = fmt.Errorf("getting RoleBindings: %v", err) return } // Get the ClusterRoleBindings that relate to this set of ClusterRoles clusterRoleBindings, err = w.getClusterRoleBindings(clusterRoleNames) if err != nil { err = fmt.Errorf("getting ClusterRoleBindings: %v", err) return } return } // CheckAPIAccess checks 
whether the subject in the current context has enough privileges to query Kubernetes API // server to perform Check. func (w *WhoCan) CheckAPIAccess(action Action) ([]string, error) { type check struct { verb string resource string namespace string } var checks []check var warnings []string ctx := context.Background() // Determine which checks need to be executed. if action.Namespace == "" { checks = append(checks, check{"list", "namespaces", ""}) nsList, err := w.clientNamespace.List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("listing namespaces: %v", err) } for _, ns := range nsList.Items { checks = append(checks, check{"list", "roles", ns.Name}) checks = append(checks, check{"list", "rolebindings", ns.Name}) } } else { checks = append(checks, check{"list", "roles", action.Namespace}) checks = append(checks, check{"list", "rolebindings", action.Namespace}) } // Actually run the checks and collect warnings. for _, check := range checks { allowed, err := w.accessChecker.IsAllowedTo(check.verb, check.resource, check.namespace) if err != nil
if !allowed { var msg string if check.namespace == "" { msg = fmt.Sprintf("The user is not allowed to %s %s", check.verb, check.resource) } else { msg = fmt.Sprintf("The user is not allowed to %s %s in the %s namespace", check.verb, check.resource, check.namespace) } warnings = append(warnings, msg) } } return warnings, nil } // GetRolesFor returns a set of names of Roles matching the specified Action. func (w *WhoCan) getRolesFor(action resolvedAction) (roles, error) { ctx := context.Background() rl, err := w.clientRBAC.Roles(action.Namespace).List(ctx, metav1.ListOptions{}) if err != nil { return nil, err } roleNames := make(map[string]struct{}, 10) for _, item := range rl.Items { if w.policyRuleMatcher.MatchesRole(item, action) { if _, ok := roleNames[item.Name]; !ok { roleNames[item.Name] = struct{}{} } } } return roleNames, nil } // GetClusterRolesFor returns a set of names of ClusterRoles matching the specified Action. func (w *WhoCan) getClusterRolesFor(action resolvedAction) (clusterRoles, error) { ctx := context.Background() crl, err := w.clientRBAC.ClusterRoles().List(ctx, metav1.ListOptions{}) if err != nil { return nil, err } cr := make(map[string]struct{}, 10) for _, item := range crl.Items { if w.policyRuleMatcher.MatchesClusterRole(item, action) { if _, ok := cr[item.Name]; !ok { cr[item.Name] = struct{}{} } } } return cr, nil } // GetRoleBindings returns the RoleBindings that refer to the given set of Role names or ClusterRole names. func (w *WhoCan) getRoleBindings(action resolvedAction, roleNames roles, clusterRoleNames clusterRoles) (roleBindings []rbac.RoleBinding, err error) { // TODO I'm wondering if GetRoleBindings should be invoked at all when the --all-namespaces flag is specified? 
if action.Namespace == core.NamespaceAll { return } ctx := context.Background() list, err := w.clientRBAC.RoleBindings(action.Namespace).List(ctx, metav1.ListOptions{}) if err != nil { return } for _, roleBinding := range list.Items { if roleBinding.RoleRef.Kind == RoleKind { if _, ok := roleNames[roleBinding.RoleRef.Name]; ok { roleBindings = append(roleBindings, roleBinding) } } else if roleBinding.RoleRef.Kind == ClusterRoleKind { if _, ok := clusterRoleNames[roleBinding.RoleRef.Name]; ok { roleBindings = append(roleBindings, roleBinding) } } } return } // GetClusterRoleBindings returns the ClusterRoleBindings that refer to the given sef of ClusterRole names. func (w *WhoCan) getClusterRoleBindings(clusterRoleNames clusterRoles) (clusterRoleBindings []rbac.ClusterRoleBinding, err error) { ctx := context.Background() list, err := w.clientRBAC.ClusterRoleBindings().List(ctx, metav1.ListOptions{}) if err != nil { return } for _, roleBinding := range list.Items { if _, ok := clusterRoleNames[roleBinding.RoleRef.Name]; ok { clusterRoleBindings = append(clusterRoleBindings, roleBinding) } } return } func (w Action) String() string { if w.NonResourceURL != "" { return fmt.Sprintf("%s %s", w.Verb, w.NonResourceURL) } name := w.ResourceName if name != "" { name = "/" + name } return fmt.Sprintf("%s %s%s", w.Verb, w.Resource, name) }
{ return nil, err }
conditional_block
main.rs
use std::convert::TryInto; use std::path::{Path, PathBuf}; use std::sync::Arc; use bindle::client::{ tokens::{HttpBasic, NoToken, OidcToken, TokenManager}, Client, ClientError, Result, }; use bindle::invoice::signature::{ KeyRing, SecretKeyEntry, SecretKeyFile, SecretKeyStorage, SignatureRole, }; use bindle::invoice::Invoice; use bindle::provider::ProviderError; use bindle::signature::KeyEntry; use bindle::standalone::{StandaloneRead, StandaloneWrite}; use bindle::{ cache::{Cache, DumbCache}, provider::Provider, }; use clap::Clap; use sha2::Digest; use tokio::io::AsyncWriteExt; use tokio::sync::Mutex; use tokio_stream::StreamExt; use tokio_util::io::StreamReader; use tracing::log::{info, warn}; mod opts; use opts::*; #[tokio::main] async fn main() { // Trap and format error messages using the proper value if let Err(e) = run().await.map_err(anyhow::Error::new) { eprintln!("{}", e); for (i, cause) in e.chain().enumerate() { // Skip the first message because it is printed above. if i > 0 { if i == 1 { eprintln!("\nError trace:"); } eprintln!("\t{}: {}", i, cause); } } std::process::exit(1); } } /// An internal token type so we dynamically choose between various token types. We can't box dynner /// them for some reason, so we need to basically do our own fake dynamic dispatch. 
If this could be /// useful for other consumers of the bindle crate, we could add this in the future #[derive(Clone)] enum PickYourAuth { None(NoToken), Http(HttpBasic), Oidc(OidcToken), } #[async_trait::async_trait] impl TokenManager for PickYourAuth { async fn apply_auth_header( &self, builder: reqwest::RequestBuilder, ) -> Result<reqwest::RequestBuilder> { match &self { PickYourAuth::None(nt) => nt.apply_auth_header(builder).await, PickYourAuth::Http(h) => h.apply_auth_header(builder).await, PickYourAuth::Oidc(oidc) => oidc.apply_auth_header(builder).await, } } } async fn run() -> std::result::Result<(), ClientError> { let opts = opts::Opts::parse(); // TODO: Allow log level setting outside of RUST_LOG (this is easier with this subscriber) tracing_subscriber::fmt::init(); let bindle_dir = opts.bindle_dir.unwrap_or_else(|| { dirs::cache_dir() .expect("Unable to infer cache directory") .join("bindle") }); tokio::fs::create_dir_all(&bindle_dir).await?; let token_file = opts.token_file.unwrap_or_else(|| { dirs::config_dir() .expect("Unable to infer cache directory") .join("bindle/.token") }); // Theoretically, someone could create the token file at /, which would mean this function // wouldn't return anything. 
This isn't an error, so do not attempt to create the directory if // so if let Some(p) = token_file.parent() { tokio::fs::create_dir_all(p).await?; } let token = if !matches!(opts.subcmd, SubCommand::Login(_)) { match OidcToken::new_from_file(&token_file).await { Ok(t) => { tracing::debug!("Found and loaded token file"); PickYourAuth::Oidc(t) } // Token doesn't exist on disk, so assume they don't want token auth Err(ClientError::Io(e)) if matches!(e.kind(), std::io::ErrorKind::NotFound) => { tracing::debug!("No token file located, no token authentication will be used"); // If we have basic auth set, and no token was found, use that if let Some(user) = opts.http_user { PickYourAuth::Http(HttpBasic::new( &user, &opts.http_password.unwrap_or_default(), )) } else { PickYourAuth::None(NoToken) } } Err(e) => { let message = format!("Error loading token file {:?}: {}", &token_file, e); return Err(ClientError::InvalidConfig(message)); } } } else { PickYourAuth::None(NoToken) }; let bindle_client = Client::new(&opts.server_url, token)?; let local = bindle::provider::file::FileProvider::new( bindle_dir, bindle::search::NoopEngine::default(), ) .await; let cache = DumbCache::new(bindle_client.clone(), local); // We don't verify locally yet, but we will need the keyring to do so let _keyring = load_keyring(opts.keyring) .await .unwrap_or_else(|_| KeyRing::default()); match opts.subcmd { SubCommand::Info(info_opts) => { let inv = match info_opts.yanked { true => cache.get_invoice(info_opts.bindle_id), false => cache.get_yanked_invoice(info_opts.bindle_id), } .await .map_err(map_storage_error)?; match info_opts.output { Some(format) if &format == "toml" => { tokio::io::stdout().write_all(&toml::to_vec(&inv)?).await? } Some(format) if &format == "json" => { tokio::io::stdout() .write_all(&serde_json::to_vec_pretty(&inv)?) .await? 
} Some(format) => { return Err(ClientError::Other(format!("Unknown format: {}", format))) } None => tokio::io::stdout().write_all(&toml::to_vec(&inv)?).await?, } } SubCommand::GetInvoice(gi_opts) => { let inv = match gi_opts.yanked { true => cache.get_invoice(&gi_opts.bindle_id), false => cache.get_yanked_invoice(&gi_opts.bindle_id), } .await .map_err(map_storage_error)?; let mut file = tokio::fs::OpenOptions::new() .write(true) .create_new(true) // Make sure we aren't overwriting .open(&gi_opts.output) .await?; file.write_all(&toml::to_vec(&inv)?).await?; file.flush().await?; println!( "Wrote invoice {} to {}", gi_opts.bindle_id, gi_opts.output.display() ); } SubCommand::GetParcel(gp_opts) => get_parcel(cache, gp_opts).await?, SubCommand::Yank(yank_opts) => { bindle_client.yank_invoice(&yank_opts.bindle_id).await?; println!("Bindle {} yanked", yank_opts.bindle_id); } SubCommand::Search(search_opts) => { // TODO: Do we want to use the cache for searching? let matches = bindle_client .query_invoices(search_opts.clone().into()) .await?; match search_opts.output { Some(format) if &format == "toml" => { tokio::io::stdout() .write_all(&toml::to_vec(&matches)?) .await? } Some(format) if &format == "json" => { tokio::io::stdout() .write_all(&serde_json::to_vec_pretty(&matches)?) .await? } Some(format) if &format == "table" => tablify(&matches), Some(format) => { return Err(ClientError::Other(format!("Unknown format: {}", format))) } None => tablify(&matches), } } SubCommand::Get(get_opts) => get_all(cache, get_opts).await?, SubCommand::Push(push_opts) => push_all(bindle_client, push_opts).await?, SubCommand::PushInvoice(push_opts) => { let resp = bindle_client .create_invoice_from_file(push_opts.path) .await?; println!("Invoice {} created", resp.invoice.bindle.id); } SubCommand::SignInvoice(sign_opts) => { // Role let role = if let Some(r) = sign_opts.role { role_from_name(r)? 
} else { SignatureRole::Creator }; // Keyfile let keyfile = match sign_opts.secret_file { Some(dir) => dir, None => ensure_config_dir().await?.join("secret_keys.toml"), }; // Signing key let key = first_matching_key(keyfile, &role).await?; // Load the invoice and sign it. let mut inv: Invoice = bindle::client::load::toml(sign_opts.invoice.as_str()).await?; inv.sign(role.clone(), &key)?; // Write the signed invoice to a file. let outfile = sign_opts .destination .unwrap_or_else(|| format!("./invoice-{}.toml", inv.canonical_name())); println!( "Signed as {} with role {} and wrote to {}", sign_opts.invoice, role, outfile ); tokio::fs::write(outfile, toml::to_string(&inv)?).await?; } SubCommand::PushFile(push_opts) => { let label = generate_label(&push_opts.path, push_opts.name, push_opts.media_type).await?; println!("Uploading file {} to server", push_opts.path.display()); bindle_client .create_parcel_from_file(push_opts.bindle_id, &label.sha256, push_opts.path) .await?; println!("File successfully uploaded"); } SubCommand::GenerateLabel(generate_opts) => { let label = generate_label( generate_opts.path, generate_opts.name, generate_opts.media_type, ) .await?; println!("{}", toml::to_string_pretty(&label)?); } SubCommand::PrintKey(print_key_opts) => { let dir = match print_key_opts.secret_file { Some(dir) => dir, None => ensure_config_dir().await?.join("secret_keys.toml"), }; let keyfile = SecretKeyFile::load_file(dir) .await .map_err(|e| ClientError::Other(e.to_string()))?; let matches: Vec<KeyEntry> = match print_key_opts.label { Some(name) => keyfile .key .iter() .filter_map(|k| { if !k.label.contains(&name) { return None; } match k.try_into() { //Skip malformed keys. Err(e) => { eprintln!("Warning: Malformed key: {} (skipping)", e); None } Ok(ke) => Some(ke), } }) .collect(), None => keyfile .key .iter() .filter_map(|k| match k.try_into() { //Skip malformed keys. 
Err(e) => { eprintln!("Warning: Malformed key: {} (skipping)", e); None } Ok(ke) => Some(ke), }) .collect(), }; let keyring = KeyRing::new(matches); let out = toml::to_string(&keyring).map_err(|e| ClientError::Other(e.to_string()))?; println!("{}", out); } SubCommand::CreateKey(create_opts) => { let dir = match create_opts.secret_file { Some(dir) => dir, None => ensure_config_dir().await?.join("secret_keys.toml"), }; println!("Writing keys to {}", dir.display()); match tokio::fs::metadata(&dir).await { Err(e) if matches!(e.kind(), std::io::ErrorKind::NotFound) => { println!("File {} does not exist. Creating it.", dir.display()); let mut keyfile = SecretKeyFile::default(); let newkey = SecretKeyEntry::new( create_opts.label, vec![bindle::SignatureRole::Creator], ); keyfile.key.push(newkey); keyfile .save_file(dir) .await .map_err(|e| ClientError::Other(e.to_string()))?; } Ok(info) => { if !info.is_file() { eprint!("Path must point to a file."); return Err(ClientError::Other( "Keyfile cannot be directory or symlink".to_owned(), )); } let mut keyfile = SecretKeyFile::load_file(&dir) .await .map_err(|e| ClientError::Other(e.to_string()))?; let newkey = SecretKeyEntry::new( create_opts.label, vec![bindle::SignatureRole::Creator], ); keyfile.key.push(newkey); keyfile .save_file(dir) .await .map_err(|e| ClientError::Other(e.to_string()))?; } Err(e) => return Err(e.into()), } } SubCommand::Login(_login_opts) => { // TODO: We'll use login opts when we enable additional login providers OidcToken::login(&opts.server_url, token_file).await?; println!("Login successful"); } } Ok(()) } async fn generate_label( file_path: impl AsRef<Path>, name: Option<String>, media_type: Option<String>, ) -> Result<bindle::Label> { let path = file_path.as_ref().to_owned(); let mut file = tokio::fs::File::open(&path).await?; let media_type = media_type.unwrap_or_else(|| { mime_guess::from_path(&path) .first_or_octet_stream() .to_string() }); info!("Using media type {}", media_type); // Note: 
Should be able to unwrap here because the file opening step would have // failed in conditions where this returns `None` let name = name.unwrap_or_else(|| path.file_name().unwrap().to_string_lossy().to_string()); info!("Using name {}", name); let size = file.metadata().await?.len(); let mut sha = bindle::async_util::AsyncSha256::new(); tokio::io::copy(&mut file, &mut sha).await?; let result = sha.into_inner().expect("data lock error").finalize(); Ok(bindle::Label { sha256: format!("{:x}", result), media_type, size, name, annotations: None, // TODO: allow annotations from command line ..bindle::Label::default() }) } async fn get_parcel<C: Cache + Send + Sync + Clone>(cache: C, opts: GetParcel) -> Result<()> { let parcel = cache .get_parcel(opts.bindle_id, &opts.sha) .await .map_err(map_storage_error)?; let mut file = tokio::fs::OpenOptions::new() .write(true) .create_new(true) // Make sure we aren't overwriting .open(&opts.output) .await?; tokio::io::copy( &mut StreamReader::new( parcel.map(|res| res.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))), ), &mut file, ) .await?; println!("Wrote parcel {} to {}", opts.sha, opts.output.display()); Ok(()) } async fn push_all<T: TokenManager + Send + Sync + Clone + 'static>( client: Client<T>, opts: Push, ) -> Result<()> { let standalone = StandaloneRead::new(opts.path, &opts.bindle_id).await?; standalone.push(&client).await?; println!("Pushed bindle {}", opts.bindle_id); Ok(()) } async fn get_all<C: Cache + Send + Sync + Clone>(cache: C, opts: Get) -> Result<()> { let inv = match opts.yanked { true => cache.get_invoice(opts.bindle_id), false => cache.get_yanked_invoice(opts.bindle_id), } .await .map_err(map_storage_error)?; println!("Fetched invoice. 
Starting fetch of parcels"); let parcels = Arc::new(Mutex::new(std::collections::HashMap::new())); let zero_vec = Vec::with_capacity(0); let is_export = opts.export.is_some(); let parcel_fetch = inv .parcel .as_ref() .unwrap_or(&zero_vec) .iter() .map(|p| { ( p.label.sha256.clone(), inv.bindle.id.clone(), cache.clone(), parcels.clone(), ) }) .map(|(sha, bindle_id, c, parcels)| async move { match c.get_parcel(bindle_id, &sha).await { Ok(p) => { println!("Fetched parcel {}", sha); if is_export { parcels.lock().await.insert( sha, StreamReader::new(p.map(|res| { res.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)) })), ); } } Err(e) => { match e { ProviderError::NotFound => warn!("Parcel {} does not exist", sha), ProviderError::ProxyError(err) if matches!(err, ClientError::ParcelNotFound) => { warn!("Parcel {} does not exist", sha) } // Only return an error if it isn't a not found error. By design, an invoice // can contain parcels that don't yet exist ProviderError::ProxyError(inner) => return Err(inner), _ => { return Err(ClientError::Other(format!( "Unable to get parcel {}: {:?}", sha, e ))) } } } } Ok(()) }); futures::future::join_all(parcel_fetch) .await .into_iter() .collect::<Result<Vec<_>>>()?; if let Some(p) = opts.export { let standalone = StandaloneWrite::new(p, &inv.bindle.id)?; standalone .write( inv, // All locks should be done at this point (as all futures exited), so panicing feels // right here as it is an unrecoverable condition Arc::try_unwrap(parcels) .map_err(|_| ClientError::Other("Unexpected lock error".to_string())) .unwrap()
Ok(()) } async fn load_keyring(keyring: Option<PathBuf>) -> anyhow::Result<KeyRing> { // This takes an Option<PathBuf> because we want to wrap all of the flag handling in this // function, including setting the default if the kyering is None. let dir = keyring .unwrap_or_else(default_config_dir) .join("keyring.toml"); let kr = bindle::client::load::toml(dir).await?; Ok(kr) } fn map_storage_error(e: ProviderError) -> ClientError { match e { ProviderError::Io(e) => ClientError::Io(e), ProviderError::ProxyError(inner) => inner, ProviderError::InvalidId(parse_err) => ClientError::InvalidId(parse_err), _ => ClientError::Other(format!("{}", e)), } } fn default_config_dir() -> PathBuf { dirs::config_dir() .map(|v| v.join("bindle/")) .unwrap_or_else(|| "./bindle".into()) } /// Get the config dir, ensuring that it exists. /// /// This will return the default config directory. If that directory does not /// exist, it will be created before the path is returned. /// /// If the system does not have a configuration directory, this will create a directory named /// `bindle/` in the local working directory. 
/// /// This will return an error async fn ensure_config_dir() -> Result<PathBuf> { let dir = default_config_dir(); tokio::fs::create_dir_all(&dir).await?; Ok(dir) } fn role_from_name(name: String) -> Result<SignatureRole> { match name.as_str() { "c" | "creator" => Ok(SignatureRole::Creator), "h" | "host" => Ok(SignatureRole::Host), "a" | "approver" => Ok(SignatureRole::Approver), "p" | "proxy" => Ok(SignatureRole::Proxy), _ => Err(ClientError::Other("Unknown role".to_owned())), } } async fn first_matching_key(fpath: PathBuf, role: &SignatureRole) -> Result<SecretKeyEntry> { let keys = SecretKeyFile::load_file(&fpath).await.map_err(|e| { ClientError::Other(format!( "Error loading file {}: {}", fpath.display(), e.to_string() )) })?; keys.get_first_matching(role) .map(|k| k.to_owned()) .ok_or_else(|| ClientError::Other("No satisfactory key found".to_owned())) } fn tablify(matches: &bindle::search::Matches) { let last = matches.offset + matches.invoices.len() as u64; let trailer = if matches.more { format!(" - More results are available with --offset={}", last) } else { "".to_owned() }; for i in matches.invoices.iter() { println!( "{}:\t{}", &i.bindle.id, &i.bindle .description .clone() .unwrap_or_else(|| "[no description available]".to_string()) ) } if matches.total > 0 { println!( "=== Showing results {} to {} of {} (limit: {}){}", matches.offset + 1, last, matches.total, matches.limit, trailer, ); } else { println!("No matching bindles were found"); } }
.into_inner(), ) .await?; }
random_line_split
main.rs
use std::convert::TryInto; use std::path::{Path, PathBuf}; use std::sync::Arc; use bindle::client::{ tokens::{HttpBasic, NoToken, OidcToken, TokenManager}, Client, ClientError, Result, }; use bindle::invoice::signature::{ KeyRing, SecretKeyEntry, SecretKeyFile, SecretKeyStorage, SignatureRole, }; use bindle::invoice::Invoice; use bindle::provider::ProviderError; use bindle::signature::KeyEntry; use bindle::standalone::{StandaloneRead, StandaloneWrite}; use bindle::{ cache::{Cache, DumbCache}, provider::Provider, }; use clap::Clap; use sha2::Digest; use tokio::io::AsyncWriteExt; use tokio::sync::Mutex; use tokio_stream::StreamExt; use tokio_util::io::StreamReader; use tracing::log::{info, warn}; mod opts; use opts::*; #[tokio::main] async fn main() { // Trap and format error messages using the proper value if let Err(e) = run().await.map_err(anyhow::Error::new) { eprintln!("{}", e); for (i, cause) in e.chain().enumerate() { // Skip the first message because it is printed above. if i > 0 { if i == 1 { eprintln!("\nError trace:"); } eprintln!("\t{}: {}", i, cause); } } std::process::exit(1); } } /// An internal token type so we dynamically choose between various token types. We can't box dynner /// them for some reason, so we need to basically do our own fake dynamic dispatch. 
If this could be /// useful for other consumers of the bindle crate, we could add this in the future #[derive(Clone)] enum PickYourAuth { None(NoToken), Http(HttpBasic), Oidc(OidcToken), } #[async_trait::async_trait] impl TokenManager for PickYourAuth { async fn apply_auth_header( &self, builder: reqwest::RequestBuilder, ) -> Result<reqwest::RequestBuilder> { match &self { PickYourAuth::None(nt) => nt.apply_auth_header(builder).await, PickYourAuth::Http(h) => h.apply_auth_header(builder).await, PickYourAuth::Oidc(oidc) => oidc.apply_auth_header(builder).await, } } } async fn run() -> std::result::Result<(), ClientError> { let opts = opts::Opts::parse(); // TODO: Allow log level setting outside of RUST_LOG (this is easier with this subscriber) tracing_subscriber::fmt::init(); let bindle_dir = opts.bindle_dir.unwrap_or_else(|| { dirs::cache_dir() .expect("Unable to infer cache directory") .join("bindle") }); tokio::fs::create_dir_all(&bindle_dir).await?; let token_file = opts.token_file.unwrap_or_else(|| { dirs::config_dir() .expect("Unable to infer cache directory") .join("bindle/.token") }); // Theoretically, someone could create the token file at /, which would mean this function // wouldn't return anything. 
This isn't an error, so do not attempt to create the directory if // so if let Some(p) = token_file.parent() { tokio::fs::create_dir_all(p).await?; } let token = if !matches!(opts.subcmd, SubCommand::Login(_)) { match OidcToken::new_from_file(&token_file).await { Ok(t) => { tracing::debug!("Found and loaded token file"); PickYourAuth::Oidc(t) } // Token doesn't exist on disk, so assume they don't want token auth Err(ClientError::Io(e)) if matches!(e.kind(), std::io::ErrorKind::NotFound) => { tracing::debug!("No token file located, no token authentication will be used"); // If we have basic auth set, and no token was found, use that if let Some(user) = opts.http_user { PickYourAuth::Http(HttpBasic::new( &user, &opts.http_password.unwrap_or_default(), )) } else { PickYourAuth::None(NoToken) } } Err(e) => { let message = format!("Error loading token file {:?}: {}", &token_file, e); return Err(ClientError::InvalidConfig(message)); } } } else { PickYourAuth::None(NoToken) }; let bindle_client = Client::new(&opts.server_url, token)?; let local = bindle::provider::file::FileProvider::new( bindle_dir, bindle::search::NoopEngine::default(), ) .await; let cache = DumbCache::new(bindle_client.clone(), local); // We don't verify locally yet, but we will need the keyring to do so let _keyring = load_keyring(opts.keyring) .await .unwrap_or_else(|_| KeyRing::default()); match opts.subcmd { SubCommand::Info(info_opts) => { let inv = match info_opts.yanked { true => cache.get_invoice(info_opts.bindle_id), false => cache.get_yanked_invoice(info_opts.bindle_id), } .await .map_err(map_storage_error)?; match info_opts.output { Some(format) if &format == "toml" => { tokio::io::stdout().write_all(&toml::to_vec(&inv)?).await? } Some(format) if &format == "json" => { tokio::io::stdout() .write_all(&serde_json::to_vec_pretty(&inv)?) .await? 
} Some(format) => { return Err(ClientError::Other(format!("Unknown format: {}", format))) } None => tokio::io::stdout().write_all(&toml::to_vec(&inv)?).await?, } } SubCommand::GetInvoice(gi_opts) => { let inv = match gi_opts.yanked { true => cache.get_invoice(&gi_opts.bindle_id), false => cache.get_yanked_invoice(&gi_opts.bindle_id), } .await .map_err(map_storage_error)?; let mut file = tokio::fs::OpenOptions::new() .write(true) .create_new(true) // Make sure we aren't overwriting .open(&gi_opts.output) .await?; file.write_all(&toml::to_vec(&inv)?).await?; file.flush().await?; println!( "Wrote invoice {} to {}", gi_opts.bindle_id, gi_opts.output.display() ); } SubCommand::GetParcel(gp_opts) => get_parcel(cache, gp_opts).await?, SubCommand::Yank(yank_opts) => { bindle_client.yank_invoice(&yank_opts.bindle_id).await?; println!("Bindle {} yanked", yank_opts.bindle_id); } SubCommand::Search(search_opts) => { // TODO: Do we want to use the cache for searching? let matches = bindle_client .query_invoices(search_opts.clone().into()) .await?; match search_opts.output { Some(format) if &format == "toml" => { tokio::io::stdout() .write_all(&toml::to_vec(&matches)?) .await? } Some(format) if &format == "json" => { tokio::io::stdout() .write_all(&serde_json::to_vec_pretty(&matches)?) .await? } Some(format) if &format == "table" => tablify(&matches), Some(format) => { return Err(ClientError::Other(format!("Unknown format: {}", format))) } None => tablify(&matches), } } SubCommand::Get(get_opts) => get_all(cache, get_opts).await?, SubCommand::Push(push_opts) => push_all(bindle_client, push_opts).await?, SubCommand::PushInvoice(push_opts) => { let resp = bindle_client .create_invoice_from_file(push_opts.path) .await?; println!("Invoice {} created", resp.invoice.bindle.id); } SubCommand::SignInvoice(sign_opts) => { // Role let role = if let Some(r) = sign_opts.role { role_from_name(r)? 
} else { SignatureRole::Creator }; // Keyfile let keyfile = match sign_opts.secret_file { Some(dir) => dir, None => ensure_config_dir().await?.join("secret_keys.toml"), }; // Signing key let key = first_matching_key(keyfile, &role).await?; // Load the invoice and sign it. let mut inv: Invoice = bindle::client::load::toml(sign_opts.invoice.as_str()).await?; inv.sign(role.clone(), &key)?; // Write the signed invoice to a file. let outfile = sign_opts .destination .unwrap_or_else(|| format!("./invoice-{}.toml", inv.canonical_name())); println!( "Signed as {} with role {} and wrote to {}", sign_opts.invoice, role, outfile ); tokio::fs::write(outfile, toml::to_string(&inv)?).await?; } SubCommand::PushFile(push_opts) => { let label = generate_label(&push_opts.path, push_opts.name, push_opts.media_type).await?; println!("Uploading file {} to server", push_opts.path.display()); bindle_client .create_parcel_from_file(push_opts.bindle_id, &label.sha256, push_opts.path) .await?; println!("File successfully uploaded"); } SubCommand::GenerateLabel(generate_opts) => { let label = generate_label( generate_opts.path, generate_opts.name, generate_opts.media_type, ) .await?; println!("{}", toml::to_string_pretty(&label)?); } SubCommand::PrintKey(print_key_opts) => { let dir = match print_key_opts.secret_file { Some(dir) => dir, None => ensure_config_dir().await?.join("secret_keys.toml"), }; let keyfile = SecretKeyFile::load_file(dir) .await .map_err(|e| ClientError::Other(e.to_string()))?; let matches: Vec<KeyEntry> = match print_key_opts.label { Some(name) => keyfile .key .iter() .filter_map(|k| { if !k.label.contains(&name) { return None; } match k.try_into() { //Skip malformed keys. Err(e) => { eprintln!("Warning: Malformed key: {} (skipping)", e); None } Ok(ke) => Some(ke), } }) .collect(), None => keyfile .key .iter() .filter_map(|k| match k.try_into() { //Skip malformed keys. 
Err(e) => { eprintln!("Warning: Malformed key: {} (skipping)", e); None } Ok(ke) => Some(ke), }) .collect(), }; let keyring = KeyRing::new(matches); let out = toml::to_string(&keyring).map_err(|e| ClientError::Other(e.to_string()))?; println!("{}", out); } SubCommand::CreateKey(create_opts) => { let dir = match create_opts.secret_file { Some(dir) => dir, None => ensure_config_dir().await?.join("secret_keys.toml"), }; println!("Writing keys to {}", dir.display()); match tokio::fs::metadata(&dir).await { Err(e) if matches!(e.kind(), std::io::ErrorKind::NotFound) => { println!("File {} does not exist. Creating it.", dir.display()); let mut keyfile = SecretKeyFile::default(); let newkey = SecretKeyEntry::new( create_opts.label, vec![bindle::SignatureRole::Creator], ); keyfile.key.push(newkey); keyfile .save_file(dir) .await .map_err(|e| ClientError::Other(e.to_string()))?; } Ok(info) => { if !info.is_file() { eprint!("Path must point to a file."); return Err(ClientError::Other( "Keyfile cannot be directory or symlink".to_owned(), )); } let mut keyfile = SecretKeyFile::load_file(&dir) .await .map_err(|e| ClientError::Other(e.to_string()))?; let newkey = SecretKeyEntry::new( create_opts.label, vec![bindle::SignatureRole::Creator], ); keyfile.key.push(newkey); keyfile .save_file(dir) .await .map_err(|e| ClientError::Other(e.to_string()))?; } Err(e) => return Err(e.into()), } } SubCommand::Login(_login_opts) => { // TODO: We'll use login opts when we enable additional login providers OidcToken::login(&opts.server_url, token_file).await?; println!("Login successful"); } } Ok(()) } async fn generate_label( file_path: impl AsRef<Path>, name: Option<String>, media_type: Option<String>, ) -> Result<bindle::Label> { let path = file_path.as_ref().to_owned(); let mut file = tokio::fs::File::open(&path).await?; let media_type = media_type.unwrap_or_else(|| { mime_guess::from_path(&path) .first_or_octet_stream() .to_string() }); info!("Using media type {}", media_type); // Note: 
Should be able to unwrap here because the file opening step would have // failed in conditions where this returns `None` let name = name.unwrap_or_else(|| path.file_name().unwrap().to_string_lossy().to_string()); info!("Using name {}", name); let size = file.metadata().await?.len(); let mut sha = bindle::async_util::AsyncSha256::new(); tokio::io::copy(&mut file, &mut sha).await?; let result = sha.into_inner().expect("data lock error").finalize(); Ok(bindle::Label { sha256: format!("{:x}", result), media_type, size, name, annotations: None, // TODO: allow annotations from command line ..bindle::Label::default() }) } async fn get_parcel<C: Cache + Send + Sync + Clone>(cache: C, opts: GetParcel) -> Result<()> { let parcel = cache .get_parcel(opts.bindle_id, &opts.sha) .await .map_err(map_storage_error)?; let mut file = tokio::fs::OpenOptions::new() .write(true) .create_new(true) // Make sure we aren't overwriting .open(&opts.output) .await?; tokio::io::copy( &mut StreamReader::new( parcel.map(|res| res.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))), ), &mut file, ) .await?; println!("Wrote parcel {} to {}", opts.sha, opts.output.display()); Ok(()) } async fn push_all<T: TokenManager + Send + Sync + Clone + 'static>( client: Client<T>, opts: Push, ) -> Result<()> { let standalone = StandaloneRead::new(opts.path, &opts.bindle_id).await?; standalone.push(&client).await?; println!("Pushed bindle {}", opts.bindle_id); Ok(()) } async fn get_all<C: Cache + Send + Sync + Clone>(cache: C, opts: Get) -> Result<()> { let inv = match opts.yanked { true => cache.get_invoice(opts.bindle_id), false => cache.get_yanked_invoice(opts.bindle_id), } .await .map_err(map_storage_error)?; println!("Fetched invoice. 
Starting fetch of parcels"); let parcels = Arc::new(Mutex::new(std::collections::HashMap::new())); let zero_vec = Vec::with_capacity(0); let is_export = opts.export.is_some(); let parcel_fetch = inv .parcel .as_ref() .unwrap_or(&zero_vec) .iter() .map(|p| { ( p.label.sha256.clone(), inv.bindle.id.clone(), cache.clone(), parcels.clone(), ) }) .map(|(sha, bindle_id, c, parcels)| async move { match c.get_parcel(bindle_id, &sha).await { Ok(p) => { println!("Fetched parcel {}", sha); if is_export { parcels.lock().await.insert( sha, StreamReader::new(p.map(|res| { res.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)) })), ); } } Err(e) => { match e { ProviderError::NotFound => warn!("Parcel {} does not exist", sha), ProviderError::ProxyError(err) if matches!(err, ClientError::ParcelNotFound) => { warn!("Parcel {} does not exist", sha) } // Only return an error if it isn't a not found error. By design, an invoice // can contain parcels that don't yet exist ProviderError::ProxyError(inner) => return Err(inner), _ => { return Err(ClientError::Other(format!( "Unable to get parcel {}: {:?}", sha, e ))) } } } } Ok(()) }); futures::future::join_all(parcel_fetch) .await .into_iter() .collect::<Result<Vec<_>>>()?; if let Some(p) = opts.export { let standalone = StandaloneWrite::new(p, &inv.bindle.id)?; standalone .write( inv, // All locks should be done at this point (as all futures exited), so panicing feels // right here as it is an unrecoverable condition Arc::try_unwrap(parcels) .map_err(|_| ClientError::Other("Unexpected lock error".to_string())) .unwrap() .into_inner(), ) .await?; } Ok(()) } async fn load_keyring(keyring: Option<PathBuf>) -> anyhow::Result<KeyRing> { // This takes an Option<PathBuf> because we want to wrap all of the flag handling in this // function, including setting the default if the kyering is None. 
let dir = keyring .unwrap_or_else(default_config_dir) .join("keyring.toml"); let kr = bindle::client::load::toml(dir).await?; Ok(kr) } fn map_storage_error(e: ProviderError) -> ClientError { match e { ProviderError::Io(e) => ClientError::Io(e), ProviderError::ProxyError(inner) => inner, ProviderError::InvalidId(parse_err) => ClientError::InvalidId(parse_err), _ => ClientError::Other(format!("{}", e)), } } fn default_config_dir() -> PathBuf { dirs::config_dir() .map(|v| v.join("bindle/")) .unwrap_or_else(|| "./bindle".into()) } /// Get the config dir, ensuring that it exists. /// /// This will return the default config directory. If that directory does not /// exist, it will be created before the path is returned. /// /// If the system does not have a configuration directory, this will create a directory named /// `bindle/` in the local working directory. /// /// This will return an error async fn ensure_config_dir() -> Result<PathBuf> { let dir = default_config_dir(); tokio::fs::create_dir_all(&dir).await?; Ok(dir) } fn role_from_name(name: String) -> Result<SignatureRole>
async fn first_matching_key(fpath: PathBuf, role: &SignatureRole) -> Result<SecretKeyEntry> { let keys = SecretKeyFile::load_file(&fpath).await.map_err(|e| { ClientError::Other(format!( "Error loading file {}: {}", fpath.display(), e.to_string() )) })?; keys.get_first_matching(role) .map(|k| k.to_owned()) .ok_or_else(|| ClientError::Other("No satisfactory key found".to_owned())) } fn tablify(matches: &bindle::search::Matches) { let last = matches.offset + matches.invoices.len() as u64; let trailer = if matches.more { format!(" - More results are available with --offset={}", last) } else { "".to_owned() }; for i in matches.invoices.iter() { println!( "{}:\t{}", &i.bindle.id, &i.bindle .description .clone() .unwrap_or_else(|| "[no description available]".to_string()) ) } if matches.total > 0 { println!( "=== Showing results {} to {} of {} (limit: {}){}", matches.offset + 1, last, matches.total, matches.limit, trailer, ); } else { println!("No matching bindles were found"); } }
{ match name.as_str() { "c" | "creator" => Ok(SignatureRole::Creator), "h" | "host" => Ok(SignatureRole::Host), "a" | "approver" => Ok(SignatureRole::Approver), "p" | "proxy" => Ok(SignatureRole::Proxy), _ => Err(ClientError::Other("Unknown role".to_owned())), } }
identifier_body
main.rs
use std::convert::TryInto; use std::path::{Path, PathBuf}; use std::sync::Arc; use bindle::client::{ tokens::{HttpBasic, NoToken, OidcToken, TokenManager}, Client, ClientError, Result, }; use bindle::invoice::signature::{ KeyRing, SecretKeyEntry, SecretKeyFile, SecretKeyStorage, SignatureRole, }; use bindle::invoice::Invoice; use bindle::provider::ProviderError; use bindle::signature::KeyEntry; use bindle::standalone::{StandaloneRead, StandaloneWrite}; use bindle::{ cache::{Cache, DumbCache}, provider::Provider, }; use clap::Clap; use sha2::Digest; use tokio::io::AsyncWriteExt; use tokio::sync::Mutex; use tokio_stream::StreamExt; use tokio_util::io::StreamReader; use tracing::log::{info, warn}; mod opts; use opts::*; #[tokio::main] async fn main() { // Trap and format error messages using the proper value if let Err(e) = run().await.map_err(anyhow::Error::new) { eprintln!("{}", e); for (i, cause) in e.chain().enumerate() { // Skip the first message because it is printed above. if i > 0 { if i == 1 { eprintln!("\nError trace:"); } eprintln!("\t{}: {}", i, cause); } } std::process::exit(1); } } /// An internal token type so we dynamically choose between various token types. We can't box dynner /// them for some reason, so we need to basically do our own fake dynamic dispatch. 
If this could be /// useful for other consumers of the bindle crate, we could add this in the future #[derive(Clone)] enum PickYourAuth { None(NoToken), Http(HttpBasic), Oidc(OidcToken), } #[async_trait::async_trait] impl TokenManager for PickYourAuth { async fn apply_auth_header( &self, builder: reqwest::RequestBuilder, ) -> Result<reqwest::RequestBuilder> { match &self { PickYourAuth::None(nt) => nt.apply_auth_header(builder).await, PickYourAuth::Http(h) => h.apply_auth_header(builder).await, PickYourAuth::Oidc(oidc) => oidc.apply_auth_header(builder).await, } } } async fn run() -> std::result::Result<(), ClientError> { let opts = opts::Opts::parse(); // TODO: Allow log level setting outside of RUST_LOG (this is easier with this subscriber) tracing_subscriber::fmt::init(); let bindle_dir = opts.bindle_dir.unwrap_or_else(|| { dirs::cache_dir() .expect("Unable to infer cache directory") .join("bindle") }); tokio::fs::create_dir_all(&bindle_dir).await?; let token_file = opts.token_file.unwrap_or_else(|| { dirs::config_dir() .expect("Unable to infer cache directory") .join("bindle/.token") }); // Theoretically, someone could create the token file at /, which would mean this function // wouldn't return anything. 
This isn't an error, so do not attempt to create the directory if // so if let Some(p) = token_file.parent() { tokio::fs::create_dir_all(p).await?; } let token = if !matches!(opts.subcmd, SubCommand::Login(_)) { match OidcToken::new_from_file(&token_file).await { Ok(t) => { tracing::debug!("Found and loaded token file"); PickYourAuth::Oidc(t) } // Token doesn't exist on disk, so assume they don't want token auth Err(ClientError::Io(e)) if matches!(e.kind(), std::io::ErrorKind::NotFound) => { tracing::debug!("No token file located, no token authentication will be used"); // If we have basic auth set, and no token was found, use that if let Some(user) = opts.http_user { PickYourAuth::Http(HttpBasic::new( &user, &opts.http_password.unwrap_or_default(), )) } else { PickYourAuth::None(NoToken) } } Err(e) => { let message = format!("Error loading token file {:?}: {}", &token_file, e); return Err(ClientError::InvalidConfig(message)); } } } else { PickYourAuth::None(NoToken) }; let bindle_client = Client::new(&opts.server_url, token)?; let local = bindle::provider::file::FileProvider::new( bindle_dir, bindle::search::NoopEngine::default(), ) .await; let cache = DumbCache::new(bindle_client.clone(), local); // We don't verify locally yet, but we will need the keyring to do so let _keyring = load_keyring(opts.keyring) .await .unwrap_or_else(|_| KeyRing::default()); match opts.subcmd { SubCommand::Info(info_opts) => { let inv = match info_opts.yanked { true => cache.get_invoice(info_opts.bindle_id), false => cache.get_yanked_invoice(info_opts.bindle_id), } .await .map_err(map_storage_error)?; match info_opts.output { Some(format) if &format == "toml" => { tokio::io::stdout().write_all(&toml::to_vec(&inv)?).await? } Some(format) if &format == "json" => { tokio::io::stdout() .write_all(&serde_json::to_vec_pretty(&inv)?) .await? 
} Some(format) => { return Err(ClientError::Other(format!("Unknown format: {}", format))) } None => tokio::io::stdout().write_all(&toml::to_vec(&inv)?).await?, } } SubCommand::GetInvoice(gi_opts) => { let inv = match gi_opts.yanked { true => cache.get_invoice(&gi_opts.bindle_id), false => cache.get_yanked_invoice(&gi_opts.bindle_id), } .await .map_err(map_storage_error)?; let mut file = tokio::fs::OpenOptions::new() .write(true) .create_new(true) // Make sure we aren't overwriting .open(&gi_opts.output) .await?; file.write_all(&toml::to_vec(&inv)?).await?; file.flush().await?; println!( "Wrote invoice {} to {}", gi_opts.bindle_id, gi_opts.output.display() ); } SubCommand::GetParcel(gp_opts) => get_parcel(cache, gp_opts).await?, SubCommand::Yank(yank_opts) => { bindle_client.yank_invoice(&yank_opts.bindle_id).await?; println!("Bindle {} yanked", yank_opts.bindle_id); } SubCommand::Search(search_opts) => { // TODO: Do we want to use the cache for searching? let matches = bindle_client .query_invoices(search_opts.clone().into()) .await?; match search_opts.output { Some(format) if &format == "toml" => { tokio::io::stdout() .write_all(&toml::to_vec(&matches)?) .await? } Some(format) if &format == "json" => { tokio::io::stdout() .write_all(&serde_json::to_vec_pretty(&matches)?) .await? } Some(format) if &format == "table" => tablify(&matches), Some(format) => { return Err(ClientError::Other(format!("Unknown format: {}", format))) } None => tablify(&matches), } } SubCommand::Get(get_opts) => get_all(cache, get_opts).await?, SubCommand::Push(push_opts) => push_all(bindle_client, push_opts).await?, SubCommand::PushInvoice(push_opts) => { let resp = bindle_client .create_invoice_from_file(push_opts.path) .await?; println!("Invoice {} created", resp.invoice.bindle.id); } SubCommand::SignInvoice(sign_opts) => { // Role let role = if let Some(r) = sign_opts.role { role_from_name(r)? 
} else { SignatureRole::Creator }; // Keyfile let keyfile = match sign_opts.secret_file { Some(dir) => dir, None => ensure_config_dir().await?.join("secret_keys.toml"), }; // Signing key let key = first_matching_key(keyfile, &role).await?; // Load the invoice and sign it. let mut inv: Invoice = bindle::client::load::toml(sign_opts.invoice.as_str()).await?; inv.sign(role.clone(), &key)?; // Write the signed invoice to a file. let outfile = sign_opts .destination .unwrap_or_else(|| format!("./invoice-{}.toml", inv.canonical_name())); println!( "Signed as {} with role {} and wrote to {}", sign_opts.invoice, role, outfile ); tokio::fs::write(outfile, toml::to_string(&inv)?).await?; } SubCommand::PushFile(push_opts) => { let label = generate_label(&push_opts.path, push_opts.name, push_opts.media_type).await?; println!("Uploading file {} to server", push_opts.path.display()); bindle_client .create_parcel_from_file(push_opts.bindle_id, &label.sha256, push_opts.path) .await?; println!("File successfully uploaded"); } SubCommand::GenerateLabel(generate_opts) => { let label = generate_label( generate_opts.path, generate_opts.name, generate_opts.media_type, ) .await?; println!("{}", toml::to_string_pretty(&label)?); } SubCommand::PrintKey(print_key_opts) => { let dir = match print_key_opts.secret_file { Some(dir) => dir, None => ensure_config_dir().await?.join("secret_keys.toml"), }; let keyfile = SecretKeyFile::load_file(dir) .await .map_err(|e| ClientError::Other(e.to_string()))?; let matches: Vec<KeyEntry> = match print_key_opts.label { Some(name) => keyfile .key .iter() .filter_map(|k| { if !k.label.contains(&name) { return None; } match k.try_into() { //Skip malformed keys. Err(e) => { eprintln!("Warning: Malformed key: {} (skipping)", e); None } Ok(ke) => Some(ke), } }) .collect(), None => keyfile .key .iter() .filter_map(|k| match k.try_into() { //Skip malformed keys. 
Err(e) => { eprintln!("Warning: Malformed key: {} (skipping)", e); None } Ok(ke) => Some(ke), }) .collect(), }; let keyring = KeyRing::new(matches); let out = toml::to_string(&keyring).map_err(|e| ClientError::Other(e.to_string()))?; println!("{}", out); } SubCommand::CreateKey(create_opts) => { let dir = match create_opts.secret_file { Some(dir) => dir, None => ensure_config_dir().await?.join("secret_keys.toml"), }; println!("Writing keys to {}", dir.display()); match tokio::fs::metadata(&dir).await { Err(e) if matches!(e.kind(), std::io::ErrorKind::NotFound) => { println!("File {} does not exist. Creating it.", dir.display()); let mut keyfile = SecretKeyFile::default(); let newkey = SecretKeyEntry::new( create_opts.label, vec![bindle::SignatureRole::Creator], ); keyfile.key.push(newkey); keyfile .save_file(dir) .await .map_err(|e| ClientError::Other(e.to_string()))?; } Ok(info) => { if !info.is_file() { eprint!("Path must point to a file."); return Err(ClientError::Other( "Keyfile cannot be directory or symlink".to_owned(), )); } let mut keyfile = SecretKeyFile::load_file(&dir) .await .map_err(|e| ClientError::Other(e.to_string()))?; let newkey = SecretKeyEntry::new( create_opts.label, vec![bindle::SignatureRole::Creator], ); keyfile.key.push(newkey); keyfile .save_file(dir) .await .map_err(|e| ClientError::Other(e.to_string()))?; } Err(e) => return Err(e.into()), } } SubCommand::Login(_login_opts) => { // TODO: We'll use login opts when we enable additional login providers OidcToken::login(&opts.server_url, token_file).await?; println!("Login successful"); } } Ok(()) } async fn generate_label( file_path: impl AsRef<Path>, name: Option<String>, media_type: Option<String>, ) -> Result<bindle::Label> { let path = file_path.as_ref().to_owned(); let mut file = tokio::fs::File::open(&path).await?; let media_type = media_type.unwrap_or_else(|| { mime_guess::from_path(&path) .first_or_octet_stream() .to_string() }); info!("Using media type {}", media_type); // Note: 
Should be able to unwrap here because the file opening step would have // failed in conditions where this returns `None` let name = name.unwrap_or_else(|| path.file_name().unwrap().to_string_lossy().to_string()); info!("Using name {}", name); let size = file.metadata().await?.len(); let mut sha = bindle::async_util::AsyncSha256::new(); tokio::io::copy(&mut file, &mut sha).await?; let result = sha.into_inner().expect("data lock error").finalize(); Ok(bindle::Label { sha256: format!("{:x}", result), media_type, size, name, annotations: None, // TODO: allow annotations from command line ..bindle::Label::default() }) } async fn get_parcel<C: Cache + Send + Sync + Clone>(cache: C, opts: GetParcel) -> Result<()> { let parcel = cache .get_parcel(opts.bindle_id, &opts.sha) .await .map_err(map_storage_error)?; let mut file = tokio::fs::OpenOptions::new() .write(true) .create_new(true) // Make sure we aren't overwriting .open(&opts.output) .await?; tokio::io::copy( &mut StreamReader::new( parcel.map(|res| res.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))), ), &mut file, ) .await?; println!("Wrote parcel {} to {}", opts.sha, opts.output.display()); Ok(()) } async fn push_all<T: TokenManager + Send + Sync + Clone + 'static>( client: Client<T>, opts: Push, ) -> Result<()> { let standalone = StandaloneRead::new(opts.path, &opts.bindle_id).await?; standalone.push(&client).await?; println!("Pushed bindle {}", opts.bindle_id); Ok(()) } async fn get_all<C: Cache + Send + Sync + Clone>(cache: C, opts: Get) -> Result<()> { let inv = match opts.yanked { true => cache.get_invoice(opts.bindle_id), false => cache.get_yanked_invoice(opts.bindle_id), } .await .map_err(map_storage_error)?; println!("Fetched invoice. 
Starting fetch of parcels"); let parcels = Arc::new(Mutex::new(std::collections::HashMap::new())); let zero_vec = Vec::with_capacity(0); let is_export = opts.export.is_some(); let parcel_fetch = inv .parcel .as_ref() .unwrap_or(&zero_vec) .iter() .map(|p| { ( p.label.sha256.clone(), inv.bindle.id.clone(), cache.clone(), parcels.clone(), ) }) .map(|(sha, bindle_id, c, parcels)| async move { match c.get_parcel(bindle_id, &sha).await { Ok(p) => { println!("Fetched parcel {}", sha); if is_export { parcels.lock().await.insert( sha, StreamReader::new(p.map(|res| { res.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)) })), ); } } Err(e) => { match e { ProviderError::NotFound => warn!("Parcel {} does not exist", sha), ProviderError::ProxyError(err) if matches!(err, ClientError::ParcelNotFound) => { warn!("Parcel {} does not exist", sha) } // Only return an error if it isn't a not found error. By design, an invoice // can contain parcels that don't yet exist ProviderError::ProxyError(inner) => return Err(inner), _ => { return Err(ClientError::Other(format!( "Unable to get parcel {}: {:?}", sha, e ))) } } } } Ok(()) }); futures::future::join_all(parcel_fetch) .await .into_iter() .collect::<Result<Vec<_>>>()?; if let Some(p) = opts.export { let standalone = StandaloneWrite::new(p, &inv.bindle.id)?; standalone .write( inv, // All locks should be done at this point (as all futures exited), so panicing feels // right here as it is an unrecoverable condition Arc::try_unwrap(parcels) .map_err(|_| ClientError::Other("Unexpected lock error".to_string())) .unwrap() .into_inner(), ) .await?; } Ok(()) } async fn
(keyring: Option<PathBuf>) -> anyhow::Result<KeyRing> { // This takes an Option<PathBuf> because we want to wrap all of the flag handling in this // function, including setting the default if the kyering is None. let dir = keyring .unwrap_or_else(default_config_dir) .join("keyring.toml"); let kr = bindle::client::load::toml(dir).await?; Ok(kr) } fn map_storage_error(e: ProviderError) -> ClientError { match e { ProviderError::Io(e) => ClientError::Io(e), ProviderError::ProxyError(inner) => inner, ProviderError::InvalidId(parse_err) => ClientError::InvalidId(parse_err), _ => ClientError::Other(format!("{}", e)), } } fn default_config_dir() -> PathBuf { dirs::config_dir() .map(|v| v.join("bindle/")) .unwrap_or_else(|| "./bindle".into()) } /// Get the config dir, ensuring that it exists. /// /// This will return the default config directory. If that directory does not /// exist, it will be created before the path is returned. /// /// If the system does not have a configuration directory, this will create a directory named /// `bindle/` in the local working directory. 
/// /// This will return an error async fn ensure_config_dir() -> Result<PathBuf> { let dir = default_config_dir(); tokio::fs::create_dir_all(&dir).await?; Ok(dir) } fn role_from_name(name: String) -> Result<SignatureRole> { match name.as_str() { "c" | "creator" => Ok(SignatureRole::Creator), "h" | "host" => Ok(SignatureRole::Host), "a" | "approver" => Ok(SignatureRole::Approver), "p" | "proxy" => Ok(SignatureRole::Proxy), _ => Err(ClientError::Other("Unknown role".to_owned())), } } async fn first_matching_key(fpath: PathBuf, role: &SignatureRole) -> Result<SecretKeyEntry> { let keys = SecretKeyFile::load_file(&fpath).await.map_err(|e| { ClientError::Other(format!( "Error loading file {}: {}", fpath.display(), e.to_string() )) })?; keys.get_first_matching(role) .map(|k| k.to_owned()) .ok_or_else(|| ClientError::Other("No satisfactory key found".to_owned())) } fn tablify(matches: &bindle::search::Matches) { let last = matches.offset + matches.invoices.len() as u64; let trailer = if matches.more { format!(" - More results are available with --offset={}", last) } else { "".to_owned() }; for i in matches.invoices.iter() { println!( "{}:\t{}", &i.bindle.id, &i.bindle .description .clone() .unwrap_or_else(|| "[no description available]".to_string()) ) } if matches.total > 0 { println!( "=== Showing results {} to {} of {} (limit: {}){}", matches.offset + 1, last, matches.total, matches.limit, trailer, ); } else { println!("No matching bindles were found"); } }
load_keyring
identifier_name
fsevents.rs
#![allow(non_camel_case_types, non_uppercase_statics)] // C types use std::collections::{HashSet}; use std::c_str::CString; use std::io::{IoError, IoResult}; use std::io::fs::PathExtensions; use std::mem; use std::ptr; use std::raw::Slice; use std::os; use std::io::{Timer}; use std::time::Duration; //use super; use libc::{c_void, c_char, c_int, ENOENT}; use sync::{Arc, Mutex}; #[repr(C)] enum CFStringBuiltInEncodings { kCFStringEncodingUnicode = 0x01000000, kCFStringEncodingUTF8 = 0x08000100, } static kFSEventStreamCreateFlagNoDefer: u32 = 0x00000002; static kFSEventStreamCreateFlagFileEvents: u32 = 0x00000010; #[deriving(Show)] enum Event { Create(String), Remove(String), //ModifyMeta, Modify(String), RenameOld(String), RenameNew(String), } enum Control { Update(HashSet<String>), Exit, } #[repr(C)] struct FSEventStreamContext { version: c_int, info: *mut c_void, retain: *const c_void, release: *const c_void, desc: *const c_void, } type callback_t = extern "C" fn( stream: *const c_void, info: *const c_void, size: c_int, paths: *const *const i8, events: *const u32, ids: *const u64 ); #[repr(C)] enum FSEventStreamEventFlags { //kFSEventStreamEventFlagNone = 0x00000000, //kFSEventStreamEventFlagMustScanSubDirs = 0x00000001, //kFSEventStreamEventFlagUserDropped = 0x00000002, //kFSEventStreamEventFlagKernelDropped = 0x00000004, //kFSEventStreamEventFlagEventIdsWrapped = 0x00000008, //kFSEventStreamEventFlagHistoryDone = 0x00000010, //kFSEventStreamEventFlagRootChanged = 0x00000020, //kFSEventStreamEventFlagMount = 0x00000040, //kFSEventStreamEventFlagUnmount = 0x00000080, kFSEventStreamEventFlagItemCreated = 0x00000100, kFSEventStreamEventFlagItemRemoved = 0x00000200, //kFSEventStreamEventFlagItemInodeMetaMod = 0x00000400, kFSEventStreamEventFlagItemRenamed = 0x00000800, //kFSEventStreamEventFlagItemModified = 0x00001000, //kFSEventStreamEventFlagItemFinderInfoMod = 0x00002000, //kFSEventStreamEventFlagItemChangeOwner = 0x00004000, //kFSEventStreamEventFlagItemXattrMod 
= 0x00008000, kFSEventStreamEventFlagItemIsFile = 0x00010000, //kFSEventStreamEventFlagItemIsDir = 0x00020000, //kFSEventStreamEventFlagItemIsSymlink = 0x00040000, //kFSEventStreamEventFlagOwnEvent = 0x00080000 } static kFSEventStreamEventIdSinceNow: u64 = 0xFFFFFFFFFFFFFFFF; fn has_flag(event: u32, expected: FSEventStreamEventFlags) -> bool { event & expected as u32 == expected as u32 } extern "C" fn callback(_stream: *const c_void, info: *const c_void, size: c_int, paths: *const *const i8, events: *const u32, ids: *const u64) { let tx: &mut Sender<Event> = unsafe { &mut *(info as *mut Sender<Event>) };
data: events, len: size as uint, }) }; let ids: &[u64] = unsafe { mem::transmute(Slice { data: ids, len: size as uint, }) }; let paths: &[*const i8] = unsafe { mem::transmute(Slice { data: paths, len: size as uint, }) }; let paths = Vec::from_fn(size as uint, |id| { unsafe { CString::new(paths[id], false) } }); let mut renamed = false; for id in range(0, size as uint) { debug!("Received filesystem event: [id: {}, ev: {}] from '{}'", ids[id], events[id], paths[id]); let event = events[id]; let path = String::from_str(paths[id].as_str().unwrap()); if event & kFSEventStreamEventFlagItemIsFile as u32 == 0 { continue; } let path_ = Path::new(path.as_slice()); if has_flag(event, kFSEventStreamEventFlagItemCreated) && path_.exists() { tx.send(Create(path.clone())); } if has_flag(event, kFSEventStreamEventFlagItemRemoved) && !path_.exists() { tx.send(Remove(path.clone())); } if has_flag(event, kFSEventStreamEventFlagItemRenamed) { if renamed { tx.send(RenameOld(path)); } else { tx.send(RenameNew(path)); } renamed = !renamed; } } } struct CoreFoundationString { d: *const c_void, } impl CoreFoundationString { fn new(string: &str) -> CoreFoundationString { CoreFoundationString { d: unsafe { CFStringCreateWithCString( kCFAllocatorDefault, string.to_c_str().as_ptr(), kCFStringEncodingUTF8 ) } } } } impl Drop for CoreFoundationString { fn drop(&mut self) { unsafe { CFRelease(self.d) } } } struct CoreFoundationArray { d: *const c_void, items: Vec<CoreFoundationString>, // It's a RAII container. 
} impl CoreFoundationArray { fn new(collection: &HashSet<String>) -> CoreFoundationArray { let d = unsafe { CFArrayCreateMutable( kCFAllocatorDefault, collection.len() as i32, ptr::null::<c_void>() ) }; let mut items = Vec::new(); for item in collection.iter() { let item = CoreFoundationString::new(item.as_slice()); unsafe { CFArrayAppendValue(d, item.d); } items.push(item); } CoreFoundationArray { d: d, items: items, } } } impl Drop for CoreFoundationArray { fn drop(&mut self) { self.items.clear(); unsafe { CFRelease(self.d) } } } fn recreate_stream(eventloop: *mut c_void, context: *const FSEventStreamContext, paths: HashSet<String>) -> *mut c_void { let paths = CoreFoundationArray::new(&paths); let latency = 0.05f64; let stream = unsafe { FSEventStreamCreate( kCFAllocatorDefault, callback, context, paths.d, kFSEventStreamEventIdSinceNow, latency, kFSEventStreamCreateFlagFileEvents ) }; unsafe { FSEventStreamScheduleWithRunLoop(stream, eventloop, kCFRunLoopDefaultMode); FSEventStreamStart(stream); stream } } pub struct Watcher { pub rx: Receiver<Event>, ctx: SyncSender<Control>, paths: HashSet<String>, stream: Arc<Mutex<*mut c_void>>, eventloop: Arc<Mutex<*mut c_void>>, } impl Watcher { pub fn new() -> Watcher { let (mut tx, rx) = channel::<Event>(); let (ctx, crx) = sync_channel::<Control>(0); let eventloop = Arc::new(Mutex::new(ptr::null_mut::<c_void>())); let stream = Arc::new(Mutex::new(ptr::null_mut::<c_void>())); let watcher = Watcher { rx: rx, ctx: ctx, paths: HashSet::new(), stream: stream.clone(), eventloop: eventloop.clone(), }; spawn(proc() { debug!("Starting watcher thread ..."); unsafe { *eventloop.lock() = CFRunLoopGetCurrent(); let tx: *mut c_void = &mut tx as *mut _ as *mut c_void; let context = FSEventStreamContext { version: 0, info: tx, retain: ptr::null::<c_void>(), release: ptr::null::<c_void>(), desc: ptr::null::<c_void>(), }; loop { debug!("New watcher loop iteration"); match crx.recv() { Update(paths) => { debug!("Updating watcher loop with 
{}", paths); *stream.lock() = recreate_stream(*eventloop.lock(), &context, paths); CFRunLoopRun(); } Exit => { debug!("Received watcher exit event - performing graceful shutdown"); break } } } } }); watcher } pub fn watch(&mut self, path: &Path) -> IoResult<()> { if path.exists() { debug!("Adding '{}' to the watch", path.display()); let path = os::make_absolute(path); let path = match path.as_str() { Some(path) => String::from_str(path), None => return Err(IoError::from_errno(ENOENT as uint, false)) }; self.paths.insert(path.clone()); self.update(); Ok(()) } else { Err(IoError::from_errno(ENOENT as uint, false)) } } pub fn unwatch(&mut self, path: &String) -> IoResult<()> { self.paths.remove(path); self.update(); Ok(()) } fn update(&self) { self.stop_stream(); self.ctx.send(Update(self.paths.clone())); } fn stop_stream(&self) { let mut stream = self.stream.lock(); if !(*stream).is_null() { unsafe { FSEventStreamStop(*stream); FSEventStreamInvalidate(*stream); FSEventStreamRelease(*stream); CFRunLoopWakeUp(*self.eventloop.lock()); } } } } impl Drop for Watcher { fn drop(&mut self) { debug!("dropping! 
{:p}", self); self.stop_stream(); self.ctx.send(Exit); } } #[link(name = "Carbon", kind = "framework")] #[link(name = "CoreFoundation", kind = "framework")] extern { static kCFAllocatorDefault: *mut c_void; static kCFRunLoopDefaultMode: *mut c_void; fn CFStringCreateWithCString(allocator: *mut c_void, string: *const c_char, encoding: CFStringBuiltInEncodings) -> *const c_void; fn CFArrayCreateMutable(allocator: *mut c_void, size: c_int, callbacks: *const c_void) -> *const c_void; fn CFArrayAppendValue(array: *const c_void, value: *const c_void); fn FSEventStreamCreate(allocator: *mut c_void, cb: callback_t, context: *const FSEventStreamContext, paths: *const c_void, since: u64, latency: f64, flags: u32) -> *mut c_void; fn FSEventStreamScheduleWithRunLoop(stream: *mut c_void, eventloop: *mut c_void, mode: *mut c_void); fn FSEventStreamStart(stream: *mut c_void); fn FSEventStreamStop(stream: *mut c_void); fn FSEventStreamInvalidate(stream: *mut c_void); fn FSEventStreamRelease(stream: *mut c_void); fn CFRunLoopGetCurrent() -> *mut c_void; fn CFRunLoopRun(); fn CFRunLoopWakeUp(ev: *mut c_void); fn CFRelease(p: *const c_void); } pub struct Backend { pub watcher: Watcher, } impl Backend { pub fn new(period: Duration) -> Backend { Backend { watcher: Watcher::new() } } pub fn register(&mut self, paths: HashSet<Path>) { for path in paths.iter() { self.watcher.watch(path); } } pub fn transform(&self, ev: Event) -> super::Event { super::Unknown } }
let events: &[u32] = unsafe { mem::transmute(Slice {
random_line_split
fsevents.rs
#![allow(non_camel_case_types, non_uppercase_statics)] // C types use std::collections::{HashSet}; use std::c_str::CString; use std::io::{IoError, IoResult}; use std::io::fs::PathExtensions; use std::mem; use std::ptr; use std::raw::Slice; use std::os; use std::io::{Timer}; use std::time::Duration; //use super; use libc::{c_void, c_char, c_int, ENOENT}; use sync::{Arc, Mutex}; #[repr(C)] enum CFStringBuiltInEncodings { kCFStringEncodingUnicode = 0x01000000, kCFStringEncodingUTF8 = 0x08000100, } static kFSEventStreamCreateFlagNoDefer: u32 = 0x00000002; static kFSEventStreamCreateFlagFileEvents: u32 = 0x00000010; #[deriving(Show)] enum Event { Create(String), Remove(String), //ModifyMeta, Modify(String), RenameOld(String), RenameNew(String), } enum Control { Update(HashSet<String>), Exit, } #[repr(C)] struct FSEventStreamContext { version: c_int, info: *mut c_void, retain: *const c_void, release: *const c_void, desc: *const c_void, } type callback_t = extern "C" fn( stream: *const c_void, info: *const c_void, size: c_int, paths: *const *const i8, events: *const u32, ids: *const u64 ); #[repr(C)] enum FSEventStreamEventFlags { //kFSEventStreamEventFlagNone = 0x00000000, //kFSEventStreamEventFlagMustScanSubDirs = 0x00000001, //kFSEventStreamEventFlagUserDropped = 0x00000002, //kFSEventStreamEventFlagKernelDropped = 0x00000004, //kFSEventStreamEventFlagEventIdsWrapped = 0x00000008, //kFSEventStreamEventFlagHistoryDone = 0x00000010, //kFSEventStreamEventFlagRootChanged = 0x00000020, //kFSEventStreamEventFlagMount = 0x00000040, //kFSEventStreamEventFlagUnmount = 0x00000080, kFSEventStreamEventFlagItemCreated = 0x00000100, kFSEventStreamEventFlagItemRemoved = 0x00000200, //kFSEventStreamEventFlagItemInodeMetaMod = 0x00000400, kFSEventStreamEventFlagItemRenamed = 0x00000800, //kFSEventStreamEventFlagItemModified = 0x00001000, //kFSEventStreamEventFlagItemFinderInfoMod = 0x00002000, //kFSEventStreamEventFlagItemChangeOwner = 0x00004000, //kFSEventStreamEventFlagItemXattrMod 
= 0x00008000, kFSEventStreamEventFlagItemIsFile = 0x00010000, //kFSEventStreamEventFlagItemIsDir = 0x00020000, //kFSEventStreamEventFlagItemIsSymlink = 0x00040000, //kFSEventStreamEventFlagOwnEvent = 0x00080000 } static kFSEventStreamEventIdSinceNow: u64 = 0xFFFFFFFFFFFFFFFF; fn has_flag(event: u32, expected: FSEventStreamEventFlags) -> bool { event & expected as u32 == expected as u32 } extern "C" fn callback(_stream: *const c_void, info: *const c_void, size: c_int, paths: *const *const i8, events: *const u32, ids: *const u64) { let tx: &mut Sender<Event> = unsafe { &mut *(info as *mut Sender<Event>) }; let events: &[u32] = unsafe { mem::transmute(Slice { data: events, len: size as uint, }) }; let ids: &[u64] = unsafe { mem::transmute(Slice { data: ids, len: size as uint, }) }; let paths: &[*const i8] = unsafe { mem::transmute(Slice { data: paths, len: size as uint, }) }; let paths = Vec::from_fn(size as uint, |id| { unsafe { CString::new(paths[id], false) } }); let mut renamed = false; for id in range(0, size as uint) { debug!("Received filesystem event: [id: {}, ev: {}] from '{}'", ids[id], events[id], paths[id]); let event = events[id]; let path = String::from_str(paths[id].as_str().unwrap()); if event & kFSEventStreamEventFlagItemIsFile as u32 == 0 { continue; } let path_ = Path::new(path.as_slice()); if has_flag(event, kFSEventStreamEventFlagItemCreated) && path_.exists() { tx.send(Create(path.clone())); } if has_flag(event, kFSEventStreamEventFlagItemRemoved) && !path_.exists() { tx.send(Remove(path.clone())); } if has_flag(event, kFSEventStreamEventFlagItemRenamed) { if renamed { tx.send(RenameOld(path)); } else { tx.send(RenameNew(path)); } renamed = !renamed; } } } struct CoreFoundationString { d: *const c_void, } impl CoreFoundationString { fn new(string: &str) -> CoreFoundationString { CoreFoundationString { d: unsafe { CFStringCreateWithCString( kCFAllocatorDefault, string.to_c_str().as_ptr(), kCFStringEncodingUTF8 ) } } } } impl Drop for 
CoreFoundationString { fn drop(&mut self) { unsafe { CFRelease(self.d) } } } struct CoreFoundationArray { d: *const c_void, items: Vec<CoreFoundationString>, // It's a RAII container. } impl CoreFoundationArray { fn new(collection: &HashSet<String>) -> CoreFoundationArray { let d = unsafe { CFArrayCreateMutable( kCFAllocatorDefault, collection.len() as i32, ptr::null::<c_void>() ) }; let mut items = Vec::new(); for item in collection.iter() { let item = CoreFoundationString::new(item.as_slice()); unsafe { CFArrayAppendValue(d, item.d); } items.push(item); } CoreFoundationArray { d: d, items: items, } } } impl Drop for CoreFoundationArray { fn drop(&mut self) { self.items.clear(); unsafe { CFRelease(self.d) } } } fn recreate_stream(eventloop: *mut c_void, context: *const FSEventStreamContext, paths: HashSet<String>) -> *mut c_void { let paths = CoreFoundationArray::new(&paths); let latency = 0.05f64; let stream = unsafe { FSEventStreamCreate( kCFAllocatorDefault, callback, context, paths.d, kFSEventStreamEventIdSinceNow, latency, kFSEventStreamCreateFlagFileEvents ) }; unsafe { FSEventStreamScheduleWithRunLoop(stream, eventloop, kCFRunLoopDefaultMode); FSEventStreamStart(stream); stream } } pub struct Watcher { pub rx: Receiver<Event>, ctx: SyncSender<Control>, paths: HashSet<String>, stream: Arc<Mutex<*mut c_void>>, eventloop: Arc<Mutex<*mut c_void>>, } impl Watcher { pub fn new() -> Watcher { let (mut tx, rx) = channel::<Event>(); let (ctx, crx) = sync_channel::<Control>(0); let eventloop = Arc::new(Mutex::new(ptr::null_mut::<c_void>())); let stream = Arc::new(Mutex::new(ptr::null_mut::<c_void>())); let watcher = Watcher { rx: rx, ctx: ctx, paths: HashSet::new(), stream: stream.clone(), eventloop: eventloop.clone(), }; spawn(proc() { debug!("Starting watcher thread ..."); unsafe { *eventloop.lock() = CFRunLoopGetCurrent(); let tx: *mut c_void = &mut tx as *mut _ as *mut c_void; let context = FSEventStreamContext { version: 0, info: tx, retain: 
ptr::null::<c_void>(), release: ptr::null::<c_void>(), desc: ptr::null::<c_void>(), }; loop { debug!("New watcher loop iteration"); match crx.recv() { Update(paths) =>
Exit => { debug!("Received watcher exit event - performing graceful shutdown"); break } } } } }); watcher } pub fn watch(&mut self, path: &Path) -> IoResult<()> { if path.exists() { debug!("Adding '{}' to the watch", path.display()); let path = os::make_absolute(path); let path = match path.as_str() { Some(path) => String::from_str(path), None => return Err(IoError::from_errno(ENOENT as uint, false)) }; self.paths.insert(path.clone()); self.update(); Ok(()) } else { Err(IoError::from_errno(ENOENT as uint, false)) } } pub fn unwatch(&mut self, path: &String) -> IoResult<()> { self.paths.remove(path); self.update(); Ok(()) } fn update(&self) { self.stop_stream(); self.ctx.send(Update(self.paths.clone())); } fn stop_stream(&self) { let mut stream = self.stream.lock(); if !(*stream).is_null() { unsafe { FSEventStreamStop(*stream); FSEventStreamInvalidate(*stream); FSEventStreamRelease(*stream); CFRunLoopWakeUp(*self.eventloop.lock()); } } } } impl Drop for Watcher { fn drop(&mut self) { debug!("dropping! 
{:p}", self); self.stop_stream(); self.ctx.send(Exit); } } #[link(name = "Carbon", kind = "framework")] #[link(name = "CoreFoundation", kind = "framework")] extern { static kCFAllocatorDefault: *mut c_void; static kCFRunLoopDefaultMode: *mut c_void; fn CFStringCreateWithCString(allocator: *mut c_void, string: *const c_char, encoding: CFStringBuiltInEncodings) -> *const c_void; fn CFArrayCreateMutable(allocator: *mut c_void, size: c_int, callbacks: *const c_void) -> *const c_void; fn CFArrayAppendValue(array: *const c_void, value: *const c_void); fn FSEventStreamCreate(allocator: *mut c_void, cb: callback_t, context: *const FSEventStreamContext, paths: *const c_void, since: u64, latency: f64, flags: u32) -> *mut c_void; fn FSEventStreamScheduleWithRunLoop(stream: *mut c_void, eventloop: *mut c_void, mode: *mut c_void); fn FSEventStreamStart(stream: *mut c_void); fn FSEventStreamStop(stream: *mut c_void); fn FSEventStreamInvalidate(stream: *mut c_void); fn FSEventStreamRelease(stream: *mut c_void); fn CFRunLoopGetCurrent() -> *mut c_void; fn CFRunLoopRun(); fn CFRunLoopWakeUp(ev: *mut c_void); fn CFRelease(p: *const c_void); } pub struct Backend { pub watcher: Watcher, } impl Backend { pub fn new(period: Duration) -> Backend { Backend { watcher: Watcher::new() } } pub fn register(&mut self, paths: HashSet<Path>) { for path in paths.iter() { self.watcher.watch(path); } } pub fn transform(&self, ev: Event) -> super::Event { super::Unknown } }
{ debug!("Updating watcher loop with {}", paths); *stream.lock() = recreate_stream(*eventloop.lock(), &context, paths); CFRunLoopRun(); }
conditional_block
fsevents.rs
#![allow(non_camel_case_types, non_uppercase_statics)] // C types use std::collections::{HashSet}; use std::c_str::CString; use std::io::{IoError, IoResult}; use std::io::fs::PathExtensions; use std::mem; use std::ptr; use std::raw::Slice; use std::os; use std::io::{Timer}; use std::time::Duration; //use super; use libc::{c_void, c_char, c_int, ENOENT}; use sync::{Arc, Mutex}; #[repr(C)] enum CFStringBuiltInEncodings { kCFStringEncodingUnicode = 0x01000000, kCFStringEncodingUTF8 = 0x08000100, } static kFSEventStreamCreateFlagNoDefer: u32 = 0x00000002; static kFSEventStreamCreateFlagFileEvents: u32 = 0x00000010; #[deriving(Show)] enum
{ Create(String), Remove(String), //ModifyMeta, Modify(String), RenameOld(String), RenameNew(String), } enum Control { Update(HashSet<String>), Exit, } #[repr(C)] struct FSEventStreamContext { version: c_int, info: *mut c_void, retain: *const c_void, release: *const c_void, desc: *const c_void, } type callback_t = extern "C" fn( stream: *const c_void, info: *const c_void, size: c_int, paths: *const *const i8, events: *const u32, ids: *const u64 ); #[repr(C)] enum FSEventStreamEventFlags { //kFSEventStreamEventFlagNone = 0x00000000, //kFSEventStreamEventFlagMustScanSubDirs = 0x00000001, //kFSEventStreamEventFlagUserDropped = 0x00000002, //kFSEventStreamEventFlagKernelDropped = 0x00000004, //kFSEventStreamEventFlagEventIdsWrapped = 0x00000008, //kFSEventStreamEventFlagHistoryDone = 0x00000010, //kFSEventStreamEventFlagRootChanged = 0x00000020, //kFSEventStreamEventFlagMount = 0x00000040, //kFSEventStreamEventFlagUnmount = 0x00000080, kFSEventStreamEventFlagItemCreated = 0x00000100, kFSEventStreamEventFlagItemRemoved = 0x00000200, //kFSEventStreamEventFlagItemInodeMetaMod = 0x00000400, kFSEventStreamEventFlagItemRenamed = 0x00000800, //kFSEventStreamEventFlagItemModified = 0x00001000, //kFSEventStreamEventFlagItemFinderInfoMod = 0x00002000, //kFSEventStreamEventFlagItemChangeOwner = 0x00004000, //kFSEventStreamEventFlagItemXattrMod = 0x00008000, kFSEventStreamEventFlagItemIsFile = 0x00010000, //kFSEventStreamEventFlagItemIsDir = 0x00020000, //kFSEventStreamEventFlagItemIsSymlink = 0x00040000, //kFSEventStreamEventFlagOwnEvent = 0x00080000 } static kFSEventStreamEventIdSinceNow: u64 = 0xFFFFFFFFFFFFFFFF; fn has_flag(event: u32, expected: FSEventStreamEventFlags) -> bool { event & expected as u32 == expected as u32 } extern "C" fn callback(_stream: *const c_void, info: *const c_void, size: c_int, paths: *const *const i8, events: *const u32, ids: *const u64) { let tx: &mut Sender<Event> = unsafe { &mut *(info as *mut Sender<Event>) }; let events: &[u32] = unsafe { 
mem::transmute(Slice { data: events, len: size as uint, }) }; let ids: &[u64] = unsafe { mem::transmute(Slice { data: ids, len: size as uint, }) }; let paths: &[*const i8] = unsafe { mem::transmute(Slice { data: paths, len: size as uint, }) }; let paths = Vec::from_fn(size as uint, |id| { unsafe { CString::new(paths[id], false) } }); let mut renamed = false; for id in range(0, size as uint) { debug!("Received filesystem event: [id: {}, ev: {}] from '{}'", ids[id], events[id], paths[id]); let event = events[id]; let path = String::from_str(paths[id].as_str().unwrap()); if event & kFSEventStreamEventFlagItemIsFile as u32 == 0 { continue; } let path_ = Path::new(path.as_slice()); if has_flag(event, kFSEventStreamEventFlagItemCreated) && path_.exists() { tx.send(Create(path.clone())); } if has_flag(event, kFSEventStreamEventFlagItemRemoved) && !path_.exists() { tx.send(Remove(path.clone())); } if has_flag(event, kFSEventStreamEventFlagItemRenamed) { if renamed { tx.send(RenameOld(path)); } else { tx.send(RenameNew(path)); } renamed = !renamed; } } } struct CoreFoundationString { d: *const c_void, } impl CoreFoundationString { fn new(string: &str) -> CoreFoundationString { CoreFoundationString { d: unsafe { CFStringCreateWithCString( kCFAllocatorDefault, string.to_c_str().as_ptr(), kCFStringEncodingUTF8 ) } } } } impl Drop for CoreFoundationString { fn drop(&mut self) { unsafe { CFRelease(self.d) } } } struct CoreFoundationArray { d: *const c_void, items: Vec<CoreFoundationString>, // It's a RAII container. 
} impl CoreFoundationArray { fn new(collection: &HashSet<String>) -> CoreFoundationArray { let d = unsafe { CFArrayCreateMutable( kCFAllocatorDefault, collection.len() as i32, ptr::null::<c_void>() ) }; let mut items = Vec::new(); for item in collection.iter() { let item = CoreFoundationString::new(item.as_slice()); unsafe { CFArrayAppendValue(d, item.d); } items.push(item); } CoreFoundationArray { d: d, items: items, } } } impl Drop for CoreFoundationArray { fn drop(&mut self) { self.items.clear(); unsafe { CFRelease(self.d) } } } fn recreate_stream(eventloop: *mut c_void, context: *const FSEventStreamContext, paths: HashSet<String>) -> *mut c_void { let paths = CoreFoundationArray::new(&paths); let latency = 0.05f64; let stream = unsafe { FSEventStreamCreate( kCFAllocatorDefault, callback, context, paths.d, kFSEventStreamEventIdSinceNow, latency, kFSEventStreamCreateFlagFileEvents ) }; unsafe { FSEventStreamScheduleWithRunLoop(stream, eventloop, kCFRunLoopDefaultMode); FSEventStreamStart(stream); stream } } pub struct Watcher { pub rx: Receiver<Event>, ctx: SyncSender<Control>, paths: HashSet<String>, stream: Arc<Mutex<*mut c_void>>, eventloop: Arc<Mutex<*mut c_void>>, } impl Watcher { pub fn new() -> Watcher { let (mut tx, rx) = channel::<Event>(); let (ctx, crx) = sync_channel::<Control>(0); let eventloop = Arc::new(Mutex::new(ptr::null_mut::<c_void>())); let stream = Arc::new(Mutex::new(ptr::null_mut::<c_void>())); let watcher = Watcher { rx: rx, ctx: ctx, paths: HashSet::new(), stream: stream.clone(), eventloop: eventloop.clone(), }; spawn(proc() { debug!("Starting watcher thread ..."); unsafe { *eventloop.lock() = CFRunLoopGetCurrent(); let tx: *mut c_void = &mut tx as *mut _ as *mut c_void; let context = FSEventStreamContext { version: 0, info: tx, retain: ptr::null::<c_void>(), release: ptr::null::<c_void>(), desc: ptr::null::<c_void>(), }; loop { debug!("New watcher loop iteration"); match crx.recv() { Update(paths) => { debug!("Updating watcher loop with 
{}", paths); *stream.lock() = recreate_stream(*eventloop.lock(), &context, paths); CFRunLoopRun(); } Exit => { debug!("Received watcher exit event - performing graceful shutdown"); break } } } } }); watcher } pub fn watch(&mut self, path: &Path) -> IoResult<()> { if path.exists() { debug!("Adding '{}' to the watch", path.display()); let path = os::make_absolute(path); let path = match path.as_str() { Some(path) => String::from_str(path), None => return Err(IoError::from_errno(ENOENT as uint, false)) }; self.paths.insert(path.clone()); self.update(); Ok(()) } else { Err(IoError::from_errno(ENOENT as uint, false)) } } pub fn unwatch(&mut self, path: &String) -> IoResult<()> { self.paths.remove(path); self.update(); Ok(()) } fn update(&self) { self.stop_stream(); self.ctx.send(Update(self.paths.clone())); } fn stop_stream(&self) { let mut stream = self.stream.lock(); if !(*stream).is_null() { unsafe { FSEventStreamStop(*stream); FSEventStreamInvalidate(*stream); FSEventStreamRelease(*stream); CFRunLoopWakeUp(*self.eventloop.lock()); } } } } impl Drop for Watcher { fn drop(&mut self) { debug!("dropping! 
{:p}", self); self.stop_stream(); self.ctx.send(Exit); } } #[link(name = "Carbon", kind = "framework")] #[link(name = "CoreFoundation", kind = "framework")] extern { static kCFAllocatorDefault: *mut c_void; static kCFRunLoopDefaultMode: *mut c_void; fn CFStringCreateWithCString(allocator: *mut c_void, string: *const c_char, encoding: CFStringBuiltInEncodings) -> *const c_void; fn CFArrayCreateMutable(allocator: *mut c_void, size: c_int, callbacks: *const c_void) -> *const c_void; fn CFArrayAppendValue(array: *const c_void, value: *const c_void); fn FSEventStreamCreate(allocator: *mut c_void, cb: callback_t, context: *const FSEventStreamContext, paths: *const c_void, since: u64, latency: f64, flags: u32) -> *mut c_void; fn FSEventStreamScheduleWithRunLoop(stream: *mut c_void, eventloop: *mut c_void, mode: *mut c_void); fn FSEventStreamStart(stream: *mut c_void); fn FSEventStreamStop(stream: *mut c_void); fn FSEventStreamInvalidate(stream: *mut c_void); fn FSEventStreamRelease(stream: *mut c_void); fn CFRunLoopGetCurrent() -> *mut c_void; fn CFRunLoopRun(); fn CFRunLoopWakeUp(ev: *mut c_void); fn CFRelease(p: *const c_void); } pub struct Backend { pub watcher: Watcher, } impl Backend { pub fn new(period: Duration) -> Backend { Backend { watcher: Watcher::new() } } pub fn register(&mut self, paths: HashSet<Path>) { for path in paths.iter() { self.watcher.watch(path); } } pub fn transform(&self, ev: Event) -> super::Event { super::Unknown } }
Event
identifier_name
fsevents.rs
#![allow(non_camel_case_types, non_uppercase_statics)] // C types use std::collections::{HashSet}; use std::c_str::CString; use std::io::{IoError, IoResult}; use std::io::fs::PathExtensions; use std::mem; use std::ptr; use std::raw::Slice; use std::os; use std::io::{Timer}; use std::time::Duration; //use super; use libc::{c_void, c_char, c_int, ENOENT}; use sync::{Arc, Mutex}; #[repr(C)] enum CFStringBuiltInEncodings { kCFStringEncodingUnicode = 0x01000000, kCFStringEncodingUTF8 = 0x08000100, } static kFSEventStreamCreateFlagNoDefer: u32 = 0x00000002; static kFSEventStreamCreateFlagFileEvents: u32 = 0x00000010; #[deriving(Show)] enum Event { Create(String), Remove(String), //ModifyMeta, Modify(String), RenameOld(String), RenameNew(String), } enum Control { Update(HashSet<String>), Exit, } #[repr(C)] struct FSEventStreamContext { version: c_int, info: *mut c_void, retain: *const c_void, release: *const c_void, desc: *const c_void, } type callback_t = extern "C" fn( stream: *const c_void, info: *const c_void, size: c_int, paths: *const *const i8, events: *const u32, ids: *const u64 ); #[repr(C)] enum FSEventStreamEventFlags { //kFSEventStreamEventFlagNone = 0x00000000, //kFSEventStreamEventFlagMustScanSubDirs = 0x00000001, //kFSEventStreamEventFlagUserDropped = 0x00000002, //kFSEventStreamEventFlagKernelDropped = 0x00000004, //kFSEventStreamEventFlagEventIdsWrapped = 0x00000008, //kFSEventStreamEventFlagHistoryDone = 0x00000010, //kFSEventStreamEventFlagRootChanged = 0x00000020, //kFSEventStreamEventFlagMount = 0x00000040, //kFSEventStreamEventFlagUnmount = 0x00000080, kFSEventStreamEventFlagItemCreated = 0x00000100, kFSEventStreamEventFlagItemRemoved = 0x00000200, //kFSEventStreamEventFlagItemInodeMetaMod = 0x00000400, kFSEventStreamEventFlagItemRenamed = 0x00000800, //kFSEventStreamEventFlagItemModified = 0x00001000, //kFSEventStreamEventFlagItemFinderInfoMod = 0x00002000, //kFSEventStreamEventFlagItemChangeOwner = 0x00004000, //kFSEventStreamEventFlagItemXattrMod 
= 0x00008000, kFSEventStreamEventFlagItemIsFile = 0x00010000, //kFSEventStreamEventFlagItemIsDir = 0x00020000, //kFSEventStreamEventFlagItemIsSymlink = 0x00040000, //kFSEventStreamEventFlagOwnEvent = 0x00080000 } static kFSEventStreamEventIdSinceNow: u64 = 0xFFFFFFFFFFFFFFFF; fn has_flag(event: u32, expected: FSEventStreamEventFlags) -> bool { event & expected as u32 == expected as u32 } extern "C" fn callback(_stream: *const c_void, info: *const c_void, size: c_int, paths: *const *const i8, events: *const u32, ids: *const u64) { let tx: &mut Sender<Event> = unsafe { &mut *(info as *mut Sender<Event>) }; let events: &[u32] = unsafe { mem::transmute(Slice { data: events, len: size as uint, }) }; let ids: &[u64] = unsafe { mem::transmute(Slice { data: ids, len: size as uint, }) }; let paths: &[*const i8] = unsafe { mem::transmute(Slice { data: paths, len: size as uint, }) }; let paths = Vec::from_fn(size as uint, |id| { unsafe { CString::new(paths[id], false) } }); let mut renamed = false; for id in range(0, size as uint) { debug!("Received filesystem event: [id: {}, ev: {}] from '{}'", ids[id], events[id], paths[id]); let event = events[id]; let path = String::from_str(paths[id].as_str().unwrap()); if event & kFSEventStreamEventFlagItemIsFile as u32 == 0 { continue; } let path_ = Path::new(path.as_slice()); if has_flag(event, kFSEventStreamEventFlagItemCreated) && path_.exists() { tx.send(Create(path.clone())); } if has_flag(event, kFSEventStreamEventFlagItemRemoved) && !path_.exists() { tx.send(Remove(path.clone())); } if has_flag(event, kFSEventStreamEventFlagItemRenamed) { if renamed { tx.send(RenameOld(path)); } else { tx.send(RenameNew(path)); } renamed = !renamed; } } } struct CoreFoundationString { d: *const c_void, } impl CoreFoundationString { fn new(string: &str) -> CoreFoundationString { CoreFoundationString { d: unsafe { CFStringCreateWithCString( kCFAllocatorDefault, string.to_c_str().as_ptr(), kCFStringEncodingUTF8 ) } } } } impl Drop for 
CoreFoundationString { fn drop(&mut self) { unsafe { CFRelease(self.d) } } } struct CoreFoundationArray { d: *const c_void, items: Vec<CoreFoundationString>, // It's a RAII container. } impl CoreFoundationArray { fn new(collection: &HashSet<String>) -> CoreFoundationArray { let d = unsafe { CFArrayCreateMutable( kCFAllocatorDefault, collection.len() as i32, ptr::null::<c_void>() ) }; let mut items = Vec::new(); for item in collection.iter() { let item = CoreFoundationString::new(item.as_slice()); unsafe { CFArrayAppendValue(d, item.d); } items.push(item); } CoreFoundationArray { d: d, items: items, } } } impl Drop for CoreFoundationArray { fn drop(&mut self) { self.items.clear(); unsafe { CFRelease(self.d) } } } fn recreate_stream(eventloop: *mut c_void, context: *const FSEventStreamContext, paths: HashSet<String>) -> *mut c_void { let paths = CoreFoundationArray::new(&paths); let latency = 0.05f64; let stream = unsafe { FSEventStreamCreate( kCFAllocatorDefault, callback, context, paths.d, kFSEventStreamEventIdSinceNow, latency, kFSEventStreamCreateFlagFileEvents ) }; unsafe { FSEventStreamScheduleWithRunLoop(stream, eventloop, kCFRunLoopDefaultMode); FSEventStreamStart(stream); stream } } pub struct Watcher { pub rx: Receiver<Event>, ctx: SyncSender<Control>, paths: HashSet<String>, stream: Arc<Mutex<*mut c_void>>, eventloop: Arc<Mutex<*mut c_void>>, } impl Watcher { pub fn new() -> Watcher { let (mut tx, rx) = channel::<Event>(); let (ctx, crx) = sync_channel::<Control>(0); let eventloop = Arc::new(Mutex::new(ptr::null_mut::<c_void>())); let stream = Arc::new(Mutex::new(ptr::null_mut::<c_void>())); let watcher = Watcher { rx: rx, ctx: ctx, paths: HashSet::new(), stream: stream.clone(), eventloop: eventloop.clone(), }; spawn(proc() { debug!("Starting watcher thread ..."); unsafe { *eventloop.lock() = CFRunLoopGetCurrent(); let tx: *mut c_void = &mut tx as *mut _ as *mut c_void; let context = FSEventStreamContext { version: 0, info: tx, retain: 
ptr::null::<c_void>(), release: ptr::null::<c_void>(), desc: ptr::null::<c_void>(), }; loop { debug!("New watcher loop iteration"); match crx.recv() { Update(paths) => { debug!("Updating watcher loop with {}", paths); *stream.lock() = recreate_stream(*eventloop.lock(), &context, paths); CFRunLoopRun(); } Exit => { debug!("Received watcher exit event - performing graceful shutdown"); break } } } } }); watcher } pub fn watch(&mut self, path: &Path) -> IoResult<()> { if path.exists() { debug!("Adding '{}' to the watch", path.display()); let path = os::make_absolute(path); let path = match path.as_str() { Some(path) => String::from_str(path), None => return Err(IoError::from_errno(ENOENT as uint, false)) }; self.paths.insert(path.clone()); self.update(); Ok(()) } else { Err(IoError::from_errno(ENOENT as uint, false)) } } pub fn unwatch(&mut self, path: &String) -> IoResult<()> { self.paths.remove(path); self.update(); Ok(()) } fn update(&self) { self.stop_stream(); self.ctx.send(Update(self.paths.clone())); } fn stop_stream(&self) { let mut stream = self.stream.lock(); if !(*stream).is_null() { unsafe { FSEventStreamStop(*stream); FSEventStreamInvalidate(*stream); FSEventStreamRelease(*stream); CFRunLoopWakeUp(*self.eventloop.lock()); } } } } impl Drop for Watcher { fn drop(&mut self) { debug!("dropping! 
{:p}", self); self.stop_stream(); self.ctx.send(Exit); } } #[link(name = "Carbon", kind = "framework")] #[link(name = "CoreFoundation", kind = "framework")] extern { static kCFAllocatorDefault: *mut c_void; static kCFRunLoopDefaultMode: *mut c_void; fn CFStringCreateWithCString(allocator: *mut c_void, string: *const c_char, encoding: CFStringBuiltInEncodings) -> *const c_void; fn CFArrayCreateMutable(allocator: *mut c_void, size: c_int, callbacks: *const c_void) -> *const c_void; fn CFArrayAppendValue(array: *const c_void, value: *const c_void); fn FSEventStreamCreate(allocator: *mut c_void, cb: callback_t, context: *const FSEventStreamContext, paths: *const c_void, since: u64, latency: f64, flags: u32) -> *mut c_void; fn FSEventStreamScheduleWithRunLoop(stream: *mut c_void, eventloop: *mut c_void, mode: *mut c_void); fn FSEventStreamStart(stream: *mut c_void); fn FSEventStreamStop(stream: *mut c_void); fn FSEventStreamInvalidate(stream: *mut c_void); fn FSEventStreamRelease(stream: *mut c_void); fn CFRunLoopGetCurrent() -> *mut c_void; fn CFRunLoopRun(); fn CFRunLoopWakeUp(ev: *mut c_void); fn CFRelease(p: *const c_void); } pub struct Backend { pub watcher: Watcher, } impl Backend { pub fn new(period: Duration) -> Backend { Backend { watcher: Watcher::new() } } pub fn register(&mut self, paths: HashSet<Path>) { for path in paths.iter() { self.watcher.watch(path); } } pub fn transform(&self, ev: Event) -> super::Event
}
{ super::Unknown }
identifier_body
kmodules.rs
//! This file contains all the stuff needed by Kernel Modules use super::message::push_message; use super::process::get_file_content; use super::scheduler::Scheduler; use super::thread_group::Credentials; use super::vfs::{Path, VFS}; use super::{IpcResult, SysResult}; use alloc::boxed::Box; use alloc::vec::Vec; use ansi_escape_code::Colored; use elf_loader::{SegmentType, SymbolTable}; use fallible_collections::boxed::FallibleBox; use irq::Irq; use kernel_modules::{ ForeignAllocMethods, KernelEvent, KernelSymbolList, KeyboardConfig, ModConfig, ModResult, ModReturn, ModSpecificReturn, RTCConfig, SymbolList, }; use libc_binding::{Errno, FileType, OpenFlags}; use log::Record; use time::Date; use core::convert::{TryFrom, TryInto}; use core::slice; use core::sync::atomic::AtomicU32; use crate::drivers::PIC_8259; use crate::elf_loader::load_elf; use crate::memory::mmu::Entry; use crate::memory::tools::{AllocFlags, NbrPages, Page, Virt}; use crate::memory::HIGH_KERNEL_MEMORY; /// Main structure pub struct KernelModules { dummy: Option<Module>, rtc: Option<Module>, keyboard: Option<Module>, syslog: Option<Module>, pub second_cycle: Vec<fn()>, } #[allow(dead_code)] /// Stored structure of a given module struct Module { start_point: u32, symbol_table: Box<SymbolTable>, mod_return: ModReturn, alloc_table: AllocTable, } /// Main implementation impl KernelModules { pub fn new() -> Self { Self { dummy: None, rtc: None, keyboard: None, syslog: None, second_cycle: Vec::new(), } } } impl Scheduler { /// Try to insert a Kernel Module pub fn insert_module(&mut self, modname: &str) -> SysResult<u32> { let (module_opt, module_pathname, mod_config) = match modname { "dummy" => ( &mut self.kernel_modules.dummy, "/turbofish/mod/dummy.mod", ModConfig::Dummy, ), "rtc" => ( &mut self.kernel_modules.rtc, "/turbofish/mod/rtc.mod", ModConfig::RTC(RTCConfig { enable_irq, disable_irq, // May be set as volatile... 
current_unix_time: unsafe { &mut CURRENT_UNIX_TIME }, }), ), "keyboard" => ( &mut self.kernel_modules.keyboard, "/turbofish/mod/key.mod", ModConfig::Keyboard(KeyboardConfig { enable_irq, disable_irq, callback: push_message, }), ), "syslog" => ( &mut self.kernel_modules.syslog, "/turbofish/mod/syslog.mod", ModConfig::Syslog, ), _ => { log::warn!("Unknown module name"); return Ok(0); } }; if let Some(_) = module_opt { log::warn!("Module already active"); return Ok(0); } // Generate content from disk let content = get_module_raw_content(module_pathname)?; // Try to parse ELF let (eip, symbol_table, alloc_table) = load_module(&content)?; let symbol_table = match symbol_table { Some(s) => s, None => { log::error!("No Symtab for that Module"); return Err(Errno::EINVAL); } }; // Launch the module with his particulary context let start_point: u32 = eip as u32; let p: fn(SymbolList) -> ModResult = unsafe { core::mem::transmute(start_point) }; let mod_return = p(SymbolList { write, emergency_write, alloc_tools: ForeignAllocMethods { kmalloc, kcalloc, kfree, krealloc, }, kernel_callback: mod_config, kernel_symbol_list: KernelSymbolList::new(), }) .map_err(|_e| Errno::EINVAL)?; if let Some(configurable_callbacks) = &mod_return.configurable_callbacks_opt { // Ensure we have suffisant memory before binding something let mut second_cycle_chunk_reserved = 0; for elem in configurable_callbacks.iter() { match elem.when { KernelEvent::Second => second_cycle_chunk_reserved += 1, _ => {} } } self.kernel_modules .second_cycle .try_reserve(second_cycle_chunk_reserved)?; // Bind callbacks for elem in configurable_callbacks.iter() { match elem.when { KernelEvent::Log => { // We assume that a function bindable to Log event has fn(&Record) prototype. // Yes, it is really really unsafe... But Louis is asking for that // LOGGER is on a direct binding. 
Not passing through Scheduler let p: fn(&Record) = unsafe { core::mem::transmute(elem.what) }; unsafe { // It is a shame that only one module can be binded to the log ! terminal::log::LOGGER.bind(p); } } KernelEvent::Second => { // We assume that a function bindable to Log event has fn() prototype. let p: fn() = unsafe { core::mem::transmute(elem.what) }; self.kernel_modules.second_cycle.push(p); } } } } *module_opt = Some(Module { start_point, symbol_table, mod_return, alloc_table, }); Ok(0) } /// Try to remove a kernel module pub fn remove_module(&mut self, modname: &str) -> SysResult<u32> { let module_opt = match modname { "dummy" => &mut self.kernel_modules.dummy, "rtc" => &mut self.kernel_modules.rtc, "keyboard" => &mut self.kernel_modules.keyboard, "syslog" => &mut self.kernel_modules.syslog, _ => { log::warn!("Unknown module name"); return Ok(0); } }; match module_opt { None => { log::warn!("Module already inactive"); return Ok(0); } Some(module) => { // Disable callbacks if let Some(configurable_callbacks) = &module.mod_return.configurable_callbacks_opt { for elem in configurable_callbacks.iter() { match elem.when { KernelEvent::Log => unsafe { terminal::log::LOGGER.unbind(); }, KernelEvent::Second => { let p: fn() = unsafe { core::mem::transmute(elem.what) }; let _r = self .kernel_modules .second_cycle .drain_filter(|elem| *elem == p) .collect::<Vec<_>>(); } } } } // Halt the module (module.mod_return.stop)(); } } *module_opt = None; Ok(0) } /// List all loaded modules pub fn list_modules(&self) -> SysResult<u32> { if self.kernel_modules.dummy.is_some() { println!("- module loaded: {}", "DUMMY".yellow()); } if self.kernel_modules.keyboard.is_some() { println!("- module loaded: {}", "KEYBOARD".yellow()); } if self.kernel_modules.rtc.is_some() { println!("- module loaded: {}", "RTC".yellow()); } if self.kernel_modules.syslog.is_some() { println!("- module loaded: {}", "SYSLOG".yellow()); } Ok(0) } /// Keyboard driver method specific pub fn 
reboot_computer(&self) { if let Some(keyboard) = &self.kernel_modules.keyboard { if let ModSpecificReturn::Keyboard(keyboard_return) = &keyboard.mod_return.spec { (keyboard_return.reboot_computer)(); } else { panic!("Unexpected error"); } } else { log::error!("ps2_controler/Keyboard handler not loaded"); } } /// RTC driver method specific pub fn read_date(&self) -> Date { if let Some(rtc) = &self.kernel_modules.rtc { if let ModSpecificReturn::RTC(rtc_return) = &rtc.mod_return.spec { (rtc_return.read_date)() } else { panic!("Unexpected error"); } } else { Date::default() } } } /// RTC driver specific globale pub static mut CURRENT_UNIX_TIME: AtomicU32 = AtomicU32::new(0); /// Set IDT ENTRY fn: Usable by modules fn enable_irq(idt_gate: Irq, func: unsafe extern "C" fn()) { unsafe { PIC_8259.lock().enable_irq(idt_gate, Some(func)); } } /// Unset IDT ENTRY fn: Usable by modules fn disable_irq(idt_gate: Irq) { unsafe { PIC_8259.lock().disable_irq(idt_gate); } } /// Common Write method for modules fn write(s: &str) { log::info!("{}", s); } /// Ermergency Write method for modules fn
(s: &str) { eprint!("{}", s); } /// Just used for a symbol list test #[no_mangle] #[link_section = ".kernel_exported_functions"] pub fn symbol_list_test() { log::info!("symbol_list_test function sucessfully called by a module !"); } #[no_mangle] #[link_section = ".kernel_exported_functions"] pub fn add_syslog_entry(entry: &str) -> Result<(), Errno> { let cwd = Path::try_from("/")?; let path = Path::try_from("/var/syslog")?; let mode = FileType::from_bits(0o600).expect("Cannot set FileType"); let flags = OpenFlags::O_WRONLY | OpenFlags::O_CREAT | OpenFlags::O_APPEND; let creds = &Credentials::ROOT; VFS.force_unlock(); /* just in case of. This mutex could become very problematic */ let file_operator = match VFS.lock().open(&cwd, creds, path, flags, mode)? { IpcResult::Done(file_operator) => file_operator, IpcResult::Wait(file_operator, _) => file_operator, }; let mut m = file_operator.lock(); m.write(unsafe { core::slice::from_raw_parts(entry as *const _ as *const u8, entry.len()) })?; Ok(()) } /// Common allocator methods for modules extern "C" { fn kmalloc(len: usize) -> *mut u8; fn kcalloc(count: usize, size: usize) -> *mut u8; fn kfree(ptr: *mut u8); fn krealloc(addr: *mut u8, new_size: usize) -> *mut u8; } struct AllocTable(Vec<AllocEntry>); struct AllocEntry { page_index: Page<Virt>, nbr_pages: NbrPages, } impl AllocEntry { fn new(page_index: Page<Virt>, nbr_pages: NbrPages) -> Self { Self { page_index, nbr_pages, } } } impl Drop for AllocEntry { fn drop(&mut self) { unsafe { HIGH_KERNEL_MEMORY .as_mut() .unwrap() .dealloc_on(self.page_index, self.nbr_pages) .expect("Unexpected memory error"); } } } /// Load a module from ELF fn load_module(content: &[u8]) -> SysResult<(u32, Option<Box<SymbolTable>>, AllocTable)> { let mut alloc_table: AllocTable = AllocTable(Vec::new()); // Parse Elf and generate stuff let elf = load_elf(content)?; for h in &elf.program_header_table { if h.segment_type == SegmentType::Load { let segment = unsafe { let page_index: Page<Virt> = 
Virt(h.vaddr as usize).into(); let nbr_pages: NbrPages = (h.memsz as usize).into(); alloc_table.0.try_reserve(1)?; HIGH_KERNEL_MEMORY.as_mut().unwrap().alloc_on( page_index, nbr_pages, AllocFlags::KERNEL_MEMORY, )?; alloc_table.0.push(AllocEntry::new(page_index, nbr_pages)); slice::from_raw_parts_mut(h.vaddr as usize as *mut u8, h.memsz as usize) }; segment[0..h.filez as usize] .copy_from_slice(&content[h.offset as usize..h.offset as usize + h.filez as usize]); unsafe { // With BSS (so a NOBITS section), the memsz value exceed the filesz. Setting next bytes as 0 segment[h.filez as usize..h.memsz as usize] .as_mut_ptr() .write_bytes(0, h.memsz as usize - h.filez as usize); // Modify the rights on pages by following the ELF specific restrictions HIGH_KERNEL_MEMORY .as_mut() .unwrap() .change_range_page_entry( Page::containing(Virt(h.vaddr as usize)), (h.memsz as usize).into(), &mut |entry: &mut Entry| { *entry |= Entry::from( Into::<AllocFlags>::into(h.flags) | AllocFlags::KERNEL_MEMORY, ) }, )?; } } } Ok(( elf.header.entry_point as u32, match SymbolTable::try_new(content).ok() { Some(elem) => Some(Box::try_new(elem)?), None => None, }, alloc_table, )) } /// Get Data of a module fn get_module_raw_content(mod_pathname: &str) -> SysResult<Vec<u8>> { let path = mod_pathname.try_into()?; get_file_content( &Path::try_from("/").expect("no root"), &Credentials::ROOT, path, ) }
emergency_write
identifier_name
kmodules.rs
//! This file contains all the stuff needed by Kernel Modules use super::message::push_message; use super::process::get_file_content; use super::scheduler::Scheduler; use super::thread_group::Credentials; use super::vfs::{Path, VFS}; use super::{IpcResult, SysResult}; use alloc::boxed::Box; use alloc::vec::Vec; use ansi_escape_code::Colored; use elf_loader::{SegmentType, SymbolTable}; use fallible_collections::boxed::FallibleBox; use irq::Irq; use kernel_modules::{ ForeignAllocMethods, KernelEvent, KernelSymbolList, KeyboardConfig, ModConfig, ModResult, ModReturn, ModSpecificReturn, RTCConfig, SymbolList, }; use libc_binding::{Errno, FileType, OpenFlags}; use log::Record; use time::Date; use core::convert::{TryFrom, TryInto}; use core::slice; use core::sync::atomic::AtomicU32; use crate::drivers::PIC_8259; use crate::elf_loader::load_elf; use crate::memory::mmu::Entry; use crate::memory::tools::{AllocFlags, NbrPages, Page, Virt}; use crate::memory::HIGH_KERNEL_MEMORY; /// Main structure pub struct KernelModules { dummy: Option<Module>, rtc: Option<Module>, keyboard: Option<Module>, syslog: Option<Module>, pub second_cycle: Vec<fn()>, } #[allow(dead_code)] /// Stored structure of a given module struct Module { start_point: u32, symbol_table: Box<SymbolTable>, mod_return: ModReturn, alloc_table: AllocTable, } /// Main implementation impl KernelModules { pub fn new() -> Self { Self { dummy: None, rtc: None, keyboard: None, syslog: None, second_cycle: Vec::new(), } } } impl Scheduler { /// Try to insert a Kernel Module pub fn insert_module(&mut self, modname: &str) -> SysResult<u32> { let (module_opt, module_pathname, mod_config) = match modname { "dummy" => ( &mut self.kernel_modules.dummy, "/turbofish/mod/dummy.mod", ModConfig::Dummy, ), "rtc" => ( &mut self.kernel_modules.rtc, "/turbofish/mod/rtc.mod", ModConfig::RTC(RTCConfig { enable_irq, disable_irq, // May be set as volatile... 
current_unix_time: unsafe { &mut CURRENT_UNIX_TIME }, }), ), "keyboard" => ( &mut self.kernel_modules.keyboard, "/turbofish/mod/key.mod", ModConfig::Keyboard(KeyboardConfig { enable_irq, disable_irq, callback: push_message, }), ), "syslog" => ( &mut self.kernel_modules.syslog, "/turbofish/mod/syslog.mod", ModConfig::Syslog, ), _ => { log::warn!("Unknown module name"); return Ok(0); } }; if let Some(_) = module_opt { log::warn!("Module already active"); return Ok(0); } // Generate content from disk let content = get_module_raw_content(module_pathname)?; // Try to parse ELF let (eip, symbol_table, alloc_table) = load_module(&content)?; let symbol_table = match symbol_table { Some(s) => s, None => { log::error!("No Symtab for that Module"); return Err(Errno::EINVAL); } }; // Launch the module with his particulary context let start_point: u32 = eip as u32; let p: fn(SymbolList) -> ModResult = unsafe { core::mem::transmute(start_point) }; let mod_return = p(SymbolList { write, emergency_write, alloc_tools: ForeignAllocMethods { kmalloc, kcalloc, kfree, krealloc, }, kernel_callback: mod_config, kernel_symbol_list: KernelSymbolList::new(), }) .map_err(|_e| Errno::EINVAL)?; if let Some(configurable_callbacks) = &mod_return.configurable_callbacks_opt { // Ensure we have suffisant memory before binding something let mut second_cycle_chunk_reserved = 0; for elem in configurable_callbacks.iter() { match elem.when { KernelEvent::Second => second_cycle_chunk_reserved += 1, _ => {} } } self.kernel_modules .second_cycle .try_reserve(second_cycle_chunk_reserved)?; // Bind callbacks for elem in configurable_callbacks.iter() { match elem.when { KernelEvent::Log => { // We assume that a function bindable to Log event has fn(&Record) prototype. // Yes, it is really really unsafe... But Louis is asking for that // LOGGER is on a direct binding. 
Not passing through Scheduler let p: fn(&Record) = unsafe { core::mem::transmute(elem.what) }; unsafe { // It is a shame that only one module can be binded to the log ! terminal::log::LOGGER.bind(p); } } KernelEvent::Second => { // We assume that a function bindable to Log event has fn() prototype. let p: fn() = unsafe { core::mem::transmute(elem.what) }; self.kernel_modules.second_cycle.push(p); } } } } *module_opt = Some(Module { start_point, symbol_table, mod_return, alloc_table, }); Ok(0) } /// Try to remove a kernel module pub fn remove_module(&mut self, modname: &str) -> SysResult<u32> { let module_opt = match modname { "dummy" => &mut self.kernel_modules.dummy, "rtc" => &mut self.kernel_modules.rtc, "keyboard" => &mut self.kernel_modules.keyboard, "syslog" => &mut self.kernel_modules.syslog, _ => { log::warn!("Unknown module name"); return Ok(0); } }; match module_opt { None => { log::warn!("Module already inactive"); return Ok(0); } Some(module) => { // Disable callbacks if let Some(configurable_callbacks) = &module.mod_return.configurable_callbacks_opt { for elem in configurable_callbacks.iter() { match elem.when { KernelEvent::Log => unsafe { terminal::log::LOGGER.unbind(); }, KernelEvent::Second => { let p: fn() = unsafe { core::mem::transmute(elem.what) }; let _r = self .kernel_modules .second_cycle .drain_filter(|elem| *elem == p) .collect::<Vec<_>>(); } } } } // Halt the module (module.mod_return.stop)(); } } *module_opt = None; Ok(0) } /// List all loaded modules pub fn list_modules(&self) -> SysResult<u32> { if self.kernel_modules.dummy.is_some() { println!("- module loaded: {}", "DUMMY".yellow()); } if self.kernel_modules.keyboard.is_some() { println!("- module loaded: {}", "KEYBOARD".yellow()); } if self.kernel_modules.rtc.is_some() { println!("- module loaded: {}", "RTC".yellow()); } if self.kernel_modules.syslog.is_some() { println!("- module loaded: {}", "SYSLOG".yellow()); } Ok(0) } /// Keyboard driver method specific pub fn 
reboot_computer(&self) { if let Some(keyboard) = &self.kernel_modules.keyboard { if let ModSpecificReturn::Keyboard(keyboard_return) = &keyboard.mod_return.spec { (keyboard_return.reboot_computer)(); } else { panic!("Unexpected error"); } } else { log::error!("ps2_controler/Keyboard handler not loaded"); } } /// RTC driver method specific pub fn read_date(&self) -> Date { if let Some(rtc) = &self.kernel_modules.rtc { if let ModSpecificReturn::RTC(rtc_return) = &rtc.mod_return.spec { (rtc_return.read_date)() } else { panic!("Unexpected error"); } } else { Date::default() } } } /// RTC driver specific globale pub static mut CURRENT_UNIX_TIME: AtomicU32 = AtomicU32::new(0); /// Set IDT ENTRY fn: Usable by modules fn enable_irq(idt_gate: Irq, func: unsafe extern "C" fn()) { unsafe { PIC_8259.lock().enable_irq(idt_gate, Some(func)); } } /// Unset IDT ENTRY fn: Usable by modules fn disable_irq(idt_gate: Irq) { unsafe { PIC_8259.lock().disable_irq(idt_gate); } } /// Common Write method for modules fn write(s: &str) { log::info!("{}", s); } /// Ermergency Write method for modules fn emergency_write(s: &str) { eprint!("{}", s); } /// Just used for a symbol list test #[no_mangle] #[link_section = ".kernel_exported_functions"] pub fn symbol_list_test() { log::info!("symbol_list_test function sucessfully called by a module !"); } #[no_mangle] #[link_section = ".kernel_exported_functions"] pub fn add_syslog_entry(entry: &str) -> Result<(), Errno> { let cwd = Path::try_from("/")?; let path = Path::try_from("/var/syslog")?; let mode = FileType::from_bits(0o600).expect("Cannot set FileType"); let flags = OpenFlags::O_WRONLY | OpenFlags::O_CREAT | OpenFlags::O_APPEND; let creds = &Credentials::ROOT; VFS.force_unlock(); /* just in case of. This mutex could become very problematic */ let file_operator = match VFS.lock().open(&cwd, creds, path, flags, mode)? 
{ IpcResult::Done(file_operator) => file_operator, IpcResult::Wait(file_operator, _) => file_operator, }; let mut m = file_operator.lock(); m.write(unsafe { core::slice::from_raw_parts(entry as *const _ as *const u8, entry.len()) })?; Ok(()) } /// Common allocator methods for modules extern "C" { fn kmalloc(len: usize) -> *mut u8; fn kcalloc(count: usize, size: usize) -> *mut u8; fn kfree(ptr: *mut u8); fn krealloc(addr: *mut u8, new_size: usize) -> *mut u8; } struct AllocTable(Vec<AllocEntry>); struct AllocEntry { page_index: Page<Virt>, nbr_pages: NbrPages, } impl AllocEntry { fn new(page_index: Page<Virt>, nbr_pages: NbrPages) -> Self { Self { page_index, nbr_pages, } } } impl Drop for AllocEntry { fn drop(&mut self) { unsafe { HIGH_KERNEL_MEMORY .as_mut() .unwrap() .dealloc_on(self.page_index, self.nbr_pages) .expect("Unexpected memory error"); } } } /// Load a module from ELF fn load_module(content: &[u8]) -> SysResult<(u32, Option<Box<SymbolTable>>, AllocTable)> { let mut alloc_table: AllocTable = AllocTable(Vec::new()); // Parse Elf and generate stuff let elf = load_elf(content)?; for h in &elf.program_header_table { if h.segment_type == SegmentType::Load { let segment = unsafe { let page_index: Page<Virt> = Virt(h.vaddr as usize).into(); let nbr_pages: NbrPages = (h.memsz as usize).into(); alloc_table.0.try_reserve(1)?; HIGH_KERNEL_MEMORY.as_mut().unwrap().alloc_on( page_index, nbr_pages, AllocFlags::KERNEL_MEMORY, )?; alloc_table.0.push(AllocEntry::new(page_index, nbr_pages)); slice::from_raw_parts_mut(h.vaddr as usize as *mut u8, h.memsz as usize) }; segment[0..h.filez as usize] .copy_from_slice(&content[h.offset as usize..h.offset as usize + h.filez as usize]); unsafe {
.as_mut_ptr() .write_bytes(0, h.memsz as usize - h.filez as usize); // Modify the rights on pages by following the ELF specific restrictions HIGH_KERNEL_MEMORY .as_mut() .unwrap() .change_range_page_entry( Page::containing(Virt(h.vaddr as usize)), (h.memsz as usize).into(), &mut |entry: &mut Entry| { *entry |= Entry::from( Into::<AllocFlags>::into(h.flags) | AllocFlags::KERNEL_MEMORY, ) }, )?; } } } Ok(( elf.header.entry_point as u32, match SymbolTable::try_new(content).ok() { Some(elem) => Some(Box::try_new(elem)?), None => None, }, alloc_table, )) } /// Get Data of a module fn get_module_raw_content(mod_pathname: &str) -> SysResult<Vec<u8>> { let path = mod_pathname.try_into()?; get_file_content( &Path::try_from("/").expect("no root"), &Credentials::ROOT, path, ) }
// With BSS (so a NOBITS section), the memsz value exceed the filesz. Setting next bytes as 0 segment[h.filez as usize..h.memsz as usize]
random_line_split
kmodules.rs
//! This file contains all the stuff needed by Kernel Modules use super::message::push_message; use super::process::get_file_content; use super::scheduler::Scheduler; use super::thread_group::Credentials; use super::vfs::{Path, VFS}; use super::{IpcResult, SysResult}; use alloc::boxed::Box; use alloc::vec::Vec; use ansi_escape_code::Colored; use elf_loader::{SegmentType, SymbolTable}; use fallible_collections::boxed::FallibleBox; use irq::Irq; use kernel_modules::{ ForeignAllocMethods, KernelEvent, KernelSymbolList, KeyboardConfig, ModConfig, ModResult, ModReturn, ModSpecificReturn, RTCConfig, SymbolList, }; use libc_binding::{Errno, FileType, OpenFlags}; use log::Record; use time::Date; use core::convert::{TryFrom, TryInto}; use core::slice; use core::sync::atomic::AtomicU32; use crate::drivers::PIC_8259; use crate::elf_loader::load_elf; use crate::memory::mmu::Entry; use crate::memory::tools::{AllocFlags, NbrPages, Page, Virt}; use crate::memory::HIGH_KERNEL_MEMORY; /// Main structure pub struct KernelModules { dummy: Option<Module>, rtc: Option<Module>, keyboard: Option<Module>, syslog: Option<Module>, pub second_cycle: Vec<fn()>, } #[allow(dead_code)] /// Stored structure of a given module struct Module { start_point: u32, symbol_table: Box<SymbolTable>, mod_return: ModReturn, alloc_table: AllocTable, } /// Main implementation impl KernelModules { pub fn new() -> Self { Self { dummy: None, rtc: None, keyboard: None, syslog: None, second_cycle: Vec::new(), } } } impl Scheduler { /// Try to insert a Kernel Module pub fn insert_module(&mut self, modname: &str) -> SysResult<u32>
/// Try to remove a kernel module pub fn remove_module(&mut self, modname: &str) -> SysResult<u32> { let module_opt = match modname { "dummy" => &mut self.kernel_modules.dummy, "rtc" => &mut self.kernel_modules.rtc, "keyboard" => &mut self.kernel_modules.keyboard, "syslog" => &mut self.kernel_modules.syslog, _ => { log::warn!("Unknown module name"); return Ok(0); } }; match module_opt { None => { log::warn!("Module already inactive"); return Ok(0); } Some(module) => { // Disable callbacks if let Some(configurable_callbacks) = &module.mod_return.configurable_callbacks_opt { for elem in configurable_callbacks.iter() { match elem.when { KernelEvent::Log => unsafe { terminal::log::LOGGER.unbind(); }, KernelEvent::Second => { let p: fn() = unsafe { core::mem::transmute(elem.what) }; let _r = self .kernel_modules .second_cycle .drain_filter(|elem| *elem == p) .collect::<Vec<_>>(); } } } } // Halt the module (module.mod_return.stop)(); } } *module_opt = None; Ok(0) } /// List all loaded modules pub fn list_modules(&self) -> SysResult<u32> { if self.kernel_modules.dummy.is_some() { println!("- module loaded: {}", "DUMMY".yellow()); } if self.kernel_modules.keyboard.is_some() { println!("- module loaded: {}", "KEYBOARD".yellow()); } if self.kernel_modules.rtc.is_some() { println!("- module loaded: {}", "RTC".yellow()); } if self.kernel_modules.syslog.is_some() { println!("- module loaded: {}", "SYSLOG".yellow()); } Ok(0) } /// Keyboard driver method specific pub fn reboot_computer(&self) { if let Some(keyboard) = &self.kernel_modules.keyboard { if let ModSpecificReturn::Keyboard(keyboard_return) = &keyboard.mod_return.spec { (keyboard_return.reboot_computer)(); } else { panic!("Unexpected error"); } } else { log::error!("ps2_controler/Keyboard handler not loaded"); } } /// RTC driver method specific pub fn read_date(&self) -> Date { if let Some(rtc) = &self.kernel_modules.rtc { if let ModSpecificReturn::RTC(rtc_return) = &rtc.mod_return.spec { (rtc_return.read_date)() } 
else { panic!("Unexpected error"); } } else { Date::default() } } } /// RTC driver specific globale pub static mut CURRENT_UNIX_TIME: AtomicU32 = AtomicU32::new(0); /// Set IDT ENTRY fn: Usable by modules fn enable_irq(idt_gate: Irq, func: unsafe extern "C" fn()) { unsafe { PIC_8259.lock().enable_irq(idt_gate, Some(func)); } } /// Unset IDT ENTRY fn: Usable by modules fn disable_irq(idt_gate: Irq) { unsafe { PIC_8259.lock().disable_irq(idt_gate); } } /// Common Write method for modules fn write(s: &str) { log::info!("{}", s); } /// Ermergency Write method for modules fn emergency_write(s: &str) { eprint!("{}", s); } /// Just used for a symbol list test #[no_mangle] #[link_section = ".kernel_exported_functions"] pub fn symbol_list_test() { log::info!("symbol_list_test function sucessfully called by a module !"); } #[no_mangle] #[link_section = ".kernel_exported_functions"] pub fn add_syslog_entry(entry: &str) -> Result<(), Errno> { let cwd = Path::try_from("/")?; let path = Path::try_from("/var/syslog")?; let mode = FileType::from_bits(0o600).expect("Cannot set FileType"); let flags = OpenFlags::O_WRONLY | OpenFlags::O_CREAT | OpenFlags::O_APPEND; let creds = &Credentials::ROOT; VFS.force_unlock(); /* just in case of. This mutex could become very problematic */ let file_operator = match VFS.lock().open(&cwd, creds, path, flags, mode)? 
{ IpcResult::Done(file_operator) => file_operator, IpcResult::Wait(file_operator, _) => file_operator, }; let mut m = file_operator.lock(); m.write(unsafe { core::slice::from_raw_parts(entry as *const _ as *const u8, entry.len()) })?; Ok(()) } /// Common allocator methods for modules extern "C" { fn kmalloc(len: usize) -> *mut u8; fn kcalloc(count: usize, size: usize) -> *mut u8; fn kfree(ptr: *mut u8); fn krealloc(addr: *mut u8, new_size: usize) -> *mut u8; } struct AllocTable(Vec<AllocEntry>); struct AllocEntry { page_index: Page<Virt>, nbr_pages: NbrPages, } impl AllocEntry { fn new(page_index: Page<Virt>, nbr_pages: NbrPages) -> Self { Self { page_index, nbr_pages, } } } impl Drop for AllocEntry { fn drop(&mut self) { unsafe { HIGH_KERNEL_MEMORY .as_mut() .unwrap() .dealloc_on(self.page_index, self.nbr_pages) .expect("Unexpected memory error"); } } } /// Load a module from ELF fn load_module(content: &[u8]) -> SysResult<(u32, Option<Box<SymbolTable>>, AllocTable)> { let mut alloc_table: AllocTable = AllocTable(Vec::new()); // Parse Elf and generate stuff let elf = load_elf(content)?; for h in &elf.program_header_table { if h.segment_type == SegmentType::Load { let segment = unsafe { let page_index: Page<Virt> = Virt(h.vaddr as usize).into(); let nbr_pages: NbrPages = (h.memsz as usize).into(); alloc_table.0.try_reserve(1)?; HIGH_KERNEL_MEMORY.as_mut().unwrap().alloc_on( page_index, nbr_pages, AllocFlags::KERNEL_MEMORY, )?; alloc_table.0.push(AllocEntry::new(page_index, nbr_pages)); slice::from_raw_parts_mut(h.vaddr as usize as *mut u8, h.memsz as usize) }; segment[0..h.filez as usize] .copy_from_slice(&content[h.offset as usize..h.offset as usize + h.filez as usize]); unsafe { // With BSS (so a NOBITS section), the memsz value exceed the filesz. 
Setting next bytes as 0 segment[h.filez as usize..h.memsz as usize] .as_mut_ptr() .write_bytes(0, h.memsz as usize - h.filez as usize); // Modify the rights on pages by following the ELF specific restrictions HIGH_KERNEL_MEMORY .as_mut() .unwrap() .change_range_page_entry( Page::containing(Virt(h.vaddr as usize)), (h.memsz as usize).into(), &mut |entry: &mut Entry| { *entry |= Entry::from( Into::<AllocFlags>::into(h.flags) | AllocFlags::KERNEL_MEMORY, ) }, )?; } } } Ok(( elf.header.entry_point as u32, match SymbolTable::try_new(content).ok() { Some(elem) => Some(Box::try_new(elem)?), None => None, }, alloc_table, )) } /// Get Data of a module fn get_module_raw_content(mod_pathname: &str) -> SysResult<Vec<u8>> { let path = mod_pathname.try_into()?; get_file_content( &Path::try_from("/").expect("no root"), &Credentials::ROOT, path, ) }
{ let (module_opt, module_pathname, mod_config) = match modname { "dummy" => ( &mut self.kernel_modules.dummy, "/turbofish/mod/dummy.mod", ModConfig::Dummy, ), "rtc" => ( &mut self.kernel_modules.rtc, "/turbofish/mod/rtc.mod", ModConfig::RTC(RTCConfig { enable_irq, disable_irq, // May be set as volatile... current_unix_time: unsafe { &mut CURRENT_UNIX_TIME }, }), ), "keyboard" => ( &mut self.kernel_modules.keyboard, "/turbofish/mod/key.mod", ModConfig::Keyboard(KeyboardConfig { enable_irq, disable_irq, callback: push_message, }), ), "syslog" => ( &mut self.kernel_modules.syslog, "/turbofish/mod/syslog.mod", ModConfig::Syslog, ), _ => { log::warn!("Unknown module name"); return Ok(0); } }; if let Some(_) = module_opt { log::warn!("Module already active"); return Ok(0); } // Generate content from disk let content = get_module_raw_content(module_pathname)?; // Try to parse ELF let (eip, symbol_table, alloc_table) = load_module(&content)?; let symbol_table = match symbol_table { Some(s) => s, None => { log::error!("No Symtab for that Module"); return Err(Errno::EINVAL); } }; // Launch the module with his particulary context let start_point: u32 = eip as u32; let p: fn(SymbolList) -> ModResult = unsafe { core::mem::transmute(start_point) }; let mod_return = p(SymbolList { write, emergency_write, alloc_tools: ForeignAllocMethods { kmalloc, kcalloc, kfree, krealloc, }, kernel_callback: mod_config, kernel_symbol_list: KernelSymbolList::new(), }) .map_err(|_e| Errno::EINVAL)?; if let Some(configurable_callbacks) = &mod_return.configurable_callbacks_opt { // Ensure we have suffisant memory before binding something let mut second_cycle_chunk_reserved = 0; for elem in configurable_callbacks.iter() { match elem.when { KernelEvent::Second => second_cycle_chunk_reserved += 1, _ => {} } } self.kernel_modules .second_cycle .try_reserve(second_cycle_chunk_reserved)?; // Bind callbacks for elem in configurable_callbacks.iter() { match elem.when { KernelEvent::Log => { // We assume 
that a function bindable to Log event has fn(&Record) prototype. // Yes, it is really really unsafe... But Louis is asking for that // LOGGER is on a direct binding. Not passing through Scheduler let p: fn(&Record) = unsafe { core::mem::transmute(elem.what) }; unsafe { // It is a shame that only one module can be binded to the log ! terminal::log::LOGGER.bind(p); } } KernelEvent::Second => { // We assume that a function bindable to Log event has fn() prototype. let p: fn() = unsafe { core::mem::transmute(elem.what) }; self.kernel_modules.second_cycle.push(p); } } } } *module_opt = Some(Module { start_point, symbol_table, mod_return, alloc_table, }); Ok(0) }
identifier_body
Taguchi_Final_v0.0.py
# Taguchi Method Final from tkinter import * import pandas as pd import math import copy import tkinter.messagebox import matplotlib.pyplot as plt from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg from tkinter.filedialog import askopenfile import csv main_window = Tk() #Heading = Label(main_window, text = "MaTrix") #Heading.grid(row=0,column=1) n = 0 t = 0 us = [] sn = [] og_n = 0 og_t = 0 og_bn = [] og_hn = [] sn_level_table = [] sn_change_table = [] Mt_o_table = [] useful_parameters = [] sn_case_table = [] predict_data = [] dataset = [] #def reset_values(): # n = 0 # t = 0 # us = [] # sn = [] # og_n = 0 # og_t = 0 # og_bn = [] # og_hn = [] # sn_level_table = [] # sn_change_table = [] # Mt_o_table = [] # useful_parameters = [] # sn_case_table = [] # predict_data = [] #tdata = [] main_window.title("MADEIN MaTrix") def run(data, d): #global tdata global n global t n = int(n) t = int(t) #print(n,t) #print(dataset[0][0].get()) global dataset #global data_file if d==0: for j in range(t): for i in range(n+1): dataset[j][i] = int(dataset[j][i].get()) else: dataset = data[1:] # for i in range(n+1): # tdata[j][i]=dataset[j][i].get() tdata = [] tdata = copy.deepcopy(dataset) #print(tdata) tally_table = [[1,1,1,1,1,1,1,1,1,1,1], [1,1,1,1,1,2,2,2,2,2,2], [1,1,2,2,2,1,1,1,2,2,2], [1,2,1,2,2,1,2,2,1,1,2], [1,2,2,1,2,2,1,2,1,2,1], [1,2,2,2,1,2,2,1,2,1,1], [2,1,2,2,1,1,2,2,1,2,1], [2,1,2,1,2,2,2,1,1,1,2], [2,1,1,2,2,2,1,2,2,1,1], [2,2,2,1,1,1,1,2,2,1,2], [2,2,1,2,1,2,1,1,1,2,2], [2,2,1,1,2,1,2,1,2,2,1]] sample_data =[[34.27,7.10,20.08,24.30,9.48,1.17,3.60,49.77], [26.78,21.71,15.23,23.84,7.00,1.74,3.70,53.73], [17.01,26.04,19.65,23.16,9.41,1.12,3.60,54.10], [23.77,22.25,15.40,25.67,7.00,2.21,3.70,54.29], [22.11,21.71,19.91,23.84,7.00,1.74,3.70,56.27], [22.14,30.49,11.15,23.88,7.00,1.74,3.60,56.45], [22.11,21.71,19.91,23.84,7.00,1.74,3.70,59.14], [20.81,21.05,19.25,26.56,7.00,1.63,3.69,59.89], [12.18,31.64,19.91,23.84,7.00,1.74,3.70,60.59], 
[19.66,23.15,21.35,22.75,7.00,2.37,3.71,61.51]] #test_data1 = [[6,15,0.5,0,0,0,10,0.57], # [6,15,0.5,3,5,5,13,0.43], # [6,25,2.5,0,0,5,13,0.51], # [6,25,2.5,3,5,0,10,0.51], # [10,15,2.5,0,5,0,13,0.51], # [10,15,2.5,3,0,5,10,0.55], # [10,25,0.5,0,5,5,10,0.5]] tdata = copy.deepcopy(sample_data) n = 7 t = 10 #adding parameter(strength in this case) to new list z = [] for j in range(t): z.append(tdata[j][-1]) #computing avg zavg = sum(z)/len(z) zsub = [] #finding two test case parameter values(strength) closest to avg for j in range(t): zsub.append([j,abs(z[j]-zavg)]) zsub = sorted(zsub, key = lambda a:a[1]) close2 = [zsub[0][0], zsub[1][0]] #creating unit space global us us = [] for i in range(n+1): us.append((tdata[zsub[0][0]][i]+tdata[zsub[1][0]][i])/2) #print(us) deleted_testcase_index = [zsub[0][0],zsub[1][0]] tdata.pop(zsub[0][0]) tdata.pop(zsub[1][0]) #testdata is now signal data for j in range(t-2): for i in range(n+1): tdata[j][i] = tdata[j][i] - us[i] #testdata is now normalized signal data nsdata = copy.deepcopy(tdata) global sn sn = [] global og_n og_n = n global og_bn og_bn = [] global og_hn og_hn = [] #SN Ratio Calculation Function def snr_calc(mat_used): global n global t global og_n tdata = copy.deepcopy(nsdata) i_c = 0 for i in range(n-1,-1,-1): if mat_used[i]==2: i_c += 1 for j in range(t-2): tdata[j].pop(i) n = n - i_c #Printing current rounded off tdata #for j in tdata: # for i in j: # print(round(i,2),end = " ") # print("") #b and sb calculation (propotional coefficient and variation of propotional term) bn = [] sbn = [] #calc r r = 0 for j in range(t-2): r = r + tdata[j][-1]**2 #adding each b for i in range(n): b = 0 sb = 0 for j in range(t-2): b = b + tdata[j][i]*tdata[j][-1] bn.append(b/r) sbn.append((b**2)/r) global og_bn if n == og_n: og_bn = bn ### Printing b values #print("Propotional Coefficient Values: ") #print(bn) #print("") ### Printing rounded off b values #for i in bn: # print(round(i,3), end=" ") #print("") #st calculation stn = [] 
for i in range(n): st = 0 for j in range(t-2): st = st + tdata[j][i]**2 stn.append(st) #se calculation sen = [] for i in range(n): sen.append(stn[i] - sbn[i]) #ve calculation ven = [] #l value l = t-2 for i in range(n): ve = sen[i]/(l-1) ven.append(ve) #comparing ve and sb to get h(sn ratio) value hn = [] for i in range(n): if sbn[i]>ven[i]: h = (sbn[i]-ven[i])/(r*ven[i]) else: h = 0 hn.append(h) #print(hn) global og_hn if n == og_n:
### Printing h values #print(hn) #print("") ### Printing rounded off h values #for i in hn: # print(round(i,3), end=" ") #print("") #rounding off #for i in range(n): # bn[i] = round(bn[i],3) # hn[i] = round(hn[i],3) #print(bn) #print(hn) #computation of integrated estimated strength value M for each data item Mt = [] hsum = 0 for i in hn: hsum = hsum + i for j in range(t-2): m = 0 for i in range(n): m += (hn[i] * tdata[j][i])/bn[i] Mt.append(m/hsum) #M est and actual table creation Mtable = [["Data No.","Measured M","Integrated Estimated M"]] for j in range(t-2): Mtable.append([j,tdata[j][-1],Mt[j]]) ### Printing M change table #for i in Mtable: # print(i) #print("") #propotional equation L calculation lm = 0 for j in range(t-2): lm += tdata[j][-1]*Mt[j] #total variation St calculation stm = 0 for j in range(t-2): stm += Mt[j]**2 #variation of propotional term calculation sbm = (lm**2)/r #error variation calculation sem = stm - sbm #error variance calculation vem = sem/(l-1) ### Integrated SN Ratio Calculation snh = (sbm - vem)/(r*vem) snh = 10*(math.log(snh,10)) #sn.append(round(snh,2)) n = og_n return snh #SN ratio for all cases for k in range(12): sn.append(snr_calc(tally_table[k])) ### Printing integrated SN ratio values #print(sn) ### Printing rounded off integrated SN ratio values #for i in sn: # print(round(i,2),end=" ") #print("") #material wise relative importance calculation # Level 1 = used ; Level 2 = not used global sn_level_table sn_level_table = [["Item/Parameter","Level 1","Level 2"]] for i in range(n): l1 = 0 l2 = 0 for j in range(12): if tally_table[j][i]==1: l1 += sn[j] else: l2 += sn[j] sn_level_table.append([i+1,l1/6,l2/6]) # SN Percentage Change Table global sn_change_table sn_change_table = [["Item/Parameter", "SN Ratio % Change"]] for i in range(n): sn_pchange = ((sn_level_table[i+1][2]-sn_level_table[i+1][1])/sn_level_table[i+1][2])*100 sn_change_table.append([i+1, sn_pchange]) ### Printing Level Table (SN Ratio Change) for i in range(n): 
for j in range(2): sn_level_table[i+1][j+1]=round(sn_level_table[i+1][j+1],2) for i in sn_level_table: print(i) ### CASE 1 # Printing SN ( h ) Values when all materials used #print(og_hn) #global og_hn case1_index = [] for i in range(n): if og_hn[i]>0: case1_index.append(i+1) case1_tally = [2,2,2,2,2,2,2,2,2,2,2] for i in case1_index: case1_tally[i-1]=1 #print(case1_index) ### CASE 2 case2_index = [] ''' for i in range(1,n+1): if sn_level_table[i][1]>sn_level_table[i][2]: case2_index.append(i) ''' for i in range(1,n+1): if sn_change_table[i][1]>1: case2_index.append(i) case2_tally = [2,2,2,2,2,2,2,2,2,2,2] for i in case2_index: case2_tally[i-1]=1 #print(case2_index) # Finding Useful Parameters global useful_parameters useful_parameters = [value for value in case1_index if value in case2_index] #useful_parameters.pop(2) #print(useful_parameters) # Printing original b values when all materials used #print(og_bn) # Computing integrated estimated M value under optimum conditions #global og_bn Mt_o = [] hsum_o = 0 for i in useful_parameters: hsum_o += og_hn[i-1] for j in range(t-2): M_o = 0 for i in useful_parameters: M_o += (og_hn[i-1]*nsdata[j][i-1])/og_bn[i-1] Mt_o.append(M_o/hsum_o) #print(Mt_o) global Mt_o_table Mt_o_table = [["Data No.", "Measured Value M", "Intergrated Estimate Value M"]] for j in range(t-2): Mt_o_table.append([j+1, round(nsdata[j][-1],2), round(Mt_o[j],2)]) # Printing Measured M value and integrated estimate M value in case 2 for i in Mt_o_table: print(i) #Printing Comparision of the Integrated Estimate SN Ratio for both Cases global sn_case_table sn_case_table = [["Case", "Used Items/Parameters", "Integrated Estimate SN Ratio (dB)"]] sn_case_table.append([1, case1_index, snr_calc(case1_tally)]) sn_case_table.append([2, useful_parameters, snr_calc(case2_tally)]) for i in sn_case_table: print(i) #input_data.withdraw() graph_data = [] for i in range(len(sn_level_table[1:])): sn_data = {"Level": ["L 1", "L 2"], "SN Ratio": 
[sn_level_table[i+1][1],sn_level_table[i+1][2]]} #"Material": [level_data[i+1][0]], graph_data.append(sn_data) plot_data = Toplevel(main_window) plot_data.title("Plotting Data") fig, axs = plt.subplots(1, len(sn_level_table[1:]), sharey=True, figsize=(15,5)) fig.suptitle('Parameter Wise SN Ratio Change') graph = FigureCanvasTkAgg(fig, plot_data) graph.get_tk_widget().pack() #side=tk.LEFT, fill=tk.BOTH #df1 = df1.groupby('Parameter and Level').sum() #axs = plt.subplots(1, len(level_data)-1, sharey=True) for i in range(len(sn_level_table[1:])): df = pd.DataFrame(graph_data[i], columns=["Level","SN Ratio"]) df = df.groupby("Level").sum() c = "" if sn_change_table[i+1][1]>2 and og_hn[i]>0: c = 'g' elif sn_change_table[i+1][1]>0 and og_hn[i]>0: c = 'y' else: c = 'r' df.plot(ax=axs[i], color=c, marker='o', fontsize=10) #axs[i].xaxis.set_major_locator(MultipleLocator(2)) axs[i].set_title("Parameter "+str(i+1)) def edit_data(): input_data.deiconify() plot_data.destroy() if d==0: edit_data_btn = Button(plot_data,text='Edit Data',command=edit_data) edit_data_btn.pack() ### Predicting Window def predict_input(): prediction_input = Toplevel(main_window) prediction_input.title("Enter Data to Predict Feature") parameter_vars = [] for i in range(int(n)): state = IntVar() select_parameter = Checkbutton(prediction_input, text = "Parameter "+str(i+1), variable=state) select_parameter.grid(row=0, column=i+1) parameter_vars.append(state) if i+1 in useful_parameters: select_parameter.select() feature_name = Label(prediction_input, text="Feature") feature_name.grid(row=0, column=int(n)+1) feature_prediction = Text(prediction_input, height=1,width=15) feature_prediction.grid(row=1, column=int(n)+1) def predict(): global useful_parameters predict_parameters = [] for i in range(len(useful_parameters)): predict_parameters.append(useful_parameters[i]) for i in range(int(n)): if parameter_vars[i].get()==1 and i+1 not in predict_parameters: predict_parameters.append(i+1) global predict_data 
#for i in range(len(predict_data)): predict_data = [23.77,22.25,15.40,25.67,7.00,2.21,3.70] #predict_data = [17.44,21.71,24.58,23.84,7.00,1.74,3.70] global us for i in range(len(predict_data)): predict_data[i] = predict_data[i] - us[i] M_p = 0 hsum_p = 0 for i in predict_parameters: hsum_p += og_hn[i-1] for i in predict_parameters: M_p += (og_hn[i-1]*predict_data[i-1])/og_bn[i-1] M_p = M_p/hsum_p M_predicted = us[-1]+M_p feature_prediction.delete(END) feature_prediction.insert(END, M_predicted) predict_button = Button(prediction_input, text="Predict", command=predict) predict_button.grid(row=1, column=0) #predict_more = Button(prediction_input, text="Clear Values", command=None) #predict_more.grid(row=1, column=0) global predict_data predict_data = [] for i in range(int(n)): predict_entry = Entry(prediction_input) predict_entry.grid(row=1, column=i+1) predict_data.append(predict_entry) def closeall(): prediction_input.quit() #plot_data.destroy() main_window.quit() prediction_input.protocol("WM WM_DELETE_WINDOW", closeall) prediction_input.mainloop() #plot_data.withdraw() predict_btn = Button(plot_data,text='Predict',command=predict_input) predict_btn.pack() def quit(): #sys.exit() plot_data.destroy() main_window.quit() #print(1) plot_data.protocol("WM_DELETE_WINDOW", quit) plot_data.mainloop() # input_data.withdraw() # print(1) def enter_data_details_window(): main_window.withdraw() data_details = Toplevel(main_window) data_details.title("Enter Data Details") parameter_label = Label(data_details, text="Enter Number of Parameters:") parameter_label.grid(row=0, column=0) parameter_entry = Entry(data_details) parameter_entry.grid(row=0, column=1) testcase_label = Label(data_details, text="Enter Number of Test Cases:") testcase_label.grid(row=1, column=0) testcase_entry = Entry(data_details) testcase_entry.grid(row=1, column=1) def input_data_window(): data_details.withdraw() global n n = parameter_entry.get() global t t = testcase_entry.get() #print(n,t) input_data = 
Toplevel(data_details) input_data.title("Input Data") #indentation run func def edit_details(): MsgBox = tkinter.messagebox.askquestion('Go Back','You will lose the input data, do you want to continue?',icon = 'warning') if MsgBox == 'yes': data_details.deiconify() input_data.destroy() edit_data_details = Button(input_data, text="Edit Data Details", command=edit_details) edit_data_details.grid(row=0, column=0) for i in range(int(n)): header = Label(input_data, text="Parameter "+str(i+1)) header.grid(row=0, column=i+1) feature_label = Label(input_data, text="Feature") feature_label.grid(row=0, column=int(n)+1) for j in range(int(t)): index = Label(input_data, text="Test Case "+str(j+1)) index.grid(row=j+1, column=0) # global dataset dataset = [] for j in range(int(t)): test_case_data=[] for i in range(int(n)+1): data_entry = Entry(input_data) data_entry.grid(row=j+1, column=i+1) test_case_data.append(data_entry) dataset.append(test_case_data) make_calculations = Button(input_data, text="Run", command=run(data=None, d=0)) make_calculations.grid(row=int(t)+2,column=int(n)+3) def quitconfirm(): if tkinter.messagebox.askokcancel("Quit", "Do you really wish to quit?"): main_window.destroy() input_data.protocol("WM_DELETE_WINDOW", quitconfirm) input_data.mainloop() input_data_button = Button(data_details, text="Input Data", command=input_data_window) input_data_button.grid(row=2, column=1) def quitconfirm(): if tkinter.messagebox.askokcancel("Quit", "Do you really wish to quit?"): main_window.destroy() data_details.protocol("WM_DELETE_WINDOW", quitconfirm) data_details.mainloop() Data_Input = Button(main_window, text="Enter Data Set", command=enter_data_details_window) Data_Input.grid(row=0, column=0, padx=90, pady=20) # This function will be used to open # file in read mode and only Python files # will be opened def load_data(): data_file_src = askopenfile(mode ='r', filetypes =[('CSV Files', '*.csv')]) #print(data_file) #global dataset data_file = 
csv.reader(data_file_src) data_load = [] for row in data_file: data_load.append(row) for j in dataload[1:]: for i in j: i = float(i) run(dataload, 1) main_window.withdraw() ### To Show Loaded Data #print(dataset) #if data_file is not None: #content = open(data_file, "r") #data = content.readlines() #content = pd.read_csv(data_file) #content.to_numpy() #print(content) #print(data) Data_Load = Button(main_window, text ='Load Data and Run', command = lambda:load_data()) Data_Load.grid(row=1, column=0, padx=90, pady=20) #main_window.filename = filedialog.askopenfilename(initialdir="/", title="Select A File", filetypes=(("jpg files", "*.jpg"),("all files", "*.*"))) def quitconfirm(): if tkinter.messagebox.askokcancel("Quit", "Do you really wish to quit?"): main_window.destroy() main_window.protocol("WM_DELETE_WINDOW", quitconfirm) main_window.mainloop() #print(n,t)
og_hn = hn
conditional_block
Taguchi_Final_v0.0.py
# Taguchi Method Final from tkinter import * import pandas as pd import math import copy import tkinter.messagebox import matplotlib.pyplot as plt from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg from tkinter.filedialog import askopenfile import csv main_window = Tk() #Heading = Label(main_window, text = "MaTrix") #Heading.grid(row=0,column=1) n = 0 t = 0
og_hn = [] sn_level_table = [] sn_change_table = [] Mt_o_table = [] useful_parameters = [] sn_case_table = [] predict_data = [] dataset = [] #def reset_values(): # n = 0 # t = 0 # us = [] # sn = [] # og_n = 0 # og_t = 0 # og_bn = [] # og_hn = [] # sn_level_table = [] # sn_change_table = [] # Mt_o_table = [] # useful_parameters = [] # sn_case_table = [] # predict_data = [] #tdata = [] main_window.title("MADEIN MaTrix") def run(data, d): #global tdata global n global t n = int(n) t = int(t) #print(n,t) #print(dataset[0][0].get()) global dataset #global data_file if d==0: for j in range(t): for i in range(n+1): dataset[j][i] = int(dataset[j][i].get()) else: dataset = data[1:] # for i in range(n+1): # tdata[j][i]=dataset[j][i].get() tdata = [] tdata = copy.deepcopy(dataset) #print(tdata) tally_table = [[1,1,1,1,1,1,1,1,1,1,1], [1,1,1,1,1,2,2,2,2,2,2], [1,1,2,2,2,1,1,1,2,2,2], [1,2,1,2,2,1,2,2,1,1,2], [1,2,2,1,2,2,1,2,1,2,1], [1,2,2,2,1,2,2,1,2,1,1], [2,1,2,2,1,1,2,2,1,2,1], [2,1,2,1,2,2,2,1,1,1,2], [2,1,1,2,2,2,1,2,2,1,1], [2,2,2,1,1,1,1,2,2,1,2], [2,2,1,2,1,2,1,1,1,2,2], [2,2,1,1,2,1,2,1,2,2,1]] sample_data =[[34.27,7.10,20.08,24.30,9.48,1.17,3.60,49.77], [26.78,21.71,15.23,23.84,7.00,1.74,3.70,53.73], [17.01,26.04,19.65,23.16,9.41,1.12,3.60,54.10], [23.77,22.25,15.40,25.67,7.00,2.21,3.70,54.29], [22.11,21.71,19.91,23.84,7.00,1.74,3.70,56.27], [22.14,30.49,11.15,23.88,7.00,1.74,3.60,56.45], [22.11,21.71,19.91,23.84,7.00,1.74,3.70,59.14], [20.81,21.05,19.25,26.56,7.00,1.63,3.69,59.89], [12.18,31.64,19.91,23.84,7.00,1.74,3.70,60.59], [19.66,23.15,21.35,22.75,7.00,2.37,3.71,61.51]] #test_data1 = [[6,15,0.5,0,0,0,10,0.57], # [6,15,0.5,3,5,5,13,0.43], # [6,25,2.5,0,0,5,13,0.51], # [6,25,2.5,3,5,0,10,0.51], # [10,15,2.5,0,5,0,13,0.51], # [10,15,2.5,3,0,5,10,0.55], # [10,25,0.5,0,5,5,10,0.5]] tdata = copy.deepcopy(sample_data) n = 7 t = 10 #adding parameter(strength in this case) to new list z = [] for j in range(t): z.append(tdata[j][-1]) #computing avg zavg = sum(z)/len(z) 
zsub = [] #finding two test case parameter values(strength) closest to avg for j in range(t): zsub.append([j,abs(z[j]-zavg)]) zsub = sorted(zsub, key = lambda a:a[1]) close2 = [zsub[0][0], zsub[1][0]] #creating unit space global us us = [] for i in range(n+1): us.append((tdata[zsub[0][0]][i]+tdata[zsub[1][0]][i])/2) #print(us) deleted_testcase_index = [zsub[0][0],zsub[1][0]] tdata.pop(zsub[0][0]) tdata.pop(zsub[1][0]) #testdata is now signal data for j in range(t-2): for i in range(n+1): tdata[j][i] = tdata[j][i] - us[i] #testdata is now normalized signal data nsdata = copy.deepcopy(tdata) global sn sn = [] global og_n og_n = n global og_bn og_bn = [] global og_hn og_hn = [] #SN Ratio Calculation Function def snr_calc(mat_used): global n global t global og_n tdata = copy.deepcopy(nsdata) i_c = 0 for i in range(n-1,-1,-1): if mat_used[i]==2: i_c += 1 for j in range(t-2): tdata[j].pop(i) n = n - i_c #Printing current rounded off tdata #for j in tdata: # for i in j: # print(round(i,2),end = " ") # print("") #b and sb calculation (propotional coefficient and variation of propotional term) bn = [] sbn = [] #calc r r = 0 for j in range(t-2): r = r + tdata[j][-1]**2 #adding each b for i in range(n): b = 0 sb = 0 for j in range(t-2): b = b + tdata[j][i]*tdata[j][-1] bn.append(b/r) sbn.append((b**2)/r) global og_bn if n == og_n: og_bn = bn ### Printing b values #print("Propotional Coefficient Values: ") #print(bn) #print("") ### Printing rounded off b values #for i in bn: # print(round(i,3), end=" ") #print("") #st calculation stn = [] for i in range(n): st = 0 for j in range(t-2): st = st + tdata[j][i]**2 stn.append(st) #se calculation sen = [] for i in range(n): sen.append(stn[i] - sbn[i]) #ve calculation ven = [] #l value l = t-2 for i in range(n): ve = sen[i]/(l-1) ven.append(ve) #comparing ve and sb to get h(sn ratio) value hn = [] for i in range(n): if sbn[i]>ven[i]: h = (sbn[i]-ven[i])/(r*ven[i]) else: h = 0 hn.append(h) #print(hn) global og_hn if n == og_n: og_hn = 
hn ### Printing h values #print(hn) #print("") ### Printing rounded off h values #for i in hn: # print(round(i,3), end=" ") #print("") #rounding off #for i in range(n): # bn[i] = round(bn[i],3) # hn[i] = round(hn[i],3) #print(bn) #print(hn) #computation of integrated estimated strength value M for each data item Mt = [] hsum = 0 for i in hn: hsum = hsum + i for j in range(t-2): m = 0 for i in range(n): m += (hn[i] * tdata[j][i])/bn[i] Mt.append(m/hsum) #M est and actual table creation Mtable = [["Data No.","Measured M","Integrated Estimated M"]] for j in range(t-2): Mtable.append([j,tdata[j][-1],Mt[j]]) ### Printing M change table #for i in Mtable: # print(i) #print("") #propotional equation L calculation lm = 0 for j in range(t-2): lm += tdata[j][-1]*Mt[j] #total variation St calculation stm = 0 for j in range(t-2): stm += Mt[j]**2 #variation of propotional term calculation sbm = (lm**2)/r #error variation calculation sem = stm - sbm #error variance calculation vem = sem/(l-1) ### Integrated SN Ratio Calculation snh = (sbm - vem)/(r*vem) snh = 10*(math.log(snh,10)) #sn.append(round(snh,2)) n = og_n return snh #SN ratio for all cases for k in range(12): sn.append(snr_calc(tally_table[k])) ### Printing integrated SN ratio values #print(sn) ### Printing rounded off integrated SN ratio values #for i in sn: # print(round(i,2),end=" ") #print("") #material wise relative importance calculation # Level 1 = used ; Level 2 = not used global sn_level_table sn_level_table = [["Item/Parameter","Level 1","Level 2"]] for i in range(n): l1 = 0 l2 = 0 for j in range(12): if tally_table[j][i]==1: l1 += sn[j] else: l2 += sn[j] sn_level_table.append([i+1,l1/6,l2/6]) # SN Percentage Change Table global sn_change_table sn_change_table = [["Item/Parameter", "SN Ratio % Change"]] for i in range(n): sn_pchange = ((sn_level_table[i+1][2]-sn_level_table[i+1][1])/sn_level_table[i+1][2])*100 sn_change_table.append([i+1, sn_pchange]) ### Printing Level Table (SN Ratio Change) for i in 
range(n): for j in range(2): sn_level_table[i+1][j+1]=round(sn_level_table[i+1][j+1],2) for i in sn_level_table: print(i) ### CASE 1 # Printing SN ( h ) Values when all materials used #print(og_hn) #global og_hn case1_index = [] for i in range(n): if og_hn[i]>0: case1_index.append(i+1) case1_tally = [2,2,2,2,2,2,2,2,2,2,2] for i in case1_index: case1_tally[i-1]=1 #print(case1_index) ### CASE 2 case2_index = [] ''' for i in range(1,n+1): if sn_level_table[i][1]>sn_level_table[i][2]: case2_index.append(i) ''' for i in range(1,n+1): if sn_change_table[i][1]>1: case2_index.append(i) case2_tally = [2,2,2,2,2,2,2,2,2,2,2] for i in case2_index: case2_tally[i-1]=1 #print(case2_index) # Finding Useful Parameters global useful_parameters useful_parameters = [value for value in case1_index if value in case2_index] #useful_parameters.pop(2) #print(useful_parameters) # Printing original b values when all materials used #print(og_bn) # Computing integrated estimated M value under optimum conditions #global og_bn Mt_o = [] hsum_o = 0 for i in useful_parameters: hsum_o += og_hn[i-1] for j in range(t-2): M_o = 0 for i in useful_parameters: M_o += (og_hn[i-1]*nsdata[j][i-1])/og_bn[i-1] Mt_o.append(M_o/hsum_o) #print(Mt_o) global Mt_o_table Mt_o_table = [["Data No.", "Measured Value M", "Intergrated Estimate Value M"]] for j in range(t-2): Mt_o_table.append([j+1, round(nsdata[j][-1],2), round(Mt_o[j],2)]) # Printing Measured M value and integrated estimate M value in case 2 for i in Mt_o_table: print(i) #Printing Comparision of the Integrated Estimate SN Ratio for both Cases global sn_case_table sn_case_table = [["Case", "Used Items/Parameters", "Integrated Estimate SN Ratio (dB)"]] sn_case_table.append([1, case1_index, snr_calc(case1_tally)]) sn_case_table.append([2, useful_parameters, snr_calc(case2_tally)]) for i in sn_case_table: print(i) #input_data.withdraw() graph_data = [] for i in range(len(sn_level_table[1:])): sn_data = {"Level": ["L 1", "L 2"], "SN Ratio": 
[sn_level_table[i+1][1],sn_level_table[i+1][2]]} #"Material": [level_data[i+1][0]], graph_data.append(sn_data) plot_data = Toplevel(main_window) plot_data.title("Plotting Data") fig, axs = plt.subplots(1, len(sn_level_table[1:]), sharey=True, figsize=(15,5)) fig.suptitle('Parameter Wise SN Ratio Change') graph = FigureCanvasTkAgg(fig, plot_data) graph.get_tk_widget().pack() #side=tk.LEFT, fill=tk.BOTH #df1 = df1.groupby('Parameter and Level').sum() #axs = plt.subplots(1, len(level_data)-1, sharey=True) for i in range(len(sn_level_table[1:])): df = pd.DataFrame(graph_data[i], columns=["Level","SN Ratio"]) df = df.groupby("Level").sum() c = "" if sn_change_table[i+1][1]>2 and og_hn[i]>0: c = 'g' elif sn_change_table[i+1][1]>0 and og_hn[i]>0: c = 'y' else: c = 'r' df.plot(ax=axs[i], color=c, marker='o', fontsize=10) #axs[i].xaxis.set_major_locator(MultipleLocator(2)) axs[i].set_title("Parameter "+str(i+1)) def edit_data(): input_data.deiconify() plot_data.destroy() if d==0: edit_data_btn = Button(plot_data,text='Edit Data',command=edit_data) edit_data_btn.pack() ### Predicting Window def predict_input(): prediction_input = Toplevel(main_window) prediction_input.title("Enter Data to Predict Feature") parameter_vars = [] for i in range(int(n)): state = IntVar() select_parameter = Checkbutton(prediction_input, text = "Parameter "+str(i+1), variable=state) select_parameter.grid(row=0, column=i+1) parameter_vars.append(state) if i+1 in useful_parameters: select_parameter.select() feature_name = Label(prediction_input, text="Feature") feature_name.grid(row=0, column=int(n)+1) feature_prediction = Text(prediction_input, height=1,width=15) feature_prediction.grid(row=1, column=int(n)+1) def predict(): global useful_parameters predict_parameters = [] for i in range(len(useful_parameters)): predict_parameters.append(useful_parameters[i]) for i in range(int(n)): if parameter_vars[i].get()==1 and i+1 not in predict_parameters: predict_parameters.append(i+1) global predict_data 
#for i in range(len(predict_data)): predict_data = [23.77,22.25,15.40,25.67,7.00,2.21,3.70] #predict_data = [17.44,21.71,24.58,23.84,7.00,1.74,3.70] global us for i in range(len(predict_data)): predict_data[i] = predict_data[i] - us[i] M_p = 0 hsum_p = 0 for i in predict_parameters: hsum_p += og_hn[i-1] for i in predict_parameters: M_p += (og_hn[i-1]*predict_data[i-1])/og_bn[i-1] M_p = M_p/hsum_p M_predicted = us[-1]+M_p feature_prediction.delete(END) feature_prediction.insert(END, M_predicted) predict_button = Button(prediction_input, text="Predict", command=predict) predict_button.grid(row=1, column=0) #predict_more = Button(prediction_input, text="Clear Values", command=None) #predict_more.grid(row=1, column=0) global predict_data predict_data = [] for i in range(int(n)): predict_entry = Entry(prediction_input) predict_entry.grid(row=1, column=i+1) predict_data.append(predict_entry) def closeall(): prediction_input.quit() #plot_data.destroy() main_window.quit() prediction_input.protocol("WM WM_DELETE_WINDOW", closeall) prediction_input.mainloop() #plot_data.withdraw() predict_btn = Button(plot_data,text='Predict',command=predict_input) predict_btn.pack() def quit(): #sys.exit() plot_data.destroy() main_window.quit() #print(1) plot_data.protocol("WM_DELETE_WINDOW", quit) plot_data.mainloop() # input_data.withdraw() # print(1) def enter_data_details_window(): main_window.withdraw() data_details = Toplevel(main_window) data_details.title("Enter Data Details") parameter_label = Label(data_details, text="Enter Number of Parameters:") parameter_label.grid(row=0, column=0) parameter_entry = Entry(data_details) parameter_entry.grid(row=0, column=1) testcase_label = Label(data_details, text="Enter Number of Test Cases:") testcase_label.grid(row=1, column=0) testcase_entry = Entry(data_details) testcase_entry.grid(row=1, column=1) def input_data_window(): data_details.withdraw() global n n = parameter_entry.get() global t t = testcase_entry.get() #print(n,t) input_data = 
Toplevel(data_details) input_data.title("Input Data") #indentation run func def edit_details(): MsgBox = tkinter.messagebox.askquestion('Go Back','You will lose the input data, do you want to continue?',icon = 'warning') if MsgBox == 'yes': data_details.deiconify() input_data.destroy() edit_data_details = Button(input_data, text="Edit Data Details", command=edit_details) edit_data_details.grid(row=0, column=0) for i in range(int(n)): header = Label(input_data, text="Parameter "+str(i+1)) header.grid(row=0, column=i+1) feature_label = Label(input_data, text="Feature") feature_label.grid(row=0, column=int(n)+1) for j in range(int(t)): index = Label(input_data, text="Test Case "+str(j+1)) index.grid(row=j+1, column=0) # global dataset dataset = [] for j in range(int(t)): test_case_data=[] for i in range(int(n)+1): data_entry = Entry(input_data) data_entry.grid(row=j+1, column=i+1) test_case_data.append(data_entry) dataset.append(test_case_data) make_calculations = Button(input_data, text="Run", command=run(data=None, d=0)) make_calculations.grid(row=int(t)+2,column=int(n)+3) def quitconfirm(): if tkinter.messagebox.askokcancel("Quit", "Do you really wish to quit?"): main_window.destroy() input_data.protocol("WM_DELETE_WINDOW", quitconfirm) input_data.mainloop() input_data_button = Button(data_details, text="Input Data", command=input_data_window) input_data_button.grid(row=2, column=1) def quitconfirm(): if tkinter.messagebox.askokcancel("Quit", "Do you really wish to quit?"): main_window.destroy() data_details.protocol("WM_DELETE_WINDOW", quitconfirm) data_details.mainloop() Data_Input = Button(main_window, text="Enter Data Set", command=enter_data_details_window) Data_Input.grid(row=0, column=0, padx=90, pady=20) # This function will be used to open # file in read mode and only Python files # will be opened def load_data(): data_file_src = askopenfile(mode ='r', filetypes =[('CSV Files', '*.csv')]) #print(data_file) #global dataset data_file = 
csv.reader(data_file_src) data_load = [] for row in data_file: data_load.append(row) for j in dataload[1:]: for i in j: i = float(i) run(dataload, 1) main_window.withdraw() ### To Show Loaded Data #print(dataset) #if data_file is not None: #content = open(data_file, "r") #data = content.readlines() #content = pd.read_csv(data_file) #content.to_numpy() #print(content) #print(data) Data_Load = Button(main_window, text ='Load Data and Run', command = lambda:load_data()) Data_Load.grid(row=1, column=0, padx=90, pady=20) #main_window.filename = filedialog.askopenfilename(initialdir="/", title="Select A File", filetypes=(("jpg files", "*.jpg"),("all files", "*.*"))) def quitconfirm(): if tkinter.messagebox.askokcancel("Quit", "Do you really wish to quit?"): main_window.destroy() main_window.protocol("WM_DELETE_WINDOW", quitconfirm) main_window.mainloop() #print(n,t)
us = [] sn = [] og_n = 0 og_t = 0 og_bn = []
random_line_split
Taguchi_Final_v0.0.py
# Taguchi Method Final from tkinter import * import pandas as pd import math import copy import tkinter.messagebox import matplotlib.pyplot as plt from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg from tkinter.filedialog import askopenfile import csv main_window = Tk() #Heading = Label(main_window, text = "MaTrix") #Heading.grid(row=0,column=1) n = 0 t = 0 us = [] sn = [] og_n = 0 og_t = 0 og_bn = [] og_hn = [] sn_level_table = [] sn_change_table = [] Mt_o_table = [] useful_parameters = [] sn_case_table = [] predict_data = [] dataset = [] #def reset_values(): # n = 0 # t = 0 # us = [] # sn = [] # og_n = 0 # og_t = 0 # og_bn = [] # og_hn = [] # sn_level_table = [] # sn_change_table = [] # Mt_o_table = [] # useful_parameters = [] # sn_case_table = [] # predict_data = [] #tdata = [] main_window.title("MADEIN MaTrix") def run(data, d): #global tdata global n global t n = int(n) t = int(t) #print(n,t) #print(dataset[0][0].get()) global dataset #global data_file if d==0: for j in range(t): for i in range(n+1): dataset[j][i] = int(dataset[j][i].get()) else: dataset = data[1:] # for i in range(n+1): # tdata[j][i]=dataset[j][i].get() tdata = [] tdata = copy.deepcopy(dataset) #print(tdata) tally_table = [[1,1,1,1,1,1,1,1,1,1,1], [1,1,1,1,1,2,2,2,2,2,2], [1,1,2,2,2,1,1,1,2,2,2], [1,2,1,2,2,1,2,2,1,1,2], [1,2,2,1,2,2,1,2,1,2,1], [1,2,2,2,1,2,2,1,2,1,1], [2,1,2,2,1,1,2,2,1,2,1], [2,1,2,1,2,2,2,1,1,1,2], [2,1,1,2,2,2,1,2,2,1,1], [2,2,2,1,1,1,1,2,2,1,2], [2,2,1,2,1,2,1,1,1,2,2], [2,2,1,1,2,1,2,1,2,2,1]] sample_data =[[34.27,7.10,20.08,24.30,9.48,1.17,3.60,49.77], [26.78,21.71,15.23,23.84,7.00,1.74,3.70,53.73], [17.01,26.04,19.65,23.16,9.41,1.12,3.60,54.10], [23.77,22.25,15.40,25.67,7.00,2.21,3.70,54.29], [22.11,21.71,19.91,23.84,7.00,1.74,3.70,56.27], [22.14,30.49,11.15,23.88,7.00,1.74,3.60,56.45], [22.11,21.71,19.91,23.84,7.00,1.74,3.70,59.14], [20.81,21.05,19.25,26.56,7.00,1.63,3.69,59.89], [12.18,31.64,19.91,23.84,7.00,1.74,3.70,60.59], 
[19.66,23.15,21.35,22.75,7.00,2.37,3.71,61.51]] #test_data1 = [[6,15,0.5,0,0,0,10,0.57], # [6,15,0.5,3,5,5,13,0.43], # [6,25,2.5,0,0,5,13,0.51], # [6,25,2.5,3,5,0,10,0.51], # [10,15,2.5,0,5,0,13,0.51], # [10,15,2.5,3,0,5,10,0.55], # [10,25,0.5,0,5,5,10,0.5]] tdata = copy.deepcopy(sample_data) n = 7 t = 10 #adding parameter(strength in this case) to new list z = [] for j in range(t): z.append(tdata[j][-1]) #computing avg zavg = sum(z)/len(z) zsub = [] #finding two test case parameter values(strength) closest to avg for j in range(t): zsub.append([j,abs(z[j]-zavg)]) zsub = sorted(zsub, key = lambda a:a[1]) close2 = [zsub[0][0], zsub[1][0]] #creating unit space global us us = [] for i in range(n+1): us.append((tdata[zsub[0][0]][i]+tdata[zsub[1][0]][i])/2) #print(us) deleted_testcase_index = [zsub[0][0],zsub[1][0]] tdata.pop(zsub[0][0]) tdata.pop(zsub[1][0]) #testdata is now signal data for j in range(t-2): for i in range(n+1): tdata[j][i] = tdata[j][i] - us[i] #testdata is now normalized signal data nsdata = copy.deepcopy(tdata) global sn sn = [] global og_n og_n = n global og_bn og_bn = [] global og_hn og_hn = [] #SN Ratio Calculation Function def snr_calc(mat_used): global n global t global og_n tdata = copy.deepcopy(nsdata) i_c = 0 for i in range(n-1,-1,-1): if mat_used[i]==2: i_c += 1 for j in range(t-2): tdata[j].pop(i) n = n - i_c #Printing current rounded off tdata #for j in tdata: # for i in j: # print(round(i,2),end = " ") # print("") #b and sb calculation (propotional coefficient and variation of propotional term) bn = [] sbn = [] #calc r r = 0 for j in range(t-2): r = r + tdata[j][-1]**2 #adding each b for i in range(n): b = 0 sb = 0 for j in range(t-2): b = b + tdata[j][i]*tdata[j][-1] bn.append(b/r) sbn.append((b**2)/r) global og_bn if n == og_n: og_bn = bn ### Printing b values #print("Propotional Coefficient Values: ") #print(bn) #print("") ### Printing rounded off b values #for i in bn: # print(round(i,3), end=" ") #print("") #st calculation stn = [] 
for i in range(n): st = 0 for j in range(t-2): st = st + tdata[j][i]**2 stn.append(st) #se calculation sen = [] for i in range(n): sen.append(stn[i] - sbn[i]) #ve calculation ven = [] #l value l = t-2 for i in range(n): ve = sen[i]/(l-1) ven.append(ve) #comparing ve and sb to get h(sn ratio) value hn = [] for i in range(n): if sbn[i]>ven[i]: h = (sbn[i]-ven[i])/(r*ven[i]) else: h = 0 hn.append(h) #print(hn) global og_hn if n == og_n: og_hn = hn ### Printing h values #print(hn) #print("") ### Printing rounded off h values #for i in hn: # print(round(i,3), end=" ") #print("") #rounding off #for i in range(n): # bn[i] = round(bn[i],3) # hn[i] = round(hn[i],3) #print(bn) #print(hn) #computation of integrated estimated strength value M for each data item Mt = [] hsum = 0 for i in hn: hsum = hsum + i for j in range(t-2): m = 0 for i in range(n): m += (hn[i] * tdata[j][i])/bn[i] Mt.append(m/hsum) #M est and actual table creation Mtable = [["Data No.","Measured M","Integrated Estimated M"]] for j in range(t-2): Mtable.append([j,tdata[j][-1],Mt[j]]) ### Printing M change table #for i in Mtable: # print(i) #print("") #propotional equation L calculation lm = 0 for j in range(t-2): lm += tdata[j][-1]*Mt[j] #total variation St calculation stm = 0 for j in range(t-2): stm += Mt[j]**2 #variation of propotional term calculation sbm = (lm**2)/r #error variation calculation sem = stm - sbm #error variance calculation vem = sem/(l-1) ### Integrated SN Ratio Calculation snh = (sbm - vem)/(r*vem) snh = 10*(math.log(snh,10)) #sn.append(round(snh,2)) n = og_n return snh #SN ratio for all cases for k in range(12): sn.append(snr_calc(tally_table[k])) ### Printing integrated SN ratio values #print(sn) ### Printing rounded off integrated SN ratio values #for i in sn: # print(round(i,2),end=" ") #print("") #material wise relative importance calculation # Level 1 = used ; Level 2 = not used global sn_level_table sn_level_table = [["Item/Parameter","Level 1","Level 2"]] for i in range(n): l1 = 
0 l2 = 0 for j in range(12): if tally_table[j][i]==1: l1 += sn[j] else: l2 += sn[j] sn_level_table.append([i+1,l1/6,l2/6]) # SN Percentage Change Table global sn_change_table sn_change_table = [["Item/Parameter", "SN Ratio % Change"]] for i in range(n): sn_pchange = ((sn_level_table[i+1][2]-sn_level_table[i+1][1])/sn_level_table[i+1][2])*100 sn_change_table.append([i+1, sn_pchange]) ### Printing Level Table (SN Ratio Change) for i in range(n): for j in range(2): sn_level_table[i+1][j+1]=round(sn_level_table[i+1][j+1],2) for i in sn_level_table: print(i) ### CASE 1 # Printing SN ( h ) Values when all materials used #print(og_hn) #global og_hn case1_index = [] for i in range(n): if og_hn[i]>0: case1_index.append(i+1) case1_tally = [2,2,2,2,2,2,2,2,2,2,2] for i in case1_index: case1_tally[i-1]=1 #print(case1_index) ### CASE 2 case2_index = [] ''' for i in range(1,n+1): if sn_level_table[i][1]>sn_level_table[i][2]: case2_index.append(i) ''' for i in range(1,n+1): if sn_change_table[i][1]>1: case2_index.append(i) case2_tally = [2,2,2,2,2,2,2,2,2,2,2] for i in case2_index: case2_tally[i-1]=1 #print(case2_index) # Finding Useful Parameters global useful_parameters useful_parameters = [value for value in case1_index if value in case2_index] #useful_parameters.pop(2) #print(useful_parameters) # Printing original b values when all materials used #print(og_bn) # Computing integrated estimated M value under optimum conditions #global og_bn Mt_o = [] hsum_o = 0 for i in useful_parameters: hsum_o += og_hn[i-1] for j in range(t-2): M_o = 0 for i in useful_parameters: M_o += (og_hn[i-1]*nsdata[j][i-1])/og_bn[i-1] Mt_o.append(M_o/hsum_o) #print(Mt_o) global Mt_o_table Mt_o_table = [["Data No.", "Measured Value M", "Intergrated Estimate Value M"]] for j in range(t-2): Mt_o_table.append([j+1, round(nsdata[j][-1],2), round(Mt_o[j],2)]) # Printing Measured M value and integrated estimate M value in case 2 for i in Mt_o_table: print(i) #Printing Comparision of the Integrated Estimate SN 
Ratio for both Cases global sn_case_table sn_case_table = [["Case", "Used Items/Parameters", "Integrated Estimate SN Ratio (dB)"]] sn_case_table.append([1, case1_index, snr_calc(case1_tally)]) sn_case_table.append([2, useful_parameters, snr_calc(case2_tally)]) for i in sn_case_table: print(i) #input_data.withdraw() graph_data = [] for i in range(len(sn_level_table[1:])): sn_data = {"Level": ["L 1", "L 2"], "SN Ratio": [sn_level_table[i+1][1],sn_level_table[i+1][2]]} #"Material": [level_data[i+1][0]], graph_data.append(sn_data) plot_data = Toplevel(main_window) plot_data.title("Plotting Data") fig, axs = plt.subplots(1, len(sn_level_table[1:]), sharey=True, figsize=(15,5)) fig.suptitle('Parameter Wise SN Ratio Change') graph = FigureCanvasTkAgg(fig, plot_data) graph.get_tk_widget().pack() #side=tk.LEFT, fill=tk.BOTH #df1 = df1.groupby('Parameter and Level').sum() #axs = plt.subplots(1, len(level_data)-1, sharey=True) for i in range(len(sn_level_table[1:])): df = pd.DataFrame(graph_data[i], columns=["Level","SN Ratio"]) df = df.groupby("Level").sum() c = "" if sn_change_table[i+1][1]>2 and og_hn[i]>0: c = 'g' elif sn_change_table[i+1][1]>0 and og_hn[i]>0: c = 'y' else: c = 'r' df.plot(ax=axs[i], color=c, marker='o', fontsize=10) #axs[i].xaxis.set_major_locator(MultipleLocator(2)) axs[i].set_title("Parameter "+str(i+1)) def edit_data(): input_data.deiconify() plot_data.destroy() if d==0: edit_data_btn = Button(plot_data,text='Edit Data',command=edit_data) edit_data_btn.pack() ### Predicting Window def predict_input(): prediction_input = Toplevel(main_window) prediction_input.title("Enter Data to Predict Feature") parameter_vars = [] for i in range(int(n)): state = IntVar() select_parameter = Checkbutton(prediction_input, text = "Parameter "+str(i+1), variable=state) select_parameter.grid(row=0, column=i+1) parameter_vars.append(state) if i+1 in useful_parameters: select_parameter.select() feature_name = Label(prediction_input, text="Feature") feature_name.grid(row=0, 
column=int(n)+1) feature_prediction = Text(prediction_input, height=1,width=15) feature_prediction.grid(row=1, column=int(n)+1) def predict(): global useful_parameters predict_parameters = [] for i in range(len(useful_parameters)): predict_parameters.append(useful_parameters[i]) for i in range(int(n)): if parameter_vars[i].get()==1 and i+1 not in predict_parameters: predict_parameters.append(i+1) global predict_data #for i in range(len(predict_data)): predict_data = [23.77,22.25,15.40,25.67,7.00,2.21,3.70] #predict_data = [17.44,21.71,24.58,23.84,7.00,1.74,3.70] global us for i in range(len(predict_data)): predict_data[i] = predict_data[i] - us[i] M_p = 0 hsum_p = 0 for i in predict_parameters: hsum_p += og_hn[i-1] for i in predict_parameters: M_p += (og_hn[i-1]*predict_data[i-1])/og_bn[i-1] M_p = M_p/hsum_p M_predicted = us[-1]+M_p feature_prediction.delete(END) feature_prediction.insert(END, M_predicted) predict_button = Button(prediction_input, text="Predict", command=predict) predict_button.grid(row=1, column=0) #predict_more = Button(prediction_input, text="Clear Values", command=None) #predict_more.grid(row=1, column=0) global predict_data predict_data = [] for i in range(int(n)): predict_entry = Entry(prediction_input) predict_entry.grid(row=1, column=i+1) predict_data.append(predict_entry) def closeall(): prediction_input.quit() #plot_data.destroy() main_window.quit() prediction_input.protocol("WM WM_DELETE_WINDOW", closeall) prediction_input.mainloop() #plot_data.withdraw() predict_btn = Button(plot_data,text='Predict',command=predict_input) predict_btn.pack() def quit(): #sys.exit() plot_data.destroy() main_window.quit() #print(1) plot_data.protocol("WM_DELETE_WINDOW", quit) plot_data.mainloop() # input_data.withdraw() # print(1) def enter_data_details_window(): main_window.withdraw() data_details = Toplevel(main_window) data_details.title("Enter Data Details") parameter_label = Label(data_details, text="Enter Number of Parameters:") 
parameter_label.grid(row=0, column=0) parameter_entry = Entry(data_details) parameter_entry.grid(row=0, column=1) testcase_label = Label(data_details, text="Enter Number of Test Cases:") testcase_label.grid(row=1, column=0) testcase_entry = Entry(data_details) testcase_entry.grid(row=1, column=1) def input_data_window():
input_data_button = Button(data_details, text="Input Data", command=input_data_window) input_data_button.grid(row=2, column=1) def quitconfirm(): if tkinter.messagebox.askokcancel("Quit", "Do you really wish to quit?"): main_window.destroy() data_details.protocol("WM_DELETE_WINDOW", quitconfirm) data_details.mainloop() Data_Input = Button(main_window, text="Enter Data Set", command=enter_data_details_window) Data_Input.grid(row=0, column=0, padx=90, pady=20) # This function will be used to open # file in read mode and only Python files # will be opened def load_data(): data_file_src = askopenfile(mode ='r', filetypes =[('CSV Files', '*.csv')]) #print(data_file) #global dataset data_file = csv.reader(data_file_src) data_load = [] for row in data_file: data_load.append(row) for j in dataload[1:]: for i in j: i = float(i) run(dataload, 1) main_window.withdraw() ### To Show Loaded Data #print(dataset) #if data_file is not None: #content = open(data_file, "r") #data = content.readlines() #content = pd.read_csv(data_file) #content.to_numpy() #print(content) #print(data) Data_Load = Button(main_window, text ='Load Data and Run', command = lambda:load_data()) Data_Load.grid(row=1, column=0, padx=90, pady=20) #main_window.filename = filedialog.askopenfilename(initialdir="/", title="Select A File", filetypes=(("jpg files", "*.jpg"),("all files", "*.*"))) def quitconfirm(): if tkinter.messagebox.askokcancel("Quit", "Do you really wish to quit?"): main_window.destroy() main_window.protocol("WM_DELETE_WINDOW", quitconfirm) main_window.mainloop() #print(n,t)
data_details.withdraw() global n n = parameter_entry.get() global t t = testcase_entry.get() #print(n,t) input_data = Toplevel(data_details) input_data.title("Input Data") #indentation run func def edit_details(): MsgBox = tkinter.messagebox.askquestion('Go Back','You will lose the input data, do you want to continue?',icon = 'warning') if MsgBox == 'yes': data_details.deiconify() input_data.destroy() edit_data_details = Button(input_data, text="Edit Data Details", command=edit_details) edit_data_details.grid(row=0, column=0) for i in range(int(n)): header = Label(input_data, text="Parameter "+str(i+1)) header.grid(row=0, column=i+1) feature_label = Label(input_data, text="Feature") feature_label.grid(row=0, column=int(n)+1) for j in range(int(t)): index = Label(input_data, text="Test Case "+str(j+1)) index.grid(row=j+1, column=0) # global dataset dataset = [] for j in range(int(t)): test_case_data=[] for i in range(int(n)+1): data_entry = Entry(input_data) data_entry.grid(row=j+1, column=i+1) test_case_data.append(data_entry) dataset.append(test_case_data) make_calculations = Button(input_data, text="Run", command=run(data=None, d=0)) make_calculations.grid(row=int(t)+2,column=int(n)+3) def quitconfirm(): if tkinter.messagebox.askokcancel("Quit", "Do you really wish to quit?"): main_window.destroy() input_data.protocol("WM_DELETE_WINDOW", quitconfirm) input_data.mainloop()
identifier_body
Taguchi_Final_v0.0.py
# Taguchi Method Final from tkinter import * import pandas as pd import math import copy import tkinter.messagebox import matplotlib.pyplot as plt from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg from tkinter.filedialog import askopenfile import csv main_window = Tk() #Heading = Label(main_window, text = "MaTrix") #Heading.grid(row=0,column=1) n = 0 t = 0 us = [] sn = [] og_n = 0 og_t = 0 og_bn = [] og_hn = [] sn_level_table = [] sn_change_table = [] Mt_o_table = [] useful_parameters = [] sn_case_table = [] predict_data = [] dataset = [] #def reset_values(): # n = 0 # t = 0 # us = [] # sn = [] # og_n = 0 # og_t = 0 # og_bn = [] # og_hn = [] # sn_level_table = [] # sn_change_table = [] # Mt_o_table = [] # useful_parameters = [] # sn_case_table = [] # predict_data = [] #tdata = [] main_window.title("MADEIN MaTrix") def run(data, d): #global tdata global n global t n = int(n) t = int(t) #print(n,t) #print(dataset[0][0].get()) global dataset #global data_file if d==0: for j in range(t): for i in range(n+1): dataset[j][i] = int(dataset[j][i].get()) else: dataset = data[1:] # for i in range(n+1): # tdata[j][i]=dataset[j][i].get() tdata = [] tdata = copy.deepcopy(dataset) #print(tdata) tally_table = [[1,1,1,1,1,1,1,1,1,1,1], [1,1,1,1,1,2,2,2,2,2,2], [1,1,2,2,2,1,1,1,2,2,2], [1,2,1,2,2,1,2,2,1,1,2], [1,2,2,1,2,2,1,2,1,2,1], [1,2,2,2,1,2,2,1,2,1,1], [2,1,2,2,1,1,2,2,1,2,1], [2,1,2,1,2,2,2,1,1,1,2], [2,1,1,2,2,2,1,2,2,1,1], [2,2,2,1,1,1,1,2,2,1,2], [2,2,1,2,1,2,1,1,1,2,2], [2,2,1,1,2,1,2,1,2,2,1]] sample_data =[[34.27,7.10,20.08,24.30,9.48,1.17,3.60,49.77], [26.78,21.71,15.23,23.84,7.00,1.74,3.70,53.73], [17.01,26.04,19.65,23.16,9.41,1.12,3.60,54.10], [23.77,22.25,15.40,25.67,7.00,2.21,3.70,54.29], [22.11,21.71,19.91,23.84,7.00,1.74,3.70,56.27], [22.14,30.49,11.15,23.88,7.00,1.74,3.60,56.45], [22.11,21.71,19.91,23.84,7.00,1.74,3.70,59.14], [20.81,21.05,19.25,26.56,7.00,1.63,3.69,59.89], [12.18,31.64,19.91,23.84,7.00,1.74,3.70,60.59], 
[19.66,23.15,21.35,22.75,7.00,2.37,3.71,61.51]] #test_data1 = [[6,15,0.5,0,0,0,10,0.57], # [6,15,0.5,3,5,5,13,0.43], # [6,25,2.5,0,0,5,13,0.51], # [6,25,2.5,3,5,0,10,0.51], # [10,15,2.5,0,5,0,13,0.51], # [10,15,2.5,3,0,5,10,0.55], # [10,25,0.5,0,5,5,10,0.5]] tdata = copy.deepcopy(sample_data) n = 7 t = 10 #adding parameter(strength in this case) to new list z = [] for j in range(t): z.append(tdata[j][-1]) #computing avg zavg = sum(z)/len(z) zsub = [] #finding two test case parameter values(strength) closest to avg for j in range(t): zsub.append([j,abs(z[j]-zavg)]) zsub = sorted(zsub, key = lambda a:a[1]) close2 = [zsub[0][0], zsub[1][0]] #creating unit space global us us = [] for i in range(n+1): us.append((tdata[zsub[0][0]][i]+tdata[zsub[1][0]][i])/2) #print(us) deleted_testcase_index = [zsub[0][0],zsub[1][0]] tdata.pop(zsub[0][0]) tdata.pop(zsub[1][0]) #testdata is now signal data for j in range(t-2): for i in range(n+1): tdata[j][i] = tdata[j][i] - us[i] #testdata is now normalized signal data nsdata = copy.deepcopy(tdata) global sn sn = [] global og_n og_n = n global og_bn og_bn = [] global og_hn og_hn = [] #SN Ratio Calculation Function def snr_calc(mat_used): global n global t global og_n tdata = copy.deepcopy(nsdata) i_c = 0 for i in range(n-1,-1,-1): if mat_used[i]==2: i_c += 1 for j in range(t-2): tdata[j].pop(i) n = n - i_c #Printing current rounded off tdata #for j in tdata: # for i in j: # print(round(i,2),end = " ") # print("") #b and sb calculation (propotional coefficient and variation of propotional term) bn = [] sbn = [] #calc r r = 0 for j in range(t-2): r = r + tdata[j][-1]**2 #adding each b for i in range(n): b = 0 sb = 0 for j in range(t-2): b = b + tdata[j][i]*tdata[j][-1] bn.append(b/r) sbn.append((b**2)/r) global og_bn if n == og_n: og_bn = bn ### Printing b values #print("Propotional Coefficient Values: ") #print(bn) #print("") ### Printing rounded off b values #for i in bn: # print(round(i,3), end=" ") #print("") #st calculation stn = [] 
for i in range(n): st = 0 for j in range(t-2): st = st + tdata[j][i]**2 stn.append(st) #se calculation sen = [] for i in range(n): sen.append(stn[i] - sbn[i]) #ve calculation ven = [] #l value l = t-2 for i in range(n): ve = sen[i]/(l-1) ven.append(ve) #comparing ve and sb to get h(sn ratio) value hn = [] for i in range(n): if sbn[i]>ven[i]: h = (sbn[i]-ven[i])/(r*ven[i]) else: h = 0 hn.append(h) #print(hn) global og_hn if n == og_n: og_hn = hn ### Printing h values #print(hn) #print("") ### Printing rounded off h values #for i in hn: # print(round(i,3), end=" ") #print("") #rounding off #for i in range(n): # bn[i] = round(bn[i],3) # hn[i] = round(hn[i],3) #print(bn) #print(hn) #computation of integrated estimated strength value M for each data item Mt = [] hsum = 0 for i in hn: hsum = hsum + i for j in range(t-2): m = 0 for i in range(n): m += (hn[i] * tdata[j][i])/bn[i] Mt.append(m/hsum) #M est and actual table creation Mtable = [["Data No.","Measured M","Integrated Estimated M"]] for j in range(t-2): Mtable.append([j,tdata[j][-1],Mt[j]]) ### Printing M change table #for i in Mtable: # print(i) #print("") #propotional equation L calculation lm = 0 for j in range(t-2): lm += tdata[j][-1]*Mt[j] #total variation St calculation stm = 0 for j in range(t-2): stm += Mt[j]**2 #variation of propotional term calculation sbm = (lm**2)/r #error variation calculation sem = stm - sbm #error variance calculation vem = sem/(l-1) ### Integrated SN Ratio Calculation snh = (sbm - vem)/(r*vem) snh = 10*(math.log(snh,10)) #sn.append(round(snh,2)) n = og_n return snh #SN ratio for all cases for k in range(12): sn.append(snr_calc(tally_table[k])) ### Printing integrated SN ratio values #print(sn) ### Printing rounded off integrated SN ratio values #for i in sn: # print(round(i,2),end=" ") #print("") #material wise relative importance calculation # Level 1 = used ; Level 2 = not used global sn_level_table sn_level_table = [["Item/Parameter","Level 1","Level 2"]] for i in range(n): l1 = 
0 l2 = 0 for j in range(12): if tally_table[j][i]==1: l1 += sn[j] else: l2 += sn[j] sn_level_table.append([i+1,l1/6,l2/6]) # SN Percentage Change Table global sn_change_table sn_change_table = [["Item/Parameter", "SN Ratio % Change"]] for i in range(n): sn_pchange = ((sn_level_table[i+1][2]-sn_level_table[i+1][1])/sn_level_table[i+1][2])*100 sn_change_table.append([i+1, sn_pchange]) ### Printing Level Table (SN Ratio Change) for i in range(n): for j in range(2): sn_level_table[i+1][j+1]=round(sn_level_table[i+1][j+1],2) for i in sn_level_table: print(i) ### CASE 1 # Printing SN ( h ) Values when all materials used #print(og_hn) #global og_hn case1_index = [] for i in range(n): if og_hn[i]>0: case1_index.append(i+1) case1_tally = [2,2,2,2,2,2,2,2,2,2,2] for i in case1_index: case1_tally[i-1]=1 #print(case1_index) ### CASE 2 case2_index = [] ''' for i in range(1,n+1): if sn_level_table[i][1]>sn_level_table[i][2]: case2_index.append(i) ''' for i in range(1,n+1): if sn_change_table[i][1]>1: case2_index.append(i) case2_tally = [2,2,2,2,2,2,2,2,2,2,2] for i in case2_index: case2_tally[i-1]=1 #print(case2_index) # Finding Useful Parameters global useful_parameters useful_parameters = [value for value in case1_index if value in case2_index] #useful_parameters.pop(2) #print(useful_parameters) # Printing original b values when all materials used #print(og_bn) # Computing integrated estimated M value under optimum conditions #global og_bn Mt_o = [] hsum_o = 0 for i in useful_parameters: hsum_o += og_hn[i-1] for j in range(t-2): M_o = 0 for i in useful_parameters: M_o += (og_hn[i-1]*nsdata[j][i-1])/og_bn[i-1] Mt_o.append(M_o/hsum_o) #print(Mt_o) global Mt_o_table Mt_o_table = [["Data No.", "Measured Value M", "Intergrated Estimate Value M"]] for j in range(t-2): Mt_o_table.append([j+1, round(nsdata[j][-1],2), round(Mt_o[j],2)]) # Printing Measured M value and integrated estimate M value in case 2 for i in Mt_o_table: print(i) #Printing Comparision of the Integrated Estimate SN 
Ratio for both Cases global sn_case_table sn_case_table = [["Case", "Used Items/Parameters", "Integrated Estimate SN Ratio (dB)"]] sn_case_table.append([1, case1_index, snr_calc(case1_tally)]) sn_case_table.append([2, useful_parameters, snr_calc(case2_tally)]) for i in sn_case_table: print(i) #input_data.withdraw() graph_data = [] for i in range(len(sn_level_table[1:])): sn_data = {"Level": ["L 1", "L 2"], "SN Ratio": [sn_level_table[i+1][1],sn_level_table[i+1][2]]} #"Material": [level_data[i+1][0]], graph_data.append(sn_data) plot_data = Toplevel(main_window) plot_data.title("Plotting Data") fig, axs = plt.subplots(1, len(sn_level_table[1:]), sharey=True, figsize=(15,5)) fig.suptitle('Parameter Wise SN Ratio Change') graph = FigureCanvasTkAgg(fig, plot_data) graph.get_tk_widget().pack() #side=tk.LEFT, fill=tk.BOTH #df1 = df1.groupby('Parameter and Level').sum() #axs = plt.subplots(1, len(level_data)-1, sharey=True) for i in range(len(sn_level_table[1:])): df = pd.DataFrame(graph_data[i], columns=["Level","SN Ratio"]) df = df.groupby("Level").sum() c = "" if sn_change_table[i+1][1]>2 and og_hn[i]>0: c = 'g' elif sn_change_table[i+1][1]>0 and og_hn[i]>0: c = 'y' else: c = 'r' df.plot(ax=axs[i], color=c, marker='o', fontsize=10) #axs[i].xaxis.set_major_locator(MultipleLocator(2)) axs[i].set_title("Parameter "+str(i+1)) def edit_data(): input_data.deiconify() plot_data.destroy() if d==0: edit_data_btn = Button(plot_data,text='Edit Data',command=edit_data) edit_data_btn.pack() ### Predicting Window def predict_input(): prediction_input = Toplevel(main_window) prediction_input.title("Enter Data to Predict Feature") parameter_vars = [] for i in range(int(n)): state = IntVar() select_parameter = Checkbutton(prediction_input, text = "Parameter "+str(i+1), variable=state) select_parameter.grid(row=0, column=i+1) parameter_vars.append(state) if i+1 in useful_parameters: select_parameter.select() feature_name = Label(prediction_input, text="Feature") feature_name.grid(row=0, 
column=int(n)+1) feature_prediction = Text(prediction_input, height=1,width=15) feature_prediction.grid(row=1, column=int(n)+1) def
(): global useful_parameters predict_parameters = [] for i in range(len(useful_parameters)): predict_parameters.append(useful_parameters[i]) for i in range(int(n)): if parameter_vars[i].get()==1 and i+1 not in predict_parameters: predict_parameters.append(i+1) global predict_data #for i in range(len(predict_data)): predict_data = [23.77,22.25,15.40,25.67,7.00,2.21,3.70] #predict_data = [17.44,21.71,24.58,23.84,7.00,1.74,3.70] global us for i in range(len(predict_data)): predict_data[i] = predict_data[i] - us[i] M_p = 0 hsum_p = 0 for i in predict_parameters: hsum_p += og_hn[i-1] for i in predict_parameters: M_p += (og_hn[i-1]*predict_data[i-1])/og_bn[i-1] M_p = M_p/hsum_p M_predicted = us[-1]+M_p feature_prediction.delete(END) feature_prediction.insert(END, M_predicted) predict_button = Button(prediction_input, text="Predict", command=predict) predict_button.grid(row=1, column=0) #predict_more = Button(prediction_input, text="Clear Values", command=None) #predict_more.grid(row=1, column=0) global predict_data predict_data = [] for i in range(int(n)): predict_entry = Entry(prediction_input) predict_entry.grid(row=1, column=i+1) predict_data.append(predict_entry) def closeall(): prediction_input.quit() #plot_data.destroy() main_window.quit() prediction_input.protocol("WM WM_DELETE_WINDOW", closeall) prediction_input.mainloop() #plot_data.withdraw() predict_btn = Button(plot_data,text='Predict',command=predict_input) predict_btn.pack() def quit(): #sys.exit() plot_data.destroy() main_window.quit() #print(1) plot_data.protocol("WM_DELETE_WINDOW", quit) plot_data.mainloop() # input_data.withdraw() # print(1) def enter_data_details_window(): main_window.withdraw() data_details = Toplevel(main_window) data_details.title("Enter Data Details") parameter_label = Label(data_details, text="Enter Number of Parameters:") parameter_label.grid(row=0, column=0) parameter_entry = Entry(data_details) parameter_entry.grid(row=0, column=1) testcase_label = Label(data_details, 
text="Enter Number of Test Cases:") testcase_label.grid(row=1, column=0) testcase_entry = Entry(data_details) testcase_entry.grid(row=1, column=1) def input_data_window(): data_details.withdraw() global n n = parameter_entry.get() global t t = testcase_entry.get() #print(n,t) input_data = Toplevel(data_details) input_data.title("Input Data") #indentation run func def edit_details(): MsgBox = tkinter.messagebox.askquestion('Go Back','You will lose the input data, do you want to continue?',icon = 'warning') if MsgBox == 'yes': data_details.deiconify() input_data.destroy() edit_data_details = Button(input_data, text="Edit Data Details", command=edit_details) edit_data_details.grid(row=0, column=0) for i in range(int(n)): header = Label(input_data, text="Parameter "+str(i+1)) header.grid(row=0, column=i+1) feature_label = Label(input_data, text="Feature") feature_label.grid(row=0, column=int(n)+1) for j in range(int(t)): index = Label(input_data, text="Test Case "+str(j+1)) index.grid(row=j+1, column=0) # global dataset dataset = [] for j in range(int(t)): test_case_data=[] for i in range(int(n)+1): data_entry = Entry(input_data) data_entry.grid(row=j+1, column=i+1) test_case_data.append(data_entry) dataset.append(test_case_data) make_calculations = Button(input_data, text="Run", command=run(data=None, d=0)) make_calculations.grid(row=int(t)+2,column=int(n)+3) def quitconfirm(): if tkinter.messagebox.askokcancel("Quit", "Do you really wish to quit?"): main_window.destroy() input_data.protocol("WM_DELETE_WINDOW", quitconfirm) input_data.mainloop() input_data_button = Button(data_details, text="Input Data", command=input_data_window) input_data_button.grid(row=2, column=1) def quitconfirm(): if tkinter.messagebox.askokcancel("Quit", "Do you really wish to quit?"): main_window.destroy() data_details.protocol("WM_DELETE_WINDOW", quitconfirm) data_details.mainloop() Data_Input = Button(main_window, text="Enter Data Set", command=enter_data_details_window) 
Data_Input.grid(row=0, column=0, padx=90, pady=20) # This function will be used to open # file in read mode and only Python files # will be opened def load_data(): data_file_src = askopenfile(mode ='r', filetypes =[('CSV Files', '*.csv')]) #print(data_file) #global dataset data_file = csv.reader(data_file_src) data_load = [] for row in data_file: data_load.append(row) for j in dataload[1:]: for i in j: i = float(i) run(dataload, 1) main_window.withdraw() ### To Show Loaded Data #print(dataset) #if data_file is not None: #content = open(data_file, "r") #data = content.readlines() #content = pd.read_csv(data_file) #content.to_numpy() #print(content) #print(data) Data_Load = Button(main_window, text ='Load Data and Run', command = lambda:load_data()) Data_Load.grid(row=1, column=0, padx=90, pady=20) #main_window.filename = filedialog.askopenfilename(initialdir="/", title="Select A File", filetypes=(("jpg files", "*.jpg"),("all files", "*.*"))) def quitconfirm(): if tkinter.messagebox.askokcancel("Quit", "Do you really wish to quit?"): main_window.destroy() main_window.protocol("WM_DELETE_WINDOW", quitconfirm) main_window.mainloop() #print(n,t)
predict
identifier_name
mail.py
#!/usr/bin/env python2.7 from __future__ import absolute_import from __future__ import print_function from html.parser import HTMLParser import io from datetime import datetime import email import email.header import json import os import re import tempfile import time import zipfile import logging import requests #from frplib.log import LOG from util import fmt from util import strip_newlines from util import to_list from util import unquote from util import Error import s3 def extract_forwarded_email(message): try: forwarded = [p for p in message.get_payload() if p.get_content_type() == "message/rfc822"] except Exception as _: raise Error("forwarded attachment expected but missing") if not forwarded: raise Error("forwarded attachment expected but missing") if len(forwarded) > 1: # NOTE: all emails should be placed with the 'Forward as # Attachment' option in outlook. so there should only be a single # attachment containing the forwarded email raise Error("expected 1 forwarded attachment. 
got", len(forwarded)) return forwarded[0].get_payload()[0] def decode_email_header(header, charset="utf8"): """ returns an email header as a unquoted unicode string """ dec = email.header.decode_header(header)[0] hdr = dec[0] if dec[1] is not None: hdr = hdr.dec[1] return unquote(hdr) def decode_email_address(address, charset="utf8"): """ returns an email address as a unquoted unicode string """ name = decode_email_header(address[0]) addr = address[1] addr = "<" + addr + ">" if not name: return addr return name + " " + addr def clean_link(text): return re.sub(r'\s+', ' ', strip_newlines(text)).strip() class LinkParser(HTMLParser): curr = None links = [] def handle_starttag(self, tag, attrs): if tag == "a": for a in attrs: if a[0] == "href": self.curr = a[1] break def handle_endtag(self, tag): self.curr = None def handle_data(self, data): if self.curr is not None: self.links.append([clean_link(data), self.curr]) class Email(object): def __init__(self, key, msg, original_date=None): self.key = key self.text = "" self.html = "" self.attachments = [] self.links = [] self.date = original_date self.subject = None self.fr = None self.to = None self.cc = None if msg: self.date = email.utils.parsedate(msg.get("Date")) or original_date charset = msg.get_content_charset() or "utf8" self.subject = strip_newlines(decode_email_header(msg.get("Subject"), charset)) fr_addr = email.utils.getaddresses(msg.get_all("From", [])) to_addr = email.utils.getaddresses(msg.get_all("To", [])) cc_addr = email.utils.getaddresses(msg.get_all("Cc", [])) self.fr = "; ".join([decode_email_address(f, charset) for f in fr_addr]) self.to = "; ".join([decode_email_address(t, charset) for t in to_addr]) self.cc = "; ".join([decode_email_address(t, charset) for t in cc_addr]) self.errors = [] self.destinations = [] def __str__(self): return self.info() def info(self, include_links=True): atch_names = [get_attachment_name(a) for a in self.attachments] lines = [ fmt(self.key), fmt("From :", self.fr), 
fmt("To :", self.to), fmt("CC :", self.cc), fmt("Subject :", self.subject), fmt("Date :", self.date), fmt("Attachments :", atch_names), ] if include_links: lines.append(fmt("Links :", self.links)) if self.errors: for error in self.errors: lines.append(fmt("ERROR :", error)) else: lines.append(fmt("OKAY :", dest=self.destinations[0])) return "\n" + "\n".join(lines) def validate(self, destinations): self.destinations = destinations # edge-case if len(destinations) > 1: self.errors.append(fmt("multiple destinations found:", destinations)) if not destinations: self.errors.append("no destinations found") if not self.date: self.errors.append("unable to extract received date") if not self.attachments: self.errors.append("no attachments found") def parse_email(key, file_obj, forwarded=False): msg = email.message_from_file(open( file_obj.name, "r")) original_date = email.utils.parsedate(msg.get("Date")) if forwarded: try: tmp = extract_forwarded_email(msg) msg = tmp except Error as error: logging.info("not a forwarded email?",error) eml = Email(key, msg, original_date) try: body = [None, None] for part in msg.get_payload(): if part.get_filename(): eml.attachments.append(part) else: for subpart in part.walk(): subcharset = subpart.get_content_charset() or "utf8" if subpart.get_content_type() == "text/plain": text = subpart.get_payload(decode=True) eml.text = text.decode(subcharset) elif subpart.get_content_type() == "text/html": html = subpart.get_payload(decode=True) eml.html = html.decode(subcharset) if eml.html: parser = LinkParser() parser.feed(eml.html) eml.links = parser.links if eml.date: timestamp = time.mktime(eml.date) eml.date = datetime.fromtimestamp(timestamp) except Exception as excp: eml.errors.append(fmt(excp)) return eml class DisneyParser(HTMLParser): curr = None filename = None def handle_starttag(self, tag, attrs): if tag == "div": for a in attrs: if a[0] == "class" and a[1] == "fileNameDisplay": self.curr = a[1] break def handle_endtag(self, tag): 
self.curr = None def handle_data(self, data): if self.curr is not None: self.filename = data def extract_disney(eml, dest_dir): link = next((l[1] for l in eml.links if l[0] == "Click Here to Download Your File"), None) if not link: raise Error("disney email: could not find download link") page = requests.get(link).content parser = LinkParser() parser.feed(page) link = next((l[1] for l in parser.links if l[0] == "download"), None) if not link: raise Error("disney email: could not find download link on webpage") parser = DisneyParser() parser.feed(page) if not parser.filename: raise Error("disney email: could not find filename") link = "https://www.relayit.net/" + link headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36"} response = requests.get(link, headers=headers, stream=True) dest_file = os.path.join(dest_dir, parser.filename) with open(parser.filename, "wb") as f: for chunk in response.iter_content(chunk_size=4096): if chunk: f.write(chunk) return dest_file class
(object): """ EmailProcessor extract attachments from emails in an s3 bucket """ def __init__(self, rules_file, s3_base_url=None, s3_atch_dir=None, aws_access_key_id=None, aws_secret_access_key=None): with open(rules_file, "rb") as file_handle: self.rules = json.loads(file_handle.read().decode("utf-8")) # '__rootdir__' is the root directory in s3 we expect to find the email # files. save it to the object, then delete it from the rules (since # it's not actually a rule) if "__rootdir__" in self.rules: del self.rules["__rootdir__"] for dest in self.rules: rule_sets = to_list(self.rules[dest]) for rule_set in rule_sets: for key in rule_set: rule_set[key] = to_list(rule_set[key]) self.rules[dest] = rule_sets self.client = None if not s3_base_url: return # split the 's3_base_url' into the bucket / directory format that the # boto library expects. s3_base_url = s3_base_url.strip("/ ") if s3_base_url.startswith("s3://"): s3_base_url = s3_base_url[5:] parts = s3_base_url.split("/") self.s3_bucket = parts[0] self.s3_base_dir = "/".join(parts[1:]) self.attachment_dest_dir = self.s3_base_dir + "/" + s3_atch_dir #logging.info("email processor", self.s3_bucket, self.s3_base_dir, self.attachment_dest_dir) self.client = s3.Client(key=aws_access_key_id, secret=aws_secret_access_key) def check_mail(self, update=False): """ checks a given s3 location for new emails """ return self.check_mail_dir(update=update) def check_mail_dir(self, directory="", update=False): """ checks a given s3 location for new emails """ directory = (self.s3_base_dir + "/" + directory.strip("/")).strip("/") objects = self.client.list_objects(self.s3_bucket, directory) results = [] for obj in objects: tmp = tempfile.NamedTemporaryFile(mode='w',delete=False) tmp.close() with open(tmp.name, "wb") as file_handle: self.client.get_object(self.s3_bucket, obj["Key"], file_handle) eml = self.check_object(tmp, obj["Key"], update) if eml: #logging.info(eml) #logging.info(50*"-") results.append(eml) tmp.close() 
#os.remove(tmp.name) return results def check_object(self, file_obj, key, update=False): """ download a file from s3 as a in-memory object and attempts to parse it as an email / extract possible attachments """ #logging.info(" ==> checking email:", key) try: eml = parse_email(key, file_obj, forwarded=True) except Exception as excp: eml = Email(key) eml.errors.append(fmt(excp)) return eml try: # edge cases destinations = email_destinations(eml, self.rules) is_local = False if not eml.errors and update and self.client: destination_dir = os.path.join(self.attachment_dest_dir, destinations[0]) self.place_attachments(eml, destination_dir, is_local) self.move_email(key, "Processed", eml.date) except Error as error: eml.errors.append(fmt(error)) except Exception as excp: eml.errors.append(fmt(excp)) return eml def place_attachments(self, eml, destination_dir, attachment_is_local=False): for attachment in eml.attachments: timestamp = eml.date.strftime("%Y%m%d_%H%M%S_") if attachment_is_local: filename = os.path.basename(attachment) data = open(attachment, "rb") else: filename = get_attachment_name(attachment) data = io.BytesIO(attachment.get_payload(decode=True)) ext = os.path.splitext(filename)[1].lower() # check for zip file. we extract zip file members as part of the # email processing so the audit file configuration can target # specific / multiple files. 
if ext == ".zip": #logging.info("opening zip archive", filename) zipf = zipfile.ZipFile(data, "r") for member in zipf.namelist(): dest_key = os.path.join(destination_dir, timestamp + os.path.basename(member)) dest_key = dest_key.replace('\\','/') #logging.info("extracting zip file", member, "to", dest_key) data = zipf.open(member, "r").read() self.client.put_object(data, self.s3_bucket, dest_key) else: dest_key = os.path.join(destination_dir, timestamp + filename) dest_key = dest_key.replace('\\','/') #print(self.s3_bucket, dest_key) #logging.info("extracting attachment", filename, "to", dest_key) self.client.put_object(data, self.s3_bucket, dest_key) def move_email(self, s3key, directory, date): """ move the original s3 file to a final location to prevent re-processing it """ dest_key = os.path.join(self.s3_base_dir, directory) if date is not None: dest_key = os.path.join(dest_key, date.strftime("%Y%m%d"), os.path.basename(s3key)) else: dest_key = os.path.join(dest_key, os.path.basename(s3key)) if dest_key == s3key: return dest_key = dest_key.replace('\\','/') #logging.info("moving file", repr(s3key), "->", repr(dest_key)) self.client.copy_object(self.s3_bucket, s3key, self.s3_bucket, dest_key) self.client.delete_object(self.s3_bucket, s3key) def get_attachment_name(attachment): name = attachment if not isinstance(name, str): name = attachment.get_filename() return decode_email_header(name) def email_destinations(message, rules): """ returns the email rules that match this message. 
""" matching = set() for dest in rules: if email_matches_rule(message, rules[dest]): matching.add(dest) return list(matching) def email_matches_rule(eml, rule_sets): def val(eml, key): if key == "From": return eml.fr.lower() elif key == "Subject": return eml.subject.lower() elif key == "Body": return eml.text.lower() + " " + eml.html.lower() return "" for rule_set in rule_sets: okay = True for key in rule_set: options = rule_set[key] if not any([o.lower() in val(eml, key) for o in options]): # the key (and rule_set) are not a match okay = False break if okay: return True return False # vim: set sw=4 sts=4 ts=4 et:
EmailProcessor
identifier_name
mail.py
#!/usr/bin/env python2.7 from __future__ import absolute_import from __future__ import print_function from html.parser import HTMLParser import io from datetime import datetime import email import email.header import json import os import re import tempfile import time import zipfile import logging import requests #from frplib.log import LOG from util import fmt from util import strip_newlines from util import to_list from util import unquote from util import Error import s3 def extract_forwarded_email(message): try: forwarded = [p for p in message.get_payload() if p.get_content_type() == "message/rfc822"] except Exception as _: raise Error("forwarded attachment expected but missing") if not forwarded: raise Error("forwarded attachment expected but missing") if len(forwarded) > 1: # NOTE: all emails should be placed with the 'Forward as # Attachment' option in outlook. so there should only be a single # attachment containing the forwarded email raise Error("expected 1 forwarded attachment. got", len(forwarded)) return forwarded[0].get_payload()[0] def decode_email_header(header, charset="utf8"): """ returns an email header as a unquoted unicode string """ dec = email.header.decode_header(header)[0] hdr = dec[0] if dec[1] is not None: hdr = hdr.dec[1] return unquote(hdr) def decode_email_address(address, charset="utf8"): """ returns an email address as a unquoted unicode string """ name = decode_email_header(address[0]) addr = address[1] addr = "<" + addr + ">" if not name: return addr return name + " " + addr def clean_link(text): return re.sub(r'\s+', ' ', strip_newlines(text)).strip() class LinkParser(HTMLParser): curr = None links = [] def handle_starttag(self, tag, attrs): if tag == "a": for a in attrs: if a[0] == "href": self.curr = a[1] break def handle_endtag(self, tag):
def handle_data(self, data): if self.curr is not None: self.links.append([clean_link(data), self.curr]) class Email(object): def __init__(self, key, msg, original_date=None): self.key = key self.text = "" self.html = "" self.attachments = [] self.links = [] self.date = original_date self.subject = None self.fr = None self.to = None self.cc = None if msg: self.date = email.utils.parsedate(msg.get("Date")) or original_date charset = msg.get_content_charset() or "utf8" self.subject = strip_newlines(decode_email_header(msg.get("Subject"), charset)) fr_addr = email.utils.getaddresses(msg.get_all("From", [])) to_addr = email.utils.getaddresses(msg.get_all("To", [])) cc_addr = email.utils.getaddresses(msg.get_all("Cc", [])) self.fr = "; ".join([decode_email_address(f, charset) for f in fr_addr]) self.to = "; ".join([decode_email_address(t, charset) for t in to_addr]) self.cc = "; ".join([decode_email_address(t, charset) for t in cc_addr]) self.errors = [] self.destinations = [] def __str__(self): return self.info() def info(self, include_links=True): atch_names = [get_attachment_name(a) for a in self.attachments] lines = [ fmt(self.key), fmt("From :", self.fr), fmt("To :", self.to), fmt("CC :", self.cc), fmt("Subject :", self.subject), fmt("Date :", self.date), fmt("Attachments :", atch_names), ] if include_links: lines.append(fmt("Links :", self.links)) if self.errors: for error in self.errors: lines.append(fmt("ERROR :", error)) else: lines.append(fmt("OKAY :", dest=self.destinations[0])) return "\n" + "\n".join(lines) def validate(self, destinations): self.destinations = destinations # edge-case if len(destinations) > 1: self.errors.append(fmt("multiple destinations found:", destinations)) if not destinations: self.errors.append("no destinations found") if not self.date: self.errors.append("unable to extract received date") if not self.attachments: self.errors.append("no attachments found") def parse_email(key, file_obj, forwarded=False): msg = 
email.message_from_file(open( file_obj.name, "r")) original_date = email.utils.parsedate(msg.get("Date")) if forwarded: try: tmp = extract_forwarded_email(msg) msg = tmp except Error as error: logging.info("not a forwarded email?",error) eml = Email(key, msg, original_date) try: body = [None, None] for part in msg.get_payload(): if part.get_filename(): eml.attachments.append(part) else: for subpart in part.walk(): subcharset = subpart.get_content_charset() or "utf8" if subpart.get_content_type() == "text/plain": text = subpart.get_payload(decode=True) eml.text = text.decode(subcharset) elif subpart.get_content_type() == "text/html": html = subpart.get_payload(decode=True) eml.html = html.decode(subcharset) if eml.html: parser = LinkParser() parser.feed(eml.html) eml.links = parser.links if eml.date: timestamp = time.mktime(eml.date) eml.date = datetime.fromtimestamp(timestamp) except Exception as excp: eml.errors.append(fmt(excp)) return eml class DisneyParser(HTMLParser): curr = None filename = None def handle_starttag(self, tag, attrs): if tag == "div": for a in attrs: if a[0] == "class" and a[1] == "fileNameDisplay": self.curr = a[1] break def handle_endtag(self, tag): self.curr = None def handle_data(self, data): if self.curr is not None: self.filename = data def extract_disney(eml, dest_dir): link = next((l[1] for l in eml.links if l[0] == "Click Here to Download Your File"), None) if not link: raise Error("disney email: could not find download link") page = requests.get(link).content parser = LinkParser() parser.feed(page) link = next((l[1] for l in parser.links if l[0] == "download"), None) if not link: raise Error("disney email: could not find download link on webpage") parser = DisneyParser() parser.feed(page) if not parser.filename: raise Error("disney email: could not find filename") link = "https://www.relayit.net/" + link headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 
Safari/537.36"} response = requests.get(link, headers=headers, stream=True) dest_file = os.path.join(dest_dir, parser.filename) with open(parser.filename, "wb") as f: for chunk in response.iter_content(chunk_size=4096): if chunk: f.write(chunk) return dest_file class EmailProcessor(object): """ EmailProcessor extract attachments from emails in an s3 bucket """ def __init__(self, rules_file, s3_base_url=None, s3_atch_dir=None, aws_access_key_id=None, aws_secret_access_key=None): with open(rules_file, "rb") as file_handle: self.rules = json.loads(file_handle.read().decode("utf-8")) # '__rootdir__' is the root directory in s3 we expect to find the email # files. save it to the object, then delete it from the rules (since # it's not actually a rule) if "__rootdir__" in self.rules: del self.rules["__rootdir__"] for dest in self.rules: rule_sets = to_list(self.rules[dest]) for rule_set in rule_sets: for key in rule_set: rule_set[key] = to_list(rule_set[key]) self.rules[dest] = rule_sets self.client = None if not s3_base_url: return # split the 's3_base_url' into the bucket / directory format that the # boto library expects. 
s3_base_url = s3_base_url.strip("/ ") if s3_base_url.startswith("s3://"): s3_base_url = s3_base_url[5:] parts = s3_base_url.split("/") self.s3_bucket = parts[0] self.s3_base_dir = "/".join(parts[1:]) self.attachment_dest_dir = self.s3_base_dir + "/" + s3_atch_dir #logging.info("email processor", self.s3_bucket, self.s3_base_dir, self.attachment_dest_dir) self.client = s3.Client(key=aws_access_key_id, secret=aws_secret_access_key) def check_mail(self, update=False): """ checks a given s3 location for new emails """ return self.check_mail_dir(update=update) def check_mail_dir(self, directory="", update=False): """ checks a given s3 location for new emails """ directory = (self.s3_base_dir + "/" + directory.strip("/")).strip("/") objects = self.client.list_objects(self.s3_bucket, directory) results = [] for obj in objects: tmp = tempfile.NamedTemporaryFile(mode='w',delete=False) tmp.close() with open(tmp.name, "wb") as file_handle: self.client.get_object(self.s3_bucket, obj["Key"], file_handle) eml = self.check_object(tmp, obj["Key"], update) if eml: #logging.info(eml) #logging.info(50*"-") results.append(eml) tmp.close() #os.remove(tmp.name) return results def check_object(self, file_obj, key, update=False): """ download a file from s3 as a in-memory object and attempts to parse it as an email / extract possible attachments """ #logging.info(" ==> checking email:", key) try: eml = parse_email(key, file_obj, forwarded=True) except Exception as excp: eml = Email(key) eml.errors.append(fmt(excp)) return eml try: # edge cases destinations = email_destinations(eml, self.rules) is_local = False if not eml.errors and update and self.client: destination_dir = os.path.join(self.attachment_dest_dir, destinations[0]) self.place_attachments(eml, destination_dir, is_local) self.move_email(key, "Processed", eml.date) except Error as error: eml.errors.append(fmt(error)) except Exception as excp: eml.errors.append(fmt(excp)) return eml def place_attachments(self, eml, 
destination_dir, attachment_is_local=False): for attachment in eml.attachments: timestamp = eml.date.strftime("%Y%m%d_%H%M%S_") if attachment_is_local: filename = os.path.basename(attachment) data = open(attachment, "rb") else: filename = get_attachment_name(attachment) data = io.BytesIO(attachment.get_payload(decode=True)) ext = os.path.splitext(filename)[1].lower() # check for zip file. we extract zip file members as part of the # email processing so the audit file configuration can target # specific / multiple files. if ext == ".zip": #logging.info("opening zip archive", filename) zipf = zipfile.ZipFile(data, "r") for member in zipf.namelist(): dest_key = os.path.join(destination_dir, timestamp + os.path.basename(member)) dest_key = dest_key.replace('\\','/') #logging.info("extracting zip file", member, "to", dest_key) data = zipf.open(member, "r").read() self.client.put_object(data, self.s3_bucket, dest_key) else: dest_key = os.path.join(destination_dir, timestamp + filename) dest_key = dest_key.replace('\\','/') #print(self.s3_bucket, dest_key) #logging.info("extracting attachment", filename, "to", dest_key) self.client.put_object(data, self.s3_bucket, dest_key) def move_email(self, s3key, directory, date): """ move the original s3 file to a final location to prevent re-processing it """ dest_key = os.path.join(self.s3_base_dir, directory) if date is not None: dest_key = os.path.join(dest_key, date.strftime("%Y%m%d"), os.path.basename(s3key)) else: dest_key = os.path.join(dest_key, os.path.basename(s3key)) if dest_key == s3key: return dest_key = dest_key.replace('\\','/') #logging.info("moving file", repr(s3key), "->", repr(dest_key)) self.client.copy_object(self.s3_bucket, s3key, self.s3_bucket, dest_key) self.client.delete_object(self.s3_bucket, s3key) def get_attachment_name(attachment): name = attachment if not isinstance(name, str): name = attachment.get_filename() return decode_email_header(name) def email_destinations(message, rules): """ returns the 
email rules that match this message. """ matching = set() for dest in rules: if email_matches_rule(message, rules[dest]): matching.add(dest) return list(matching) def email_matches_rule(eml, rule_sets): def val(eml, key): if key == "From": return eml.fr.lower() elif key == "Subject": return eml.subject.lower() elif key == "Body": return eml.text.lower() + " " + eml.html.lower() return "" for rule_set in rule_sets: okay = True for key in rule_set: options = rule_set[key] if not any([o.lower() in val(eml, key) for o in options]): # the key (and rule_set) are not a match okay = False break if okay: return True return False # vim: set sw=4 sts=4 ts=4 et:
self.curr = None
identifier_body
mail.py
#!/usr/bin/env python2.7 from __future__ import absolute_import from __future__ import print_function from html.parser import HTMLParser import io from datetime import datetime import email import email.header import json import os import re import tempfile import time import zipfile import logging import requests #from frplib.log import LOG from util import fmt from util import strip_newlines from util import to_list from util import unquote from util import Error import s3 def extract_forwarded_email(message): try: forwarded = [p for p in message.get_payload() if p.get_content_type() == "message/rfc822"] except Exception as _: raise Error("forwarded attachment expected but missing") if not forwarded: raise Error("forwarded attachment expected but missing") if len(forwarded) > 1: # NOTE: all emails should be placed with the 'Forward as # Attachment' option in outlook. so there should only be a single # attachment containing the forwarded email raise Error("expected 1 forwarded attachment. 
got", len(forwarded)) return forwarded[0].get_payload()[0] def decode_email_header(header, charset="utf8"): """ returns an email header as a unquoted unicode string """ dec = email.header.decode_header(header)[0] hdr = dec[0] if dec[1] is not None: hdr = hdr.dec[1] return unquote(hdr) def decode_email_address(address, charset="utf8"): """ returns an email address as a unquoted unicode string """ name = decode_email_header(address[0]) addr = address[1] addr = "<" + addr + ">" if not name: return addr return name + " " + addr def clean_link(text): return re.sub(r'\s+', ' ', strip_newlines(text)).strip() class LinkParser(HTMLParser): curr = None links = [] def handle_starttag(self, tag, attrs): if tag == "a": for a in attrs: if a[0] == "href": self.curr = a[1] break def handle_endtag(self, tag): self.curr = None def handle_data(self, data): if self.curr is not None: self.links.append([clean_link(data), self.curr]) class Email(object): def __init__(self, key, msg, original_date=None): self.key = key self.text = "" self.html = "" self.attachments = [] self.links = [] self.date = original_date self.subject = None self.fr = None self.to = None self.cc = None if msg: self.date = email.utils.parsedate(msg.get("Date")) or original_date charset = msg.get_content_charset() or "utf8" self.subject = strip_newlines(decode_email_header(msg.get("Subject"), charset)) fr_addr = email.utils.getaddresses(msg.get_all("From", [])) to_addr = email.utils.getaddresses(msg.get_all("To", [])) cc_addr = email.utils.getaddresses(msg.get_all("Cc", [])) self.fr = "; ".join([decode_email_address(f, charset) for f in fr_addr]) self.to = "; ".join([decode_email_address(t, charset) for t in to_addr]) self.cc = "; ".join([decode_email_address(t, charset) for t in cc_addr]) self.errors = [] self.destinations = [] def __str__(self): return self.info() def info(self, include_links=True): atch_names = [get_attachment_name(a) for a in self.attachments] lines = [ fmt(self.key), fmt("From :", self.fr), 
fmt("To :", self.to), fmt("CC :", self.cc), fmt("Subject :", self.subject),
fmt("Attachments :", atch_names), ] if include_links: lines.append(fmt("Links :", self.links)) if self.errors: for error in self.errors: lines.append(fmt("ERROR :", error)) else: lines.append(fmt("OKAY :", dest=self.destinations[0])) return "\n" + "\n".join(lines) def validate(self, destinations): self.destinations = destinations # edge-case if len(destinations) > 1: self.errors.append(fmt("multiple destinations found:", destinations)) if not destinations: self.errors.append("no destinations found") if not self.date: self.errors.append("unable to extract received date") if not self.attachments: self.errors.append("no attachments found") def parse_email(key, file_obj, forwarded=False): msg = email.message_from_file(open( file_obj.name, "r")) original_date = email.utils.parsedate(msg.get("Date")) if forwarded: try: tmp = extract_forwarded_email(msg) msg = tmp except Error as error: logging.info("not a forwarded email?",error) eml = Email(key, msg, original_date) try: body = [None, None] for part in msg.get_payload(): if part.get_filename(): eml.attachments.append(part) else: for subpart in part.walk(): subcharset = subpart.get_content_charset() or "utf8" if subpart.get_content_type() == "text/plain": text = subpart.get_payload(decode=True) eml.text = text.decode(subcharset) elif subpart.get_content_type() == "text/html": html = subpart.get_payload(decode=True) eml.html = html.decode(subcharset) if eml.html: parser = LinkParser() parser.feed(eml.html) eml.links = parser.links if eml.date: timestamp = time.mktime(eml.date) eml.date = datetime.fromtimestamp(timestamp) except Exception as excp: eml.errors.append(fmt(excp)) return eml class DisneyParser(HTMLParser): curr = None filename = None def handle_starttag(self, tag, attrs): if tag == "div": for a in attrs: if a[0] == "class" and a[1] == "fileNameDisplay": self.curr = a[1] break def handle_endtag(self, tag): self.curr = None def handle_data(self, data): if self.curr is not None: self.filename = data def 
extract_disney(eml, dest_dir): link = next((l[1] for l in eml.links if l[0] == "Click Here to Download Your File"), None) if not link: raise Error("disney email: could not find download link") page = requests.get(link).content parser = LinkParser() parser.feed(page) link = next((l[1] for l in parser.links if l[0] == "download"), None) if not link: raise Error("disney email: could not find download link on webpage") parser = DisneyParser() parser.feed(page) if not parser.filename: raise Error("disney email: could not find filename") link = "https://www.relayit.net/" + link headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36"} response = requests.get(link, headers=headers, stream=True) dest_file = os.path.join(dest_dir, parser.filename) with open(parser.filename, "wb") as f: for chunk in response.iter_content(chunk_size=4096): if chunk: f.write(chunk) return dest_file class EmailProcessor(object): """ EmailProcessor extract attachments from emails in an s3 bucket """ def __init__(self, rules_file, s3_base_url=None, s3_atch_dir=None, aws_access_key_id=None, aws_secret_access_key=None): with open(rules_file, "rb") as file_handle: self.rules = json.loads(file_handle.read().decode("utf-8")) # '__rootdir__' is the root directory in s3 we expect to find the email # files. save it to the object, then delete it from the rules (since # it's not actually a rule) if "__rootdir__" in self.rules: del self.rules["__rootdir__"] for dest in self.rules: rule_sets = to_list(self.rules[dest]) for rule_set in rule_sets: for key in rule_set: rule_set[key] = to_list(rule_set[key]) self.rules[dest] = rule_sets self.client = None if not s3_base_url: return # split the 's3_base_url' into the bucket / directory format that the # boto library expects. 
s3_base_url = s3_base_url.strip("/ ") if s3_base_url.startswith("s3://"): s3_base_url = s3_base_url[5:] parts = s3_base_url.split("/") self.s3_bucket = parts[0] self.s3_base_dir = "/".join(parts[1:]) self.attachment_dest_dir = self.s3_base_dir + "/" + s3_atch_dir #logging.info("email processor", self.s3_bucket, self.s3_base_dir, self.attachment_dest_dir) self.client = s3.Client(key=aws_access_key_id, secret=aws_secret_access_key) def check_mail(self, update=False): """ checks a given s3 location for new emails """ return self.check_mail_dir(update=update) def check_mail_dir(self, directory="", update=False): """ checks a given s3 location for new emails """ directory = (self.s3_base_dir + "/" + directory.strip("/")).strip("/") objects = self.client.list_objects(self.s3_bucket, directory) results = [] for obj in objects: tmp = tempfile.NamedTemporaryFile(mode='w',delete=False) tmp.close() with open(tmp.name, "wb") as file_handle: self.client.get_object(self.s3_bucket, obj["Key"], file_handle) eml = self.check_object(tmp, obj["Key"], update) if eml: #logging.info(eml) #logging.info(50*"-") results.append(eml) tmp.close() #os.remove(tmp.name) return results def check_object(self, file_obj, key, update=False): """ download a file from s3 as a in-memory object and attempts to parse it as an email / extract possible attachments """ #logging.info(" ==> checking email:", key) try: eml = parse_email(key, file_obj, forwarded=True) except Exception as excp: eml = Email(key) eml.errors.append(fmt(excp)) return eml try: # edge cases destinations = email_destinations(eml, self.rules) is_local = False if not eml.errors and update and self.client: destination_dir = os.path.join(self.attachment_dest_dir, destinations[0]) self.place_attachments(eml, destination_dir, is_local) self.move_email(key, "Processed", eml.date) except Error as error: eml.errors.append(fmt(error)) except Exception as excp: eml.errors.append(fmt(excp)) return eml def place_attachments(self, eml, 
destination_dir, attachment_is_local=False): for attachment in eml.attachments: timestamp = eml.date.strftime("%Y%m%d_%H%M%S_") if attachment_is_local: filename = os.path.basename(attachment) data = open(attachment, "rb") else: filename = get_attachment_name(attachment) data = io.BytesIO(attachment.get_payload(decode=True)) ext = os.path.splitext(filename)[1].lower() # check for zip file. we extract zip file members as part of the # email processing so the audit file configuration can target # specific / multiple files. if ext == ".zip": #logging.info("opening zip archive", filename) zipf = zipfile.ZipFile(data, "r") for member in zipf.namelist(): dest_key = os.path.join(destination_dir, timestamp + os.path.basename(member)) dest_key = dest_key.replace('\\','/') #logging.info("extracting zip file", member, "to", dest_key) data = zipf.open(member, "r").read() self.client.put_object(data, self.s3_bucket, dest_key) else: dest_key = os.path.join(destination_dir, timestamp + filename) dest_key = dest_key.replace('\\','/') #print(self.s3_bucket, dest_key) #logging.info("extracting attachment", filename, "to", dest_key) self.client.put_object(data, self.s3_bucket, dest_key) def move_email(self, s3key, directory, date): """ move the original s3 file to a final location to prevent re-processing it """ dest_key = os.path.join(self.s3_base_dir, directory) if date is not None: dest_key = os.path.join(dest_key, date.strftime("%Y%m%d"), os.path.basename(s3key)) else: dest_key = os.path.join(dest_key, os.path.basename(s3key)) if dest_key == s3key: return dest_key = dest_key.replace('\\','/') #logging.info("moving file", repr(s3key), "->", repr(dest_key)) self.client.copy_object(self.s3_bucket, s3key, self.s3_bucket, dest_key) self.client.delete_object(self.s3_bucket, s3key) def get_attachment_name(attachment): name = attachment if not isinstance(name, str): name = attachment.get_filename() return decode_email_header(name) def email_destinations(message, rules): """ returns the 
email rules that match this message. """ matching = set() for dest in rules: if email_matches_rule(message, rules[dest]): matching.add(dest) return list(matching) def email_matches_rule(eml, rule_sets): def val(eml, key): if key == "From": return eml.fr.lower() elif key == "Subject": return eml.subject.lower() elif key == "Body": return eml.text.lower() + " " + eml.html.lower() return "" for rule_set in rule_sets: okay = True for key in rule_set: options = rule_set[key] if not any([o.lower() in val(eml, key) for o in options]): # the key (and rule_set) are not a match okay = False break if okay: return True return False # vim: set sw=4 sts=4 ts=4 et:
fmt("Date :", self.date),
random_line_split
mail.py
#!/usr/bin/env python2.7 from __future__ import absolute_import from __future__ import print_function from html.parser import HTMLParser import io from datetime import datetime import email import email.header import json import os import re import tempfile import time import zipfile import logging import requests #from frplib.log import LOG from util import fmt from util import strip_newlines from util import to_list from util import unquote from util import Error import s3 def extract_forwarded_email(message): try: forwarded = [p for p in message.get_payload() if p.get_content_type() == "message/rfc822"] except Exception as _: raise Error("forwarded attachment expected but missing") if not forwarded:
if len(forwarded) > 1: # NOTE: all emails should be placed with the 'Forward as # Attachment' option in outlook. so there should only be a single # attachment containing the forwarded email raise Error("expected 1 forwarded attachment. got", len(forwarded)) return forwarded[0].get_payload()[0] def decode_email_header(header, charset="utf8"): """ returns an email header as a unquoted unicode string """ dec = email.header.decode_header(header)[0] hdr = dec[0] if dec[1] is not None: hdr = hdr.dec[1] return unquote(hdr) def decode_email_address(address, charset="utf8"): """ returns an email address as a unquoted unicode string """ name = decode_email_header(address[0]) addr = address[1] addr = "<" + addr + ">" if not name: return addr return name + " " + addr def clean_link(text): return re.sub(r'\s+', ' ', strip_newlines(text)).strip() class LinkParser(HTMLParser): curr = None links = [] def handle_starttag(self, tag, attrs): if tag == "a": for a in attrs: if a[0] == "href": self.curr = a[1] break def handle_endtag(self, tag): self.curr = None def handle_data(self, data): if self.curr is not None: self.links.append([clean_link(data), self.curr]) class Email(object): def __init__(self, key, msg, original_date=None): self.key = key self.text = "" self.html = "" self.attachments = [] self.links = [] self.date = original_date self.subject = None self.fr = None self.to = None self.cc = None if msg: self.date = email.utils.parsedate(msg.get("Date")) or original_date charset = msg.get_content_charset() or "utf8" self.subject = strip_newlines(decode_email_header(msg.get("Subject"), charset)) fr_addr = email.utils.getaddresses(msg.get_all("From", [])) to_addr = email.utils.getaddresses(msg.get_all("To", [])) cc_addr = email.utils.getaddresses(msg.get_all("Cc", [])) self.fr = "; ".join([decode_email_address(f, charset) for f in fr_addr]) self.to = "; ".join([decode_email_address(t, charset) for t in to_addr]) self.cc = "; ".join([decode_email_address(t, charset) for t in 
cc_addr]) self.errors = [] self.destinations = [] def __str__(self): return self.info() def info(self, include_links=True): atch_names = [get_attachment_name(a) for a in self.attachments] lines = [ fmt(self.key), fmt("From :", self.fr), fmt("To :", self.to), fmt("CC :", self.cc), fmt("Subject :", self.subject), fmt("Date :", self.date), fmt("Attachments :", atch_names), ] if include_links: lines.append(fmt("Links :", self.links)) if self.errors: for error in self.errors: lines.append(fmt("ERROR :", error)) else: lines.append(fmt("OKAY :", dest=self.destinations[0])) return "\n" + "\n".join(lines) def validate(self, destinations): self.destinations = destinations # edge-case if len(destinations) > 1: self.errors.append(fmt("multiple destinations found:", destinations)) if not destinations: self.errors.append("no destinations found") if not self.date: self.errors.append("unable to extract received date") if not self.attachments: self.errors.append("no attachments found") def parse_email(key, file_obj, forwarded=False): msg = email.message_from_file(open( file_obj.name, "r")) original_date = email.utils.parsedate(msg.get("Date")) if forwarded: try: tmp = extract_forwarded_email(msg) msg = tmp except Error as error: logging.info("not a forwarded email?",error) eml = Email(key, msg, original_date) try: body = [None, None] for part in msg.get_payload(): if part.get_filename(): eml.attachments.append(part) else: for subpart in part.walk(): subcharset = subpart.get_content_charset() or "utf8" if subpart.get_content_type() == "text/plain": text = subpart.get_payload(decode=True) eml.text = text.decode(subcharset) elif subpart.get_content_type() == "text/html": html = subpart.get_payload(decode=True) eml.html = html.decode(subcharset) if eml.html: parser = LinkParser() parser.feed(eml.html) eml.links = parser.links if eml.date: timestamp = time.mktime(eml.date) eml.date = datetime.fromtimestamp(timestamp) except Exception as excp: eml.errors.append(fmt(excp)) return eml 
class DisneyParser(HTMLParser): curr = None filename = None def handle_starttag(self, tag, attrs): if tag == "div": for a in attrs: if a[0] == "class" and a[1] == "fileNameDisplay": self.curr = a[1] break def handle_endtag(self, tag): self.curr = None def handle_data(self, data): if self.curr is not None: self.filename = data def extract_disney(eml, dest_dir): link = next((l[1] for l in eml.links if l[0] == "Click Here to Download Your File"), None) if not link: raise Error("disney email: could not find download link") page = requests.get(link).content parser = LinkParser() parser.feed(page) link = next((l[1] for l in parser.links if l[0] == "download"), None) if not link: raise Error("disney email: could not find download link on webpage") parser = DisneyParser() parser.feed(page) if not parser.filename: raise Error("disney email: could not find filename") link = "https://www.relayit.net/" + link headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36"} response = requests.get(link, headers=headers, stream=True) dest_file = os.path.join(dest_dir, parser.filename) with open(parser.filename, "wb") as f: for chunk in response.iter_content(chunk_size=4096): if chunk: f.write(chunk) return dest_file class EmailProcessor(object): """ EmailProcessor extract attachments from emails in an s3 bucket """ def __init__(self, rules_file, s3_base_url=None, s3_atch_dir=None, aws_access_key_id=None, aws_secret_access_key=None): with open(rules_file, "rb") as file_handle: self.rules = json.loads(file_handle.read().decode("utf-8")) # '__rootdir__' is the root directory in s3 we expect to find the email # files. 
save it to the object, then delete it from the rules (since # it's not actually a rule) if "__rootdir__" in self.rules: del self.rules["__rootdir__"] for dest in self.rules: rule_sets = to_list(self.rules[dest]) for rule_set in rule_sets: for key in rule_set: rule_set[key] = to_list(rule_set[key]) self.rules[dest] = rule_sets self.client = None if not s3_base_url: return # split the 's3_base_url' into the bucket / directory format that the # boto library expects. s3_base_url = s3_base_url.strip("/ ") if s3_base_url.startswith("s3://"): s3_base_url = s3_base_url[5:] parts = s3_base_url.split("/") self.s3_bucket = parts[0] self.s3_base_dir = "/".join(parts[1:]) self.attachment_dest_dir = self.s3_base_dir + "/" + s3_atch_dir #logging.info("email processor", self.s3_bucket, self.s3_base_dir, self.attachment_dest_dir) self.client = s3.Client(key=aws_access_key_id, secret=aws_secret_access_key) def check_mail(self, update=False): """ checks a given s3 location for new emails """ return self.check_mail_dir(update=update) def check_mail_dir(self, directory="", update=False): """ checks a given s3 location for new emails """ directory = (self.s3_base_dir + "/" + directory.strip("/")).strip("/") objects = self.client.list_objects(self.s3_bucket, directory) results = [] for obj in objects: tmp = tempfile.NamedTemporaryFile(mode='w',delete=False) tmp.close() with open(tmp.name, "wb") as file_handle: self.client.get_object(self.s3_bucket, obj["Key"], file_handle) eml = self.check_object(tmp, obj["Key"], update) if eml: #logging.info(eml) #logging.info(50*"-") results.append(eml) tmp.close() #os.remove(tmp.name) return results def check_object(self, file_obj, key, update=False): """ download a file from s3 as a in-memory object and attempts to parse it as an email / extract possible attachments """ #logging.info(" ==> checking email:", key) try: eml = parse_email(key, file_obj, forwarded=True) except Exception as excp: eml = Email(key) eml.errors.append(fmt(excp)) return eml 
try: # edge cases destinations = email_destinations(eml, self.rules) is_local = False if not eml.errors and update and self.client: destination_dir = os.path.join(self.attachment_dest_dir, destinations[0]) self.place_attachments(eml, destination_dir, is_local) self.move_email(key, "Processed", eml.date) except Error as error: eml.errors.append(fmt(error)) except Exception as excp: eml.errors.append(fmt(excp)) return eml def place_attachments(self, eml, destination_dir, attachment_is_local=False): for attachment in eml.attachments: timestamp = eml.date.strftime("%Y%m%d_%H%M%S_") if attachment_is_local: filename = os.path.basename(attachment) data = open(attachment, "rb") else: filename = get_attachment_name(attachment) data = io.BytesIO(attachment.get_payload(decode=True)) ext = os.path.splitext(filename)[1].lower() # check for zip file. we extract zip file members as part of the # email processing so the audit file configuration can target # specific / multiple files. if ext == ".zip": #logging.info("opening zip archive", filename) zipf = zipfile.ZipFile(data, "r") for member in zipf.namelist(): dest_key = os.path.join(destination_dir, timestamp + os.path.basename(member)) dest_key = dest_key.replace('\\','/') #logging.info("extracting zip file", member, "to", dest_key) data = zipf.open(member, "r").read() self.client.put_object(data, self.s3_bucket, dest_key) else: dest_key = os.path.join(destination_dir, timestamp + filename) dest_key = dest_key.replace('\\','/') #print(self.s3_bucket, dest_key) #logging.info("extracting attachment", filename, "to", dest_key) self.client.put_object(data, self.s3_bucket, dest_key) def move_email(self, s3key, directory, date): """ move the original s3 file to a final location to prevent re-processing it """ dest_key = os.path.join(self.s3_base_dir, directory) if date is not None: dest_key = os.path.join(dest_key, date.strftime("%Y%m%d"), os.path.basename(s3key)) else: dest_key = os.path.join(dest_key, os.path.basename(s3key)) if 
dest_key == s3key: return dest_key = dest_key.replace('\\','/') #logging.info("moving file", repr(s3key), "->", repr(dest_key)) self.client.copy_object(self.s3_bucket, s3key, self.s3_bucket, dest_key) self.client.delete_object(self.s3_bucket, s3key) def get_attachment_name(attachment): name = attachment if not isinstance(name, str): name = attachment.get_filename() return decode_email_header(name) def email_destinations(message, rules): """ returns the email rules that match this message. """ matching = set() for dest in rules: if email_matches_rule(message, rules[dest]): matching.add(dest) return list(matching) def email_matches_rule(eml, rule_sets): def val(eml, key): if key == "From": return eml.fr.lower() elif key == "Subject": return eml.subject.lower() elif key == "Body": return eml.text.lower() + " " + eml.html.lower() return "" for rule_set in rule_sets: okay = True for key in rule_set: options = rule_set[key] if not any([o.lower() in val(eml, key) for o in options]): # the key (and rule_set) are not a match okay = False break if okay: return True return False # vim: set sw=4 sts=4 ts=4 et:
raise Error("forwarded attachment expected but missing")
conditional_block
mod.rs
#![warn(missing_docs)] //! Contains all structures and methods to create and manage scenes. //! //! Scene is container for graph nodes, animations and physics. pub mod base; pub mod camera; pub mod graph; pub mod light; pub mod mesh; pub mod node; pub mod particle_system; pub mod sprite; pub mod transform; use crate::{ animation::AnimationContainer, core::{ math::vec2::Vec2, pool::{Handle, Pool, PoolIterator, PoolIteratorMut}, visitor::{Visit, VisitError, VisitResult, Visitor}, }, engine::resource_manager::ResourceManager, physics::{rigid_body::RigidBody, Physics}, resource::texture::Texture, scene::{graph::Graph, node::Node}, utils::{lightmap::Lightmap, log::Log}, }; use std::{ collections::HashMap, ops::{Index, IndexMut}, path::Path, sync::{Arc, Mutex}, }; /// Physics binder is used to link graph nodes with rigid bodies. Scene will /// sync transform of node with its associated rigid body. #[derive(Clone, Debug)] pub struct PhysicsBinder { node_rigid_body_map: HashMap<Handle<Node>, Handle<RigidBody>>, } impl Default for PhysicsBinder { fn default() -> Self { Self { node_rigid_body_map: Default::default(), } } } impl PhysicsBinder { /// Links given graph node with specified rigid body. pub fn bind( &mut self, node: Handle<Node>, rigid_body: Handle<RigidBody>, ) -> Option<Handle<RigidBody>> { self.node_rigid_body_map.insert(node, rigid_body) } /// Unlinks given graph node from its associated rigid body (if any). pub fn unbind(&mut self, node: Handle<Node>) -> Option<Handle<RigidBody>> { self.node_rigid_body_map.remove(&node) } /// Unlinks given body from a node that is linked with the body. /// /// # Performance /// /// This method is slow because of two reasons: /// /// 1) Search is linear /// 2) Additional memory is allocated /// /// So it is not advised to call it in performance critical places. 
pub fn unbind_by_body(&mut self, body: Handle<RigidBody>) -> Handle<Node> { let mut node = Handle::NONE; self.node_rigid_body_map = self .node_rigid_body_map .clone() .into_iter() .filter(|&(n, b)| { if b == body { node = n; false } else { true } }) .collect(); node } /// Returns handle of rigid body associated with given node. It will return /// Handle::NONE if given node isn't linked to a rigid body. pub fn body_of(&self, node: Handle<Node>) -> Handle<RigidBody> { self.node_rigid_body_map .get(&node) .copied() .unwrap_or_default() } } impl Visit for PhysicsBinder { fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult { visitor.enter_region(name)?; self.node_rigid_body_map.visit("Map", visitor)?; visitor.leave_region() } } /// See module docs. #[derive(Debug)] pub struct Scene { /// Graph is main container for all scene nodes. It calculates global transforms for nodes, /// updates them and performs all other important work. See `graph` module docs for more /// info. pub graph: Graph, /// Animations container controls all animation on scene. Each animation can have tracks which /// has handles to graph nodes. See `animation` module docs for more info. pub animations: AnimationContainer, /// Physics world. Allows you create various physics objects such as static geometries and /// rigid bodies. Rigid bodies then should be linked with graph nodes using binder. pub physics: Physics, /// Physics binder is a bridge between physics world and scene graph. If a rigid body is linked /// to a graph node, then rigid body will control local transform of node. pub physics_binder: PhysicsBinder, /// Texture to draw scene to. If empty, scene will be drawn on screen directly. /// It is useful to "embed" some scene into other by drawing a quad with this /// texture. 
This can be used to make in-game video conference - you can make /// separate scene with your characters and draw scene into texture, then in /// main scene you can attach this texture to some quad which will be used as /// monitor. Other usage could be previewer of models, like pictogram of character /// in real-time strategies, in other words there are plenty of possible uses. pub render_target: Option<Arc<Mutex<Texture>>>, lightmap: Option<Lightmap>, } impl Default for Scene { fn default() -> Self { Self { graph: Default::default(), animations: Default::default(), physics: Default::default(), physics_binder: Default::default(), render_target: None, lightmap: None, } } } impl Scene { /// Creates new scene with single root node. /// /// # Notes /// /// This method differs from Default trait implementation! Scene::default() creates /// empty graph with no nodes. #[inline] pub fn new() -> Self { Self { // Graph must be created with `new` method because it differs from `default` graph: Graph::new(), physics: Default::default(), animations: Default::default(), physics_binder: Default::default(), render_target: None, lightmap: None, } } /// Tries to load scene from given file. File can contain any scene in native engine format. /// Such scenes can be made in rusty editor. pub fn from_file<P: AsRef<Path>>( path: P, resource_manager: &mut ResourceManager, ) -> Result<Self, VisitError> { let mut scene = Scene::default(); let mut visitor = Visitor::load_binary(path.as_ref())?; scene.visit("Scene", &mut visitor)?; // Restore pointers to resources. Scene saves only paths to resources, here we must // find real resources instead. for node in scene.graph.linear_iter_mut() { if let Some(shallow_resource) = node.resource.clone() { node.resource = resource_manager.request_model(&shallow_resource.lock().unwrap().path); } } // And do resolve to extract correct graphical data and so on. 
scene.resolve(); Ok(scene) } fn update_physics(&mut self, dt: f32) { self.physics.step(dt); // Keep pair when node and body are both alive. let graph = &self.graph; let physics = &self.physics; self.physics_binder .node_rigid_body_map .retain(|node, body| { graph.is_valid_handle(*node) && physics.is_valid_body_handle(*body) }); // Sync node positions with assigned physics bodies for (node, body) in self.physics_binder.node_rigid_body_map.iter() { let body = physics.borrow_body(*body); self.graph[*node] .local_transform_mut() .set_position(body.get_position()); } } /// Removes node from scene with all associated entities, like animations etc. /// /// # Panics /// /// Panics if handle is invalid. pub fn remove_node(&mut self, handle: Handle<Node>) { for descendant in self.graph.traverse_handle_iter(handle) { // Remove all associated animations. self.animations.retain(|animation| { for track in animation.get_tracks() { if track.get_node() == descendant { return false; } } true }); } self.graph.remove_node(handle) } pub(in crate) fn resolve(&mut self) { Log::writeln("Starting resolve...".to_owned()); self.graph.resolve(); self.animations.resolve(&self.graph); Log::writeln("Resolve succeeded!".to_owned()); } /// Tries to set new lightmap to scene. pub fn set_lightmap(&mut self, lightmap: Lightmap) -> Result<Option<Lightmap>, &'static str> { // Assign textures to surfaces. for (handle, lightmaps) in lightmap.map.iter() { if let Node::Mesh(mesh) = &mut self.graph[*handle] { if mesh.surfaces().len() != lightmaps.len() { return Err("failed to set lightmap, surface count mismatch"); } for (surface, entry) in mesh.surfaces_mut().iter_mut().zip(lightmaps) { // This unwrap() call must never panic in normal conditions, because texture wrapped in Option // only to implement Default trait to be serializable. 
let texture = entry.texture.clone().unwrap(); surface.set_lightmap_texture(texture) } } } Ok(std::mem::replace(&mut self.lightmap, Some(lightmap))) } /// Performs single update tick with given delta time from last frame. Internally /// it updates physics, animations, and each graph node. In most cases there is /// no need to call it directly, engine automatically updates all available scenes. pub fn update(&mut self, frame_size: Vec2, dt: f32) { self.update_physics(dt); self.animations.update_animations(dt); self.graph.update_nodes(frame_size, dt); } /// Creates deep copy of a scene, filter predicate allows you to filter out nodes /// by your criteria. pub fn clone<F>(&self, filter: &mut F) -> Self where F: FnMut(Handle<Node>, &Node) -> bool, { let (graph, old_new_map) = self.graph.clone(filter); let mut animations = self.animations.clone(); for animation in animations.iter_mut() { // Remove all tracks for nodes that were filtered out. animation.retain_tracks(|track| old_new_map.contains_key(&track.get_node())); // Remap track nodes. for track in animation.get_tracks_mut() { track.set_node(old_new_map[&track.get_node()]); } } let physics = self.physics.clone(); let mut physics_binder = PhysicsBinder::default(); for (node, &body) in self.physics_binder.node_rigid_body_map.iter() { // Make sure we bind existing node with new physical body. if let Some(&new_node) = old_new_map.get(node)
} Self { graph, animations, physics, physics_binder, render_target: Default::default(), lightmap: self.lightmap.clone(), } } } impl Visit for Scene { fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult { visitor.enter_region(name)?; self.physics_binder.visit("PhysicsBinder", visitor)?; self.graph.visit("Graph", visitor)?; self.animations.visit("Animations", visitor)?; self.physics.visit("Physics", visitor)?; let _ = self.lightmap.visit("Lightmap", visitor); visitor.leave_region() } } /// Container for scenes in the engine. It just a simple wrapper around Pool. pub struct SceneContainer { pool: Pool<Scene>, } impl SceneContainer { pub(in crate) fn new() -> Self { Self { pool: Pool::new() } } /// Creates new iterator over scenes in container. #[inline] pub fn iter(&self) -> PoolIterator<Scene> { self.pool.iter() } /// Creates new mutable iterator over scenes in container. #[inline] pub fn iter_mut(&mut self) -> PoolIteratorMut<Scene> { self.pool.iter_mut() } /// Adds new scene into container. #[inline] pub fn add(&mut self, scene: Scene) -> Handle<Scene> { self.pool.spawn(scene) } /// Removes all scenes from container. #[inline] pub fn clear(&mut self) { self.pool.clear() } /// Removes given scene from container. #[inline] pub fn remove(&mut self, handle: Handle<Scene>) { self.pool.free(handle); } } impl Index<Handle<Scene>> for SceneContainer { type Output = Scene; #[inline] fn index(&self, index: Handle<Scene>) -> &Self::Output { &self.pool[index] } } impl IndexMut<Handle<Scene>> for SceneContainer { #[inline] fn index_mut(&mut self, index: Handle<Scene>) -> &mut Self::Output { &mut self.pool[index] } } impl Default for SceneContainer { fn default() -> Self { Self { pool: Pool::new() } } } impl Visit for SceneContainer { fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult { visitor.enter_region(name)?; self.pool.visit("Pool", visitor)?; visitor.leave_region() } }
{ // Re-use of body handle is fine here because physics copy bodies // directly and handles from previous pool is still suitable for copy. physics_binder.bind(new_node, body); }
conditional_block
mod.rs
#![warn(missing_docs)] //! Contains all structures and methods to create and manage scenes. //! //! Scene is container for graph nodes, animations and physics. pub mod base; pub mod camera; pub mod graph; pub mod light; pub mod mesh; pub mod node; pub mod particle_system; pub mod sprite; pub mod transform; use crate::{ animation::AnimationContainer, core::{ math::vec2::Vec2, pool::{Handle, Pool, PoolIterator, PoolIteratorMut}, visitor::{Visit, VisitError, VisitResult, Visitor}, }, engine::resource_manager::ResourceManager, physics::{rigid_body::RigidBody, Physics}, resource::texture::Texture, scene::{graph::Graph, node::Node}, utils::{lightmap::Lightmap, log::Log}, }; use std::{ collections::HashMap, ops::{Index, IndexMut}, path::Path, sync::{Arc, Mutex}, }; /// Physics binder is used to link graph nodes with rigid bodies. Scene will /// sync transform of node with its associated rigid body. #[derive(Clone, Debug)] pub struct PhysicsBinder { node_rigid_body_map: HashMap<Handle<Node>, Handle<RigidBody>>, } impl Default for PhysicsBinder { fn default() -> Self { Self { node_rigid_body_map: Default::default(), } } } impl PhysicsBinder { /// Links given graph node with specified rigid body. pub fn bind( &mut self, node: Handle<Node>, rigid_body: Handle<RigidBody>, ) -> Option<Handle<RigidBody>> { self.node_rigid_body_map.insert(node, rigid_body) } /// Unlinks given graph node from its associated rigid body (if any). pub fn unbind(&mut self, node: Handle<Node>) -> Option<Handle<RigidBody>> { self.node_rigid_body_map.remove(&node) } /// Unlinks given body from a node that is linked with the body. /// /// # Performance /// /// This method is slow because of two reasons: /// /// 1) Search is linear /// 2) Additional memory is allocated /// /// So it is not advised to call it in performance critical places. 
pub fn unbind_by_body(&mut self, body: Handle<RigidBody>) -> Handle<Node> { let mut node = Handle::NONE; self.node_rigid_body_map = self .node_rigid_body_map .clone() .into_iter() .filter(|&(n, b)| { if b == body { node = n; false } else { true } }) .collect(); node } /// Returns handle of rigid body associated with given node. It will return /// Handle::NONE if given node isn't linked to a rigid body. pub fn body_of(&self, node: Handle<Node>) -> Handle<RigidBody> { self.node_rigid_body_map .get(&node) .copied() .unwrap_or_default() } } impl Visit for PhysicsBinder { fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult { visitor.enter_region(name)?; self.node_rigid_body_map.visit("Map", visitor)?; visitor.leave_region() } } /// See module docs. #[derive(Debug)] pub struct Scene { /// Graph is main container for all scene nodes. It calculates global transforms for nodes, /// updates them and performs all other important work. See `graph` module docs for more /// info. pub graph: Graph, /// Animations container controls all animation on scene. Each animation can have tracks which /// has handles to graph nodes. See `animation` module docs for more info. pub animations: AnimationContainer, /// Physics world. Allows you create various physics objects such as static geometries and /// rigid bodies. Rigid bodies then should be linked with graph nodes using binder. pub physics: Physics, /// Physics binder is a bridge between physics world and scene graph. If a rigid body is linked /// to a graph node, then rigid body will control local transform of node. pub physics_binder: PhysicsBinder, /// Texture to draw scene to. If empty, scene will be drawn on screen directly. /// It is useful to "embed" some scene into other by drawing a quad with this /// texture. 
This can be used to make in-game video conference - you can make /// separate scene with your characters and draw scene into texture, then in /// main scene you can attach this texture to some quad which will be used as /// monitor. Other usage could be previewer of models, like pictogram of character /// in real-time strategies, in other words there are plenty of possible uses. pub render_target: Option<Arc<Mutex<Texture>>>, lightmap: Option<Lightmap>, } impl Default for Scene { fn default() -> Self { Self { graph: Default::default(), animations: Default::default(), physics: Default::default(), physics_binder: Default::default(), render_target: None, lightmap: None, } } } impl Scene { /// Creates new scene with single root node. /// /// # Notes /// /// This method differs from Default trait implementation! Scene::default() creates /// empty graph with no nodes. #[inline] pub fn new() -> Self { Self { // Graph must be created with `new` method because it differs from `default` graph: Graph::new(), physics: Default::default(), animations: Default::default(), physics_binder: Default::default(), render_target: None, lightmap: None, } } /// Tries to load scene from given file. File can contain any scene in native engine format. /// Such scenes can be made in rusty editor. pub fn from_file<P: AsRef<Path>>( path: P, resource_manager: &mut ResourceManager, ) -> Result<Self, VisitError> { let mut scene = Scene::default(); let mut visitor = Visitor::load_binary(path.as_ref())?; scene.visit("Scene", &mut visitor)?; // Restore pointers to resources. Scene saves only paths to resources, here we must // find real resources instead. for node in scene.graph.linear_iter_mut() { if let Some(shallow_resource) = node.resource.clone() { node.resource = resource_manager.request_model(&shallow_resource.lock().unwrap().path); } } // And do resolve to extract correct graphical data and so on. 
scene.resolve(); Ok(scene) } fn update_physics(&mut self, dt: f32) { self.physics.step(dt); // Keep pair when node and body are both alive. let graph = &self.graph; let physics = &self.physics; self.physics_binder .node_rigid_body_map .retain(|node, body| { graph.is_valid_handle(*node) && physics.is_valid_body_handle(*body) }); // Sync node positions with assigned physics bodies for (node, body) in self.physics_binder.node_rigid_body_map.iter() { let body = physics.borrow_body(*body); self.graph[*node] .local_transform_mut() .set_position(body.get_position()); } } /// Removes node from scene with all associated entities, like animations etc. /// /// # Panics /// /// Panics if handle is invalid. pub fn remove_node(&mut self, handle: Handle<Node>)
pub(in crate) fn resolve(&mut self) { Log::writeln("Starting resolve...".to_owned()); self.graph.resolve(); self.animations.resolve(&self.graph); Log::writeln("Resolve succeeded!".to_owned()); } /// Tries to set new lightmap to scene. pub fn set_lightmap(&mut self, lightmap: Lightmap) -> Result<Option<Lightmap>, &'static str> { // Assign textures to surfaces. for (handle, lightmaps) in lightmap.map.iter() { if let Node::Mesh(mesh) = &mut self.graph[*handle] { if mesh.surfaces().len() != lightmaps.len() { return Err("failed to set lightmap, surface count mismatch"); } for (surface, entry) in mesh.surfaces_mut().iter_mut().zip(lightmaps) { // This unwrap() call must never panic in normal conditions, because texture wrapped in Option // only to implement Default trait to be serializable. let texture = entry.texture.clone().unwrap(); surface.set_lightmap_texture(texture) } } } Ok(std::mem::replace(&mut self.lightmap, Some(lightmap))) } /// Performs single update tick with given delta time from last frame. Internally /// it updates physics, animations, and each graph node. In most cases there is /// no need to call it directly, engine automatically updates all available scenes. pub fn update(&mut self, frame_size: Vec2, dt: f32) { self.update_physics(dt); self.animations.update_animations(dt); self.graph.update_nodes(frame_size, dt); } /// Creates deep copy of a scene, filter predicate allows you to filter out nodes /// by your criteria. pub fn clone<F>(&self, filter: &mut F) -> Self where F: FnMut(Handle<Node>, &Node) -> bool, { let (graph, old_new_map) = self.graph.clone(filter); let mut animations = self.animations.clone(); for animation in animations.iter_mut() { // Remove all tracks for nodes that were filtered out. animation.retain_tracks(|track| old_new_map.contains_key(&track.get_node())); // Remap track nodes. 
for track in animation.get_tracks_mut() { track.set_node(old_new_map[&track.get_node()]); } } let physics = self.physics.clone(); let mut physics_binder = PhysicsBinder::default(); for (node, &body) in self.physics_binder.node_rigid_body_map.iter() { // Make sure we bind existing node with new physical body. if let Some(&new_node) = old_new_map.get(node) { // Re-use of body handle is fine here because physics copy bodies // directly and handles from previous pool is still suitable for copy. physics_binder.bind(new_node, body); } } Self { graph, animations, physics, physics_binder, render_target: Default::default(), lightmap: self.lightmap.clone(), } } } impl Visit for Scene { fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult { visitor.enter_region(name)?; self.physics_binder.visit("PhysicsBinder", visitor)?; self.graph.visit("Graph", visitor)?; self.animations.visit("Animations", visitor)?; self.physics.visit("Physics", visitor)?; let _ = self.lightmap.visit("Lightmap", visitor); visitor.leave_region() } } /// Container for scenes in the engine. It just a simple wrapper around Pool. pub struct SceneContainer { pool: Pool<Scene>, } impl SceneContainer { pub(in crate) fn new() -> Self { Self { pool: Pool::new() } } /// Creates new iterator over scenes in container. #[inline] pub fn iter(&self) -> PoolIterator<Scene> { self.pool.iter() } /// Creates new mutable iterator over scenes in container. #[inline] pub fn iter_mut(&mut self) -> PoolIteratorMut<Scene> { self.pool.iter_mut() } /// Adds new scene into container. #[inline] pub fn add(&mut self, scene: Scene) -> Handle<Scene> { self.pool.spawn(scene) } /// Removes all scenes from container. #[inline] pub fn clear(&mut self) { self.pool.clear() } /// Removes given scene from container. 
#[inline] pub fn remove(&mut self, handle: Handle<Scene>) { self.pool.free(handle); } } impl Index<Handle<Scene>> for SceneContainer { type Output = Scene; #[inline] fn index(&self, index: Handle<Scene>) -> &Self::Output { &self.pool[index] } } impl IndexMut<Handle<Scene>> for SceneContainer { #[inline] fn index_mut(&mut self, index: Handle<Scene>) -> &mut Self::Output { &mut self.pool[index] } } impl Default for SceneContainer { fn default() -> Self { Self { pool: Pool::new() } } } impl Visit for SceneContainer { fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult { visitor.enter_region(name)?; self.pool.visit("Pool", visitor)?; visitor.leave_region() } }
{ for descendant in self.graph.traverse_handle_iter(handle) { // Remove all associated animations. self.animations.retain(|animation| { for track in animation.get_tracks() { if track.get_node() == descendant { return false; } } true }); } self.graph.remove_node(handle) }
identifier_body
mod.rs
#![warn(missing_docs)] //! Contains all structures and methods to create and manage scenes. //! //! Scene is container for graph nodes, animations and physics. pub mod base; pub mod camera; pub mod graph; pub mod light; pub mod mesh; pub mod node; pub mod particle_system; pub mod sprite; pub mod transform; use crate::{ animation::AnimationContainer, core::{ math::vec2::Vec2, pool::{Handle, Pool, PoolIterator, PoolIteratorMut}, visitor::{Visit, VisitError, VisitResult, Visitor}, }, engine::resource_manager::ResourceManager, physics::{rigid_body::RigidBody, Physics}, resource::texture::Texture, scene::{graph::Graph, node::Node}, utils::{lightmap::Lightmap, log::Log}, }; use std::{ collections::HashMap, ops::{Index, IndexMut}, path::Path, sync::{Arc, Mutex}, }; /// Physics binder is used to link graph nodes with rigid bodies. Scene will /// sync transform of node with its associated rigid body. #[derive(Clone, Debug)] pub struct PhysicsBinder { node_rigid_body_map: HashMap<Handle<Node>, Handle<RigidBody>>, } impl Default for PhysicsBinder { fn default() -> Self { Self { node_rigid_body_map: Default::default(), } } } impl PhysicsBinder { /// Links given graph node with specified rigid body. pub fn bind( &mut self, node: Handle<Node>, rigid_body: Handle<RigidBody>, ) -> Option<Handle<RigidBody>> { self.node_rigid_body_map.insert(node, rigid_body) } /// Unlinks given graph node from its associated rigid body (if any). pub fn unbind(&mut self, node: Handle<Node>) -> Option<Handle<RigidBody>> { self.node_rigid_body_map.remove(&node) } /// Unlinks given body from a node that is linked with the body. /// /// # Performance /// /// This method is slow because of two reasons: /// /// 1) Search is linear /// 2) Additional memory is allocated /// /// So it is not advised to call it in performance critical places. 
pub fn unbind_by_body(&mut self, body: Handle<RigidBody>) -> Handle<Node> { let mut node = Handle::NONE; self.node_rigid_body_map = self .node_rigid_body_map .clone() .into_iter() .filter(|&(n, b)| { if b == body { node = n; false } else { true } }) .collect(); node } /// Returns handle of rigid body associated with given node. It will return /// Handle::NONE if given node isn't linked to a rigid body. pub fn body_of(&self, node: Handle<Node>) -> Handle<RigidBody> { self.node_rigid_body_map .get(&node) .copied() .unwrap_or_default() } } impl Visit for PhysicsBinder { fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult { visitor.enter_region(name)?; self.node_rigid_body_map.visit("Map", visitor)?; visitor.leave_region() } } /// See module docs. #[derive(Debug)] pub struct Scene { /// Graph is main container for all scene nodes. It calculates global transforms for nodes, /// updates them and performs all other important work. See `graph` module docs for more /// info. pub graph: Graph, /// Animations container controls all animation on scene. Each animation can have tracks which /// has handles to graph nodes. See `animation` module docs for more info. pub animations: AnimationContainer, /// Physics world. Allows you create various physics objects such as static geometries and /// rigid bodies. Rigid bodies then should be linked with graph nodes using binder. pub physics: Physics, /// Physics binder is a bridge between physics world and scene graph. If a rigid body is linked /// to a graph node, then rigid body will control local transform of node. pub physics_binder: PhysicsBinder, /// Texture to draw scene to. If empty, scene will be drawn on screen directly. /// It is useful to "embed" some scene into other by drawing a quad with this /// texture. 
This can be used to make in-game video conference - you can make /// separate scene with your characters and draw scene into texture, then in /// main scene you can attach this texture to some quad which will be used as /// monitor. Other usage could be previewer of models, like pictogram of character /// in real-time strategies, in other words there are plenty of possible uses. pub render_target: Option<Arc<Mutex<Texture>>>, lightmap: Option<Lightmap>, } impl Default for Scene { fn default() -> Self { Self { graph: Default::default(), animations: Default::default(), physics: Default::default(), physics_binder: Default::default(), render_target: None, lightmap: None, } }
impl Scene { /// Creates new scene with single root node. /// /// # Notes /// /// This method differs from Default trait implementation! Scene::default() creates /// empty graph with no nodes. #[inline] pub fn new() -> Self { Self { // Graph must be created with `new` method because it differs from `default` graph: Graph::new(), physics: Default::default(), animations: Default::default(), physics_binder: Default::default(), render_target: None, lightmap: None, } } /// Tries to load scene from given file. File can contain any scene in native engine format. /// Such scenes can be made in rusty editor. pub fn from_file<P: AsRef<Path>>( path: P, resource_manager: &mut ResourceManager, ) -> Result<Self, VisitError> { let mut scene = Scene::default(); let mut visitor = Visitor::load_binary(path.as_ref())?; scene.visit("Scene", &mut visitor)?; // Restore pointers to resources. Scene saves only paths to resources, here we must // find real resources instead. for node in scene.graph.linear_iter_mut() { if let Some(shallow_resource) = node.resource.clone() { node.resource = resource_manager.request_model(&shallow_resource.lock().unwrap().path); } } // And do resolve to extract correct graphical data and so on. scene.resolve(); Ok(scene) } fn update_physics(&mut self, dt: f32) { self.physics.step(dt); // Keep pair when node and body are both alive. let graph = &self.graph; let physics = &self.physics; self.physics_binder .node_rigid_body_map .retain(|node, body| { graph.is_valid_handle(*node) && physics.is_valid_body_handle(*body) }); // Sync node positions with assigned physics bodies for (node, body) in self.physics_binder.node_rigid_body_map.iter() { let body = physics.borrow_body(*body); self.graph[*node] .local_transform_mut() .set_position(body.get_position()); } } /// Removes node from scene with all associated entities, like animations etc. /// /// # Panics /// /// Panics if handle is invalid. 
pub fn remove_node(&mut self, handle: Handle<Node>) { for descendant in self.graph.traverse_handle_iter(handle) { // Remove all associated animations. self.animations.retain(|animation| { for track in animation.get_tracks() { if track.get_node() == descendant { return false; } } true }); } self.graph.remove_node(handle) } pub(in crate) fn resolve(&mut self) { Log::writeln("Starting resolve...".to_owned()); self.graph.resolve(); self.animations.resolve(&self.graph); Log::writeln("Resolve succeeded!".to_owned()); } /// Tries to set new lightmap to scene. pub fn set_lightmap(&mut self, lightmap: Lightmap) -> Result<Option<Lightmap>, &'static str> { // Assign textures to surfaces. for (handle, lightmaps) in lightmap.map.iter() { if let Node::Mesh(mesh) = &mut self.graph[*handle] { if mesh.surfaces().len() != lightmaps.len() { return Err("failed to set lightmap, surface count mismatch"); } for (surface, entry) in mesh.surfaces_mut().iter_mut().zip(lightmaps) { // This unwrap() call must never panic in normal conditions, because texture wrapped in Option // only to implement Default trait to be serializable. let texture = entry.texture.clone().unwrap(); surface.set_lightmap_texture(texture) } } } Ok(std::mem::replace(&mut self.lightmap, Some(lightmap))) } /// Performs single update tick with given delta time from last frame. Internally /// it updates physics, animations, and each graph node. In most cases there is /// no need to call it directly, engine automatically updates all available scenes. pub fn update(&mut self, frame_size: Vec2, dt: f32) { self.update_physics(dt); self.animations.update_animations(dt); self.graph.update_nodes(frame_size, dt); } /// Creates deep copy of a scene, filter predicate allows you to filter out nodes /// by your criteria. 
pub fn clone<F>(&self, filter: &mut F) -> Self where F: FnMut(Handle<Node>, &Node) -> bool, { let (graph, old_new_map) = self.graph.clone(filter); let mut animations = self.animations.clone(); for animation in animations.iter_mut() { // Remove all tracks for nodes that were filtered out. animation.retain_tracks(|track| old_new_map.contains_key(&track.get_node())); // Remap track nodes. for track in animation.get_tracks_mut() { track.set_node(old_new_map[&track.get_node()]); } } let physics = self.physics.clone(); let mut physics_binder = PhysicsBinder::default(); for (node, &body) in self.physics_binder.node_rigid_body_map.iter() { // Make sure we bind existing node with new physical body. if let Some(&new_node) = old_new_map.get(node) { // Re-use of body handle is fine here because physics copy bodies // directly and handles from previous pool is still suitable for copy. physics_binder.bind(new_node, body); } } Self { graph, animations, physics, physics_binder, render_target: Default::default(), lightmap: self.lightmap.clone(), } } } impl Visit for Scene { fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult { visitor.enter_region(name)?; self.physics_binder.visit("PhysicsBinder", visitor)?; self.graph.visit("Graph", visitor)?; self.animations.visit("Animations", visitor)?; self.physics.visit("Physics", visitor)?; let _ = self.lightmap.visit("Lightmap", visitor); visitor.leave_region() } } /// Container for scenes in the engine. It just a simple wrapper around Pool. pub struct SceneContainer { pool: Pool<Scene>, } impl SceneContainer { pub(in crate) fn new() -> Self { Self { pool: Pool::new() } } /// Creates new iterator over scenes in container. #[inline] pub fn iter(&self) -> PoolIterator<Scene> { self.pool.iter() } /// Creates new mutable iterator over scenes in container. #[inline] pub fn iter_mut(&mut self) -> PoolIteratorMut<Scene> { self.pool.iter_mut() } /// Adds new scene into container. 
#[inline] pub fn add(&mut self, scene: Scene) -> Handle<Scene> { self.pool.spawn(scene) } /// Removes all scenes from container. #[inline] pub fn clear(&mut self) { self.pool.clear() } /// Removes given scene from container. #[inline] pub fn remove(&mut self, handle: Handle<Scene>) { self.pool.free(handle); } } impl Index<Handle<Scene>> for SceneContainer { type Output = Scene; #[inline] fn index(&self, index: Handle<Scene>) -> &Self::Output { &self.pool[index] } } impl IndexMut<Handle<Scene>> for SceneContainer { #[inline] fn index_mut(&mut self, index: Handle<Scene>) -> &mut Self::Output { &mut self.pool[index] } } impl Default for SceneContainer { fn default() -> Self { Self { pool: Pool::new() } } } impl Visit for SceneContainer { fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult { visitor.enter_region(name)?; self.pool.visit("Pool", visitor)?; visitor.leave_region() } }
}
random_line_split
mod.rs
#![warn(missing_docs)] //! Contains all structures and methods to create and manage scenes. //! //! Scene is container for graph nodes, animations and physics. pub mod base; pub mod camera; pub mod graph; pub mod light; pub mod mesh; pub mod node; pub mod particle_system; pub mod sprite; pub mod transform; use crate::{ animation::AnimationContainer, core::{ math::vec2::Vec2, pool::{Handle, Pool, PoolIterator, PoolIteratorMut}, visitor::{Visit, VisitError, VisitResult, Visitor}, }, engine::resource_manager::ResourceManager, physics::{rigid_body::RigidBody, Physics}, resource::texture::Texture, scene::{graph::Graph, node::Node}, utils::{lightmap::Lightmap, log::Log}, }; use std::{ collections::HashMap, ops::{Index, IndexMut}, path::Path, sync::{Arc, Mutex}, }; /// Physics binder is used to link graph nodes with rigid bodies. Scene will /// sync transform of node with its associated rigid body. #[derive(Clone, Debug)] pub struct PhysicsBinder { node_rigid_body_map: HashMap<Handle<Node>, Handle<RigidBody>>, } impl Default for PhysicsBinder { fn default() -> Self { Self { node_rigid_body_map: Default::default(), } } } impl PhysicsBinder { /// Links given graph node with specified rigid body. pub fn bind( &mut self, node: Handle<Node>, rigid_body: Handle<RigidBody>, ) -> Option<Handle<RigidBody>> { self.node_rigid_body_map.insert(node, rigid_body) } /// Unlinks given graph node from its associated rigid body (if any). pub fn unbind(&mut self, node: Handle<Node>) -> Option<Handle<RigidBody>> { self.node_rigid_body_map.remove(&node) } /// Unlinks given body from a node that is linked with the body. /// /// # Performance /// /// This method is slow because of two reasons: /// /// 1) Search is linear /// 2) Additional memory is allocated /// /// So it is not advised to call it in performance critical places. 
pub fn unbind_by_body(&mut self, body: Handle<RigidBody>) -> Handle<Node> { let mut node = Handle::NONE; self.node_rigid_body_map = self .node_rigid_body_map .clone() .into_iter() .filter(|&(n, b)| { if b == body { node = n; false } else { true } }) .collect(); node } /// Returns handle of rigid body associated with given node. It will return /// Handle::NONE if given node isn't linked to a rigid body. pub fn body_of(&self, node: Handle<Node>) -> Handle<RigidBody> { self.node_rigid_body_map .get(&node) .copied() .unwrap_or_default() } } impl Visit for PhysicsBinder { fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult { visitor.enter_region(name)?; self.node_rigid_body_map.visit("Map", visitor)?; visitor.leave_region() } } /// See module docs. #[derive(Debug)] pub struct Scene { /// Graph is main container for all scene nodes. It calculates global transforms for nodes, /// updates them and performs all other important work. See `graph` module docs for more /// info. pub graph: Graph, /// Animations container controls all animation on scene. Each animation can have tracks which /// has handles to graph nodes. See `animation` module docs for more info. pub animations: AnimationContainer, /// Physics world. Allows you create various physics objects such as static geometries and /// rigid bodies. Rigid bodies then should be linked with graph nodes using binder. pub physics: Physics, /// Physics binder is a bridge between physics world and scene graph. If a rigid body is linked /// to a graph node, then rigid body will control local transform of node. pub physics_binder: PhysicsBinder, /// Texture to draw scene to. If empty, scene will be drawn on screen directly. /// It is useful to "embed" some scene into other by drawing a quad with this /// texture. 
This can be used to make in-game video conference - you can make /// separate scene with your characters and draw scene into texture, then in /// main scene you can attach this texture to some quad which will be used as /// monitor. Other usage could be previewer of models, like pictogram of character /// in real-time strategies, in other words there are plenty of possible uses. pub render_target: Option<Arc<Mutex<Texture>>>, lightmap: Option<Lightmap>, } impl Default for Scene { fn default() -> Self { Self { graph: Default::default(), animations: Default::default(), physics: Default::default(), physics_binder: Default::default(), render_target: None, lightmap: None, } } } impl Scene { /// Creates new scene with single root node. /// /// # Notes /// /// This method differs from Default trait implementation! Scene::default() creates /// empty graph with no nodes. #[inline] pub fn new() -> Self { Self { // Graph must be created with `new` method because it differs from `default` graph: Graph::new(), physics: Default::default(), animations: Default::default(), physics_binder: Default::default(), render_target: None, lightmap: None, } } /// Tries to load scene from given file. File can contain any scene in native engine format. /// Such scenes can be made in rusty editor. pub fn from_file<P: AsRef<Path>>( path: P, resource_manager: &mut ResourceManager, ) -> Result<Self, VisitError> { let mut scene = Scene::default(); let mut visitor = Visitor::load_binary(path.as_ref())?; scene.visit("Scene", &mut visitor)?; // Restore pointers to resources. Scene saves only paths to resources, here we must // find real resources instead. for node in scene.graph.linear_iter_mut() { if let Some(shallow_resource) = node.resource.clone() { node.resource = resource_manager.request_model(&shallow_resource.lock().unwrap().path); } } // And do resolve to extract correct graphical data and so on. 
scene.resolve(); Ok(scene) } fn update_physics(&mut self, dt: f32) { self.physics.step(dt); // Keep pair when node and body are both alive. let graph = &self.graph; let physics = &self.physics; self.physics_binder .node_rigid_body_map .retain(|node, body| { graph.is_valid_handle(*node) && physics.is_valid_body_handle(*body) }); // Sync node positions with assigned physics bodies for (node, body) in self.physics_binder.node_rigid_body_map.iter() { let body = physics.borrow_body(*body); self.graph[*node] .local_transform_mut() .set_position(body.get_position()); } } /// Removes node from scene with all associated entities, like animations etc. /// /// # Panics /// /// Panics if handle is invalid. pub fn remove_node(&mut self, handle: Handle<Node>) { for descendant in self.graph.traverse_handle_iter(handle) { // Remove all associated animations. self.animations.retain(|animation| { for track in animation.get_tracks() { if track.get_node() == descendant { return false; } } true }); } self.graph.remove_node(handle) } pub(in crate) fn resolve(&mut self) { Log::writeln("Starting resolve...".to_owned()); self.graph.resolve(); self.animations.resolve(&self.graph); Log::writeln("Resolve succeeded!".to_owned()); } /// Tries to set new lightmap to scene. pub fn set_lightmap(&mut self, lightmap: Lightmap) -> Result<Option<Lightmap>, &'static str> { // Assign textures to surfaces. for (handle, lightmaps) in lightmap.map.iter() { if let Node::Mesh(mesh) = &mut self.graph[*handle] { if mesh.surfaces().len() != lightmaps.len() { return Err("failed to set lightmap, surface count mismatch"); } for (surface, entry) in mesh.surfaces_mut().iter_mut().zip(lightmaps) { // This unwrap() call must never panic in normal conditions, because texture wrapped in Option // only to implement Default trait to be serializable. 
let texture = entry.texture.clone().unwrap(); surface.set_lightmap_texture(texture) } } } Ok(std::mem::replace(&mut self.lightmap, Some(lightmap))) } /// Performs single update tick with given delta time from last frame. Internally /// it updates physics, animations, and each graph node. In most cases there is /// no need to call it directly, engine automatically updates all available scenes. pub fn update(&mut self, frame_size: Vec2, dt: f32) { self.update_physics(dt); self.animations.update_animations(dt); self.graph.update_nodes(frame_size, dt); } /// Creates deep copy of a scene, filter predicate allows you to filter out nodes /// by your criteria. pub fn clone<F>(&self, filter: &mut F) -> Self where F: FnMut(Handle<Node>, &Node) -> bool, { let (graph, old_new_map) = self.graph.clone(filter); let mut animations = self.animations.clone(); for animation in animations.iter_mut() { // Remove all tracks for nodes that were filtered out. animation.retain_tracks(|track| old_new_map.contains_key(&track.get_node())); // Remap track nodes. for track in animation.get_tracks_mut() { track.set_node(old_new_map[&track.get_node()]); } } let physics = self.physics.clone(); let mut physics_binder = PhysicsBinder::default(); for (node, &body) in self.physics_binder.node_rigid_body_map.iter() { // Make sure we bind existing node with new physical body. if let Some(&new_node) = old_new_map.get(node) { // Re-use of body handle is fine here because physics copy bodies // directly and handles from previous pool is still suitable for copy. 
physics_binder.bind(new_node, body); } } Self { graph, animations, physics, physics_binder, render_target: Default::default(), lightmap: self.lightmap.clone(), } } } impl Visit for Scene { fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult { visitor.enter_region(name)?; self.physics_binder.visit("PhysicsBinder", visitor)?; self.graph.visit("Graph", visitor)?; self.animations.visit("Animations", visitor)?; self.physics.visit("Physics", visitor)?; let _ = self.lightmap.visit("Lightmap", visitor); visitor.leave_region() } } /// Container for scenes in the engine. It just a simple wrapper around Pool. pub struct SceneContainer { pool: Pool<Scene>, } impl SceneContainer { pub(in crate) fn new() -> Self { Self { pool: Pool::new() } } /// Creates new iterator over scenes in container. #[inline] pub fn iter(&self) -> PoolIterator<Scene> { self.pool.iter() } /// Creates new mutable iterator over scenes in container. #[inline] pub fn iter_mut(&mut self) -> PoolIteratorMut<Scene> { self.pool.iter_mut() } /// Adds new scene into container. #[inline] pub fn
(&mut self, scene: Scene) -> Handle<Scene> { self.pool.spawn(scene) } /// Removes all scenes from container. #[inline] pub fn clear(&mut self) { self.pool.clear() } /// Removes given scene from container. #[inline] pub fn remove(&mut self, handle: Handle<Scene>) { self.pool.free(handle); } } impl Index<Handle<Scene>> for SceneContainer { type Output = Scene; #[inline] fn index(&self, index: Handle<Scene>) -> &Self::Output { &self.pool[index] } } impl IndexMut<Handle<Scene>> for SceneContainer { #[inline] fn index_mut(&mut self, index: Handle<Scene>) -> &mut Self::Output { &mut self.pool[index] } } impl Default for SceneContainer { fn default() -> Self { Self { pool: Pool::new() } } } impl Visit for SceneContainer { fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult { visitor.enter_region(name)?; self.pool.visit("Pool", visitor)?; visitor.leave_region() } }
add
identifier_name
extractor.py
from typing import Dict, Any, List, Optional import yaml import json from json import JSONEncoder import os import re import datetime as dt import pandas as pd from pprint import pprint from pathlib import Path from bs4 import BeautifulSoup from bs4.element import Tag import numpy as np HOME = Path( os.getenv('HOME') ) # TODO: # Logros # Destacado: cristian-david-montoya-saldarriaga-09638514a # Herramientas y tecnologías # TODO: features to extract # Whether has resume available # extract english level # https://www.linkedin.com/in/luis-mario-urrea-murillo/ MY_PATH = HOME / '_data/talent' SECS_IN_YEAR = 365.25 * 24 * 3600 COMMON_ENGLISH = {'the', 'with', 'on', 'and', 'I', 'am', 'is', 'my'} COMMON_SPANISH = {'y', 'el', 'la', 'de', 'los', 'las'} class _Config: raw_profiles_path = MY_PATH / 'linkedin_raw_profiles' profiles_yamls_path = MY_PATH / 'linkedin_yaml_profiles' CFG = _Config class DateTimeEncoder(JSONEncoder): """Override the default method""" def default(self, obj): """default formating as string""" if isinstance(obj, (dt.date, dt.datetime)): return obj.isoformat() yaml.SafeDumper.yaml_representers[None] = lambda self, data: \ yaml.representer.SafeRepresenter.represent_str( self, str(data) ) # %% def main(): """Read scraped profiles parse them and write to json and yamls""" # %% CFG.profiles_yamls_path.mkdir(parents=True, exist_ok=True) fpaths = list( _Config.raw_profiles_path.glob('*.html') ) print( f'{len(fpaths)} htmls found' ) # %% fpath = CFG.raw_profiles_path / 'luis-mario-urrea-murillo.html' # %% fpath = CFG.raw_profiles_path / 'cristian-david-montoya-saldarriaga-09638514a.html' # %% fpaths = [ CFG.raw_profiles_path / 'ricardo-alarcon-44079b105.html' ] # %% fpaths = [ Path('/home/teo/_data/talent/linkedin_raw_profiles/israellaguan.html')] # %% dics = {} # %% for i, fpath in enumerate(fpaths): if fpath in dics: continue with fpath.open('rt') as f_in: html = f_in.read() print( f'\n***{i+1}/{len(fpaths)} {fpath.name}:') dic = extract_one( html, fpath ) 
dic['linkedin_url'] = f"https://www.linkedin.com/in/{fpath.name.split('.')[0]}" dic['scraped_at'] = dt.datetime.fromtimestamp( fpath.stat().st_ctime ) # pprint(dic['work_stats']) dics[fpath] = dic dics_arr = list(dics.values()) # %% del dics # %% with (CFG.profiles_yamls_path / 'all_profiles.json').open('wt') as f_out: json.dump( dics_arr, f_out, cls=DateTimeEncoder, indent=4 ) # %% with (CFG.profiles_yamls_path / 'all_profiles.yaml').open('wt') as f_out: yaml.safe_dump( dics_arr, f_out ) # %% df = produce_summary_table( dics_arr ) df.to_excel( CFG.raw_profiles_path.parent / 'mined_ruby_candidates_sample.xlsx', index=False) # %% def _interactive_testing( dics_arr, fpaths, html: str ): # %% # noinspection PyUnresolvedReferences runfile('talent-miner/extractor.py') # %% pprint( dics_arr[4] ) # %% fpath = [ f for f in fpaths if str(f).find('israellaguan') >= 0 ][0] # %% doc = BeautifulSoup( html, features='html.parser' ) # %% _extract_accomplishments(doc) # %% def _extract_accomplishments( doc: BeautifulSoup ) -> Dict[str, List[str]]: accomps = doc.find_all('section', {'class': 'pv-accomplishments-block'}) # accomp0 = accomps[2] ret = {} for accomp in accomps: accomp_header = accomp.find_all('h3', {'class': 'pv-accomplishments-block__title'})[0].text accomp_vals = [ li_elem.text for li_elem in accomp.find_all('li') ] ret[accomp_header] = accomp_vals return ret # %% def produce_summary_table( dics: List[Dict[str, Any]]) -> pd.DataFrame: # %% recs = [] for dic in dics: try: w_stats = dic['work_stats'] edu_stats = dic['education_stats'] skills = dic['skills'] rec = dict( name=dic['name'], total_experience_yrs=w_stats['total_experience_yrs'], n_work_positions=w_stats['n_work_positions'], pos_lt1_year=w_stats['poss_lt1.2_years'], pos_lt2_year=w_stats['poss_lt2_years'], about=dic['about'], about_eng_ratio=dic['about_stats']['about_eng_ratio'], current_position=dic['current_position'], has_worked_abroad=w_stats['has_worked_abroad'], max_degree=edu_stats['max_degree'], 
studied_abroad=edu_stats['has_studied_abroad'], ruby=(skills.get('Ruby', -1) + 1) + (skills.get('Ruby on Rails', -1) + 1), python=skills.get('Python (Programming Language)', -1) + 1, java=skills.get('Java', -1) + 1, javascript=skills.get('JavaScript', -1) + 1, cpp=skills.get('C++', -1) + 1, csharp=skills.get('C#', -1) + 1, skills=skills, profile_text_length=dic['profile_text_stats']['length'], profile_eng_ratio=dic['profile_text_stats']['eng_ratio'] * 10.0, languages=",".join ( dic.get('accomplishments', {}).get('idiomas', []) ), num_contacts=dic['num_contacts'], location=dic['location'], linkedin_url=dic['linkedin_url'], scraped_at=dic['scraped_at']) except Exception as exc: pprint( dic ) raise exc recs.append(rec) df = pd.DataFrame( recs ) # %% return df # %% def extract_one( html: str, fpath: Path ): """Extract data from one scraped html""" # %% doc = BeautifulSoup( html, features='html.parser') ret = { 'linkedin_handle': fpath.name.split('.')[0] } _parse_top_card( ret, doc ) # %% ret['about'] = _extract_about( doc ) # if len(ret['about']) < 100 and ret['about'].find('ver más') > 0: # print( f"\nVer más detected: \nabout:{ret['about']} fpath={fpath}" ) ret['about_stats'] = {'about_eng_ratio': _common_english_ratio(ret['about'])} # %% ret['work_experience'] = _parse_experiences( doc ) ret['work_stats'] = calc_work_stats( ret['work_experience']) # %% ret['skills'] = proc_skills_section( doc ) ret['education'] = _parse_education( doc ) ret['education_stats'] = _education_stats( ret['education']) ret['accomplishments'] = _extract_accomplishments(doc) ret['profile_text_stats'] = profile_text_stats( doc ) # %% return ret # %% def calc_work_stats( work_xps: List[Dict[str, Any]] ): """Calculate total_experience_yrs and other stats""" durations = [ rec['duration'] for rec in work_xps if 'duration' in rec ] total_years = sum( durations ) if durations else None avg_years = np.round( total_years / len(durations), 2) if durations else None poss_lt2_years = sum( 1 for dur in 
durations if dur < 2.0 ) poss_lt1_2_years = sum(1 for dur in durations if dur < 1.2 ) has_worked_abroad = any( rec for rec in work_xps if _is_location_abroad( rec.get('location_raw') )) return { "total_experience_yrs": total_years, 'avg_years': avg_years, 'n_work_positions': len(durations), 'poss_lt2_years': poss_lt2_years, 'poss_lt1.2_years': poss_lt1_2_years, 'has_worked_abroad': has_worked_abroad } # %% def _is_location_abroad( location: Optional[str] ): if location is None or location.strip() == '': return False else: ret = not re.search( 'Colombia|Medell.n|Bogot.|Barranquilla|Cali|Pereira' '|Caldas|Cucuta|Dosquebradas|Antioquia|Remot[eo]', location, re.IGNORECASE) if ret: print( f'abroad location: {location}') return ret def _is_abroad_school( school: Optional[str] ): ret = re.search(r"(University|College|\bof\b)", school) if ret: print( f'abroad school: {school}') return ret def profile_text_stats( doc: BeautifulSoup ): """
def _extract_about( doc ) -> Optional[str]: about_section = doc.find('section', {'class': 'pv-about-section'}) if about_section is None: return None parts = about_section.find_all("p") return (" ".join( part.text.replace('\n', ' ').strip() for part in parts ) .replace( '... ver más', '') ) # %% def _parse_top_card( ret: Dict[ str, Any], doc: BeautifulSoup ): top_card_els = doc.find_all( "ul", {"class": "pv-top-card--list"} ) name_elem = top_card_els[0].find_all("li")[0] name = name_elem.text.strip() current_position = doc.find_all("h2", {"class": "mt1"})[0].text.strip() location = top_card_els[1].find_all( "li" )[0].text.strip() # %% num_contacts = _extract_num_contacts( top_card_els[1] ) # %% top_card_xp = doc.find_all('a', {"class": "pv-top-card--experience-list-item"}) main_school = top_card_xp[0].text.strip() if top_card_xp else None data = dict(name=name, current_position=current_position, location=location, num_contacts=num_contacts, main_school=main_school) ret.update(data) # %% def _extract_num_contacts( elem: Tag ): num_contacts_text = elem.find_all("li")[1].text.strip() mch = re.match(r'(\d+) contactos', num_contacts_text) if mch: return int(mch.group(1)) mch2 = re.search(r'Más de 500 contactos', num_contacts_text) if mch2: return 501 def _parse_experiences(doc: BeautifulSoup) -> List[Dict]: # %% xp_section = doc.find( 'section', {'id': 'experience-section'} ) if xp_section is None: return [] # %% summaries = xp_section.find_all('div', {'class': 'pv-entity__summary-info'}) ret = [ proc_employment_summary(summary) for summary in summaries ] return ret # %% def proc_employment_summary(summary: Tag) -> Dict: """process one employment summary and extract info from it""" xp_record = dict() xp_record['position'] = summary.find('h3').text.strip() company = summary.find_all('p', {'class': 'pv-entity__secondary-title'})[0] xp_record['company'] = "; ".join( [ line.strip() for line in company.text.split('\n') if line.strip() != ''] ) # %% for xp_line in 
summary.find_all('h4'): fld_name, value = [span.text.strip() for span in xp_line.find_all('span') ] if fld_name == 'Fechas de empleo': xp_record['period_raw'] = value period = _extract_period( value ) xp_record['period'] = period # print( period ) xp_record['duration'] = np.round( (period[1] - period[0]).total_seconds() / SECS_IN_YEAR, 2) elif fld_name == 'Duración del empleo': xp_record['duration_raw'] = value elif fld_name == 'Ubicación': xp_record['location_raw'] = value # print( f'location: {value}') elif fld_name.startswith('LinkedIn me ayud'): continue else: print( "proc_employment_summary: ", fld_name, value ) # %% # pprint( xp_record ) # %% return xp_record # %% def _extract_period( period_raw: str ): mch2 = re.match(r'(?P<mes1>[a-z]+)\. de (?P<year1>[0-9]+) . ' r'(?P<mes2>[a-z]+)\. de (?P<year2>[0-9]+)', period_raw) if mch2: # print('mch2', mch2, mch2.group("year1"), mch2.group("year2")) mes1, mes2 = _translate_mes(mch2.group("mes1")), _translate_mes(mch2.group("mes2")) return ( dt.date(int(mch2.group("year1")), int( mes1 ), 1), dt.date(int(mch2.group("year2")), int( mes2 ), 1) ) mch1 = re.match(r'(?P<mes>[a-z]+)\. de (?P<year>[0-9]+)( . actualidad)?', period_raw) if mch1: # print('mch1') mes = _translate_mes(mch1.group("mes")) return dt.date(int(mch1.group("year")), mes, 1), dt.date.today() mch2b = re.match(r'(?P<mes1>[a-z]+)\. de (?P<year1>[0-9]+) . (?P<year2>[0-9]{4})', period_raw) if mch2b: mes1 = _translate_mes(mch2b.group("mes1")) return ( dt.date(int(mch2b.group("year1")), int(mes1), 1), dt.date(int(mch2b.group("year2")), 1, 1) ) mch3 = re.match(r'(?P<year1>[0-9]{4}) . (?P<year2>[0-9]{4})', period_raw) if mch3: return (dt.date(int(mch3.group("year1")), 1, 1), dt.date(int(mch3.group("year2")), 1, 1)) mch4 = re.match(r'(?P<year1>[0-9]{4})', period_raw) if mch4: return (dt.date(int(mch4.group("year1")), 1, 1), dt.date(int(mch4.group("year1")) + 1, 1, 1)) assert False, period_raw # %% def _interactive_test(): # %% period_raw = 'ene. 
de 2015 – actualidad' # %% period_raw = 'ene. de 2015 – may. de 2015' print( _extract_period( period_raw ) ) # %% period_raw = 'ene. de 2012 – may. de 2013' print(_extract_period(period_raw)) # %% def _translate_mes( mes: str) -> int: return {'ene': 1, 'feb': 2, 'mar': 3, 'abr': 4, 'may': 5, 'jun': 6, 'jul': 7, 'ago': 8, 'sept': 9, 'oct': 10, 'nov': 11, 'dic': 12}[mes] def _common_english_ratio( a_text: str ) -> int: if a_text is None: return None words = a_text.split() cnt_english = sum( 1 for word in words if word in COMMON_ENGLISH ) return np.round( cnt_english / (len(words) + 0.001) * 10, 2) def _parse_education(doc: BeautifulSoup) -> List[Dict]: # %% edu_section = doc.find( 'section', {'id': 'education-section'} ) # %% if edu_section is None: return [] # %% summaries = edu_section.find_all('li', {'class': 'pv-education-entity'}) ret = [ proc_education_summary(summary) for summary in summaries ] # %% return ret # %% def _education_stats( edu_records: List[Dict[str, str]]): return {'has_studied_abroad': any(rec['is_abroad_school'] for rec in edu_records), 'max_degree': _max_degree(edu_records)} def proc_education_summary( summary: Tag ) -> Dict[str, str]: """Process one education summary and generate a record""" edu_record = dict() edu_record['school'] = summary.find('h3').text.strip() edu_record['is_abroad_school'] = _is_abroad_school( edu_record['school'] ) for parag in summary.find_all('p'): spans = [span.text.strip() for span in parag.find_all('span')] if len( spans ) == 2: fld_name, value = spans value = value.strip() elif len(spans) == 0: # print( 'education parag: ', parag ) edu_record['description'] = parag.text.strip() continue else: print( 'education spans: ', spans ) continue if fld_name == 'Nombre de la titulación': edu_record['degree_raw'] = value edu_record['degree'] = _classify_degree( value ) # print( 'degree: ', value, _classify_degree(value) ) elif fld_name == 'Disciplina académica': edu_record['field_raw'] = value elif fld_name == 'Nota': 
edu_record['grade_raw'] = value elif fld_name.startswith('Fechas de estudios'): edu_record['period_raw'] = value elif fld_name.startswith('Actividades y asociaciones'): edu_record['activities_raw'] = value else: print("proc_education_summary: ", fld_name, ' :: ', value) if edu_record.get('degree', 'Unknown') == 'Unknown': if re.search( 'Ingenier|Engineering', edu_record.get('field_raw', '') ): edu_record['degree'] = 'University' return edu_record # %% def _classify_degree( degree: str ) -> str: if re.search('Ingenier|Engineer', degree): return 'University' elif re.search('^Tecn.log', degree): return 'Tecnología' elif re.search('^Mae?ste?r', degree): return 'Master''s' elif re.search('^Dimplom', degree): return 'Diploma' elif re.search('^(Esp\.|Especializ)', degree): return 'Specialization' elif re.search('^Phd', degree, re.IGNORECASE): return 'PhD' else: return 'Unknown' DEGREE_LEVELS = {'Tecnología': 1, 'University': 2, 'Diploma': 3, 'Specialization': 4, 'Master''s': 5, 'PhD': 5, 'Unknown': -1} def _max_degree(edu_records: List[Dict[str, str]]) -> Optional[str] : levels = DEGREE_LEVELS if len(edu_records) > 0: return max( [rec.get('degree', 'Unknown') for rec in edu_records ], key=lambda x: levels[x]) else: return None def proc_skills_section( doc: BeautifulSoup ): # %% skills_section = doc.find('section', {'class': 'pv-skill-categories-section'}) if skills_section is None: return {} # %% divs = skills_section.find_all('div', {'class': 'pv-skill-category-entity__skill-wrapper'}) # %% ret = {} for div in divs: texts = [ span.text.strip() for span in div.find_all('span') if span.text.strip() != '' ] if len(texts) >= 1: key = texts[0] if len(texts) >= 3: mch = re.match(r'(\d+)', texts[2]) if mch: ret[key] = int( mch.group(1)) else: print( f"skills {len(texts)} spans: {texts}") ret[key] = None elif len(texts) == 1: ret[key] = 0 else: print(f"skills {len(texts)} spans: {texts}") # %% return ret
some metrics on the whole profile text""" text = doc.find('main', {'class': 'core-rail'}).text.strip() words = text.split() eng_ratio = sum(1 for word in words if word in COMMON_ENGLISH) * 10/ (len(words) + 0.001) return { 'length': len( text ), 'eng_ratio': np.round( eng_ratio, 2)} # %%
identifier_body
extractor.py
from typing import Dict, Any, List, Optional import yaml import json from json import JSONEncoder import os import re import datetime as dt import pandas as pd from pprint import pprint from pathlib import Path from bs4 import BeautifulSoup from bs4.element import Tag import numpy as np HOME = Path( os.getenv('HOME') ) # TODO: # Logros # Destacado: cristian-david-montoya-saldarriaga-09638514a # Herramientas y tecnologías # TODO: features to extract # Whether has resume available # extract english level # https://www.linkedin.com/in/luis-mario-urrea-murillo/ MY_PATH = HOME / '_data/talent' SECS_IN_YEAR = 365.25 * 24 * 3600 COMMON_ENGLISH = {'the', 'with', 'on', 'and', 'I', 'am', 'is', 'my'} COMMON_SPANISH = {'y', 'el', 'la', 'de', 'los', 'las'} class _Config: raw_profiles_path = MY_PATH / 'linkedin_raw_profiles' profiles_yamls_path = MY_PATH / 'linkedin_yaml_profiles' CFG = _Config class DateTimeEncoder(JSONEncoder): """Override the default method""" def default(self, obj): """default formating as string""" if isinstance(obj, (dt.date, dt.datetime)): return obj.isoformat() yaml.SafeDumper.yaml_representers[None] = lambda self, data: \ yaml.representer.SafeRepresenter.represent_str( self, str(data) ) # %% def main(): """Read scraped profiles parse them and write to json and yamls""" # %% CFG.profiles_yamls_path.mkdir(parents=True, exist_ok=True) fpaths = list( _Config.raw_profiles_path.glob('*.html') ) print( f'{len(fpaths)} htmls found' ) # %% fpath = CFG.raw_profiles_path / 'luis-mario-urrea-murillo.html' # %% fpath = CFG.raw_profiles_path / 'cristian-david-montoya-saldarriaga-09638514a.html' # %% fpaths = [ CFG.raw_profiles_path / 'ricardo-alarcon-44079b105.html' ] # %% fpaths = [ Path('/home/teo/_data/talent/linkedin_raw_profiles/israellaguan.html')] # %% dics = {} # %% for i, fpath in enumerate(fpaths): if fpath in dics: continue with fpath.open('rt') as f_in: html = f_in.read() print( f'\n***{i+1}/{len(fpaths)} {fpath.name}:') dic = extract_one( html, fpath ) 
dic['linkedin_url'] = f"https://www.linkedin.com/in/{fpath.name.split('.')[0]}" dic['scraped_at'] = dt.datetime.fromtimestamp( fpath.stat().st_ctime )
dics[fpath] = dic dics_arr = list(dics.values()) # %% del dics # %% with (CFG.profiles_yamls_path / 'all_profiles.json').open('wt') as f_out: json.dump( dics_arr, f_out, cls=DateTimeEncoder, indent=4 ) # %% with (CFG.profiles_yamls_path / 'all_profiles.yaml').open('wt') as f_out: yaml.safe_dump( dics_arr, f_out ) # %% df = produce_summary_table( dics_arr ) df.to_excel( CFG.raw_profiles_path.parent / 'mined_ruby_candidates_sample.xlsx', index=False) # %% def _interactive_testing( dics_arr, fpaths, html: str ): # %% # noinspection PyUnresolvedReferences runfile('talent-miner/extractor.py') # %% pprint( dics_arr[4] ) # %% fpath = [ f for f in fpaths if str(f).find('israellaguan') >= 0 ][0] # %% doc = BeautifulSoup( html, features='html.parser' ) # %% _extract_accomplishments(doc) # %% def _extract_accomplishments( doc: BeautifulSoup ) -> Dict[str, List[str]]: accomps = doc.find_all('section', {'class': 'pv-accomplishments-block'}) # accomp0 = accomps[2] ret = {} for accomp in accomps: accomp_header = accomp.find_all('h3', {'class': 'pv-accomplishments-block__title'})[0].text accomp_vals = [ li_elem.text for li_elem in accomp.find_all('li') ] ret[accomp_header] = accomp_vals return ret # %% def produce_summary_table( dics: List[Dict[str, Any]]) -> pd.DataFrame: # %% recs = [] for dic in dics: try: w_stats = dic['work_stats'] edu_stats = dic['education_stats'] skills = dic['skills'] rec = dict( name=dic['name'], total_experience_yrs=w_stats['total_experience_yrs'], n_work_positions=w_stats['n_work_positions'], pos_lt1_year=w_stats['poss_lt1.2_years'], pos_lt2_year=w_stats['poss_lt2_years'], about=dic['about'], about_eng_ratio=dic['about_stats']['about_eng_ratio'], current_position=dic['current_position'], has_worked_abroad=w_stats['has_worked_abroad'], max_degree=edu_stats['max_degree'], studied_abroad=edu_stats['has_studied_abroad'], ruby=(skills.get('Ruby', -1) + 1) + (skills.get('Ruby on Rails', -1) + 1), python=skills.get('Python (Programming Language)', -1) + 1, 
java=skills.get('Java', -1) + 1, javascript=skills.get('JavaScript', -1) + 1, cpp=skills.get('C++', -1) + 1, csharp=skills.get('C#', -1) + 1, skills=skills, profile_text_length=dic['profile_text_stats']['length'], profile_eng_ratio=dic['profile_text_stats']['eng_ratio'] * 10.0, languages=",".join ( dic.get('accomplishments', {}).get('idiomas', []) ), num_contacts=dic['num_contacts'], location=dic['location'], linkedin_url=dic['linkedin_url'], scraped_at=dic['scraped_at']) except Exception as exc: pprint( dic ) raise exc recs.append(rec) df = pd.DataFrame( recs ) # %% return df # %% def extract_one( html: str, fpath: Path ): """Extract data from one scraped html""" # %% doc = BeautifulSoup( html, features='html.parser') ret = { 'linkedin_handle': fpath.name.split('.')[0] } _parse_top_card( ret, doc ) # %% ret['about'] = _extract_about( doc ) # if len(ret['about']) < 100 and ret['about'].find('ver más') > 0: # print( f"\nVer más detected: \nabout:{ret['about']} fpath={fpath}" ) ret['about_stats'] = {'about_eng_ratio': _common_english_ratio(ret['about'])} # %% ret['work_experience'] = _parse_experiences( doc ) ret['work_stats'] = calc_work_stats( ret['work_experience']) # %% ret['skills'] = proc_skills_section( doc ) ret['education'] = _parse_education( doc ) ret['education_stats'] = _education_stats( ret['education']) ret['accomplishments'] = _extract_accomplishments(doc) ret['profile_text_stats'] = profile_text_stats( doc ) # %% return ret # %% def calc_work_stats( work_xps: List[Dict[str, Any]] ): """Calculate total_experience_yrs and other stats""" durations = [ rec['duration'] for rec in work_xps if 'duration' in rec ] total_years = sum( durations ) if durations else None avg_years = np.round( total_years / len(durations), 2) if durations else None poss_lt2_years = sum( 1 for dur in durations if dur < 2.0 ) poss_lt1_2_years = sum(1 for dur in durations if dur < 1.2 ) has_worked_abroad = any( rec for rec in work_xps if _is_location_abroad( rec.get('location_raw') 
)) return { "total_experience_yrs": total_years, 'avg_years': avg_years, 'n_work_positions': len(durations), 'poss_lt2_years': poss_lt2_years, 'poss_lt1.2_years': poss_lt1_2_years, 'has_worked_abroad': has_worked_abroad } # %% def _is_location_abroad( location: Optional[str] ): if location is None or location.strip() == '': return False else: ret = not re.search( 'Colombia|Medell.n|Bogot.|Barranquilla|Cali|Pereira' '|Caldas|Cucuta|Dosquebradas|Antioquia|Remot[eo]', location, re.IGNORECASE) if ret: print( f'abroad location: {location}') return ret def _is_abroad_school( school: Optional[str] ): ret = re.search(r"(University|College|\bof\b)", school) if ret: print( f'abroad school: {school}') return ret def profile_text_stats( doc: BeautifulSoup ): """some metrics on the whole profile text""" text = doc.find('main', {'class': 'core-rail'}).text.strip() words = text.split() eng_ratio = sum(1 for word in words if word in COMMON_ENGLISH) * 10/ (len(words) + 0.001) return { 'length': len( text ), 'eng_ratio': np.round( eng_ratio, 2)} # %% def _extract_about( doc ) -> Optional[str]: about_section = doc.find('section', {'class': 'pv-about-section'}) if about_section is None: return None parts = about_section.find_all("p") return (" ".join( part.text.replace('\n', ' ').strip() for part in parts ) .replace( '... 
ver más', '') ) # %% def _parse_top_card( ret: Dict[ str, Any], doc: BeautifulSoup ): top_card_els = doc.find_all( "ul", {"class": "pv-top-card--list"} ) name_elem = top_card_els[0].find_all("li")[0] name = name_elem.text.strip() current_position = doc.find_all("h2", {"class": "mt1"})[0].text.strip() location = top_card_els[1].find_all( "li" )[0].text.strip() # %% num_contacts = _extract_num_contacts( top_card_els[1] ) # %% top_card_xp = doc.find_all('a', {"class": "pv-top-card--experience-list-item"}) main_school = top_card_xp[0].text.strip() if top_card_xp else None data = dict(name=name, current_position=current_position, location=location, num_contacts=num_contacts, main_school=main_school) ret.update(data) # %% def _extract_num_contacts( elem: Tag ): num_contacts_text = elem.find_all("li")[1].text.strip() mch = re.match(r'(\d+) contactos', num_contacts_text) if mch: return int(mch.group(1)) mch2 = re.search(r'Más de 500 contactos', num_contacts_text) if mch2: return 501 def _parse_experiences(doc: BeautifulSoup) -> List[Dict]: # %% xp_section = doc.find( 'section', {'id': 'experience-section'} ) if xp_section is None: return [] # %% summaries = xp_section.find_all('div', {'class': 'pv-entity__summary-info'}) ret = [ proc_employment_summary(summary) for summary in summaries ] return ret # %% def proc_employment_summary(summary: Tag) -> Dict: """process one employment summary and extract info from it""" xp_record = dict() xp_record['position'] = summary.find('h3').text.strip() company = summary.find_all('p', {'class': 'pv-entity__secondary-title'})[0] xp_record['company'] = "; ".join( [ line.strip() for line in company.text.split('\n') if line.strip() != ''] ) # %% for xp_line in summary.find_all('h4'): fld_name, value = [span.text.strip() for span in xp_line.find_all('span') ] if fld_name == 'Fechas de empleo': xp_record['period_raw'] = value period = _extract_period( value ) xp_record['period'] = period # print( period ) xp_record['duration'] = np.round( 
(period[1] - period[0]).total_seconds() / SECS_IN_YEAR, 2) elif fld_name == 'Duración del empleo': xp_record['duration_raw'] = value elif fld_name == 'Ubicación': xp_record['location_raw'] = value # print( f'location: {value}') elif fld_name.startswith('LinkedIn me ayud'): continue else: print( "proc_employment_summary: ", fld_name, value ) # %% # pprint( xp_record ) # %% return xp_record # %% def _extract_period( period_raw: str ): mch2 = re.match(r'(?P<mes1>[a-z]+)\. de (?P<year1>[0-9]+) . ' r'(?P<mes2>[a-z]+)\. de (?P<year2>[0-9]+)', period_raw) if mch2: # print('mch2', mch2, mch2.group("year1"), mch2.group("year2")) mes1, mes2 = _translate_mes(mch2.group("mes1")), _translate_mes(mch2.group("mes2")) return ( dt.date(int(mch2.group("year1")), int( mes1 ), 1), dt.date(int(mch2.group("year2")), int( mes2 ), 1) ) mch1 = re.match(r'(?P<mes>[a-z]+)\. de (?P<year>[0-9]+)( . actualidad)?', period_raw) if mch1: # print('mch1') mes = _translate_mes(mch1.group("mes")) return dt.date(int(mch1.group("year")), mes, 1), dt.date.today() mch2b = re.match(r'(?P<mes1>[a-z]+)\. de (?P<year1>[0-9]+) . (?P<year2>[0-9]{4})', period_raw) if mch2b: mes1 = _translate_mes(mch2b.group("mes1")) return ( dt.date(int(mch2b.group("year1")), int(mes1), 1), dt.date(int(mch2b.group("year2")), 1, 1) ) mch3 = re.match(r'(?P<year1>[0-9]{4}) . (?P<year2>[0-9]{4})', period_raw) if mch3: return (dt.date(int(mch3.group("year1")), 1, 1), dt.date(int(mch3.group("year2")), 1, 1)) mch4 = re.match(r'(?P<year1>[0-9]{4})', period_raw) if mch4: return (dt.date(int(mch4.group("year1")), 1, 1), dt.date(int(mch4.group("year1")) + 1, 1, 1)) assert False, period_raw # %% def _interactive_test(): # %% period_raw = 'ene. de 2015 – actualidad' # %% period_raw = 'ene. de 2015 – may. de 2015' print( _extract_period( period_raw ) ) # %% period_raw = 'ene. de 2012 – may. 
de 2013' print(_extract_period(period_raw)) # %% def _translate_mes( mes: str) -> int: return {'ene': 1, 'feb': 2, 'mar': 3, 'abr': 4, 'may': 5, 'jun': 6, 'jul': 7, 'ago': 8, 'sept': 9, 'oct': 10, 'nov': 11, 'dic': 12}[mes] def _common_english_ratio( a_text: str ) -> int: if a_text is None: return None words = a_text.split() cnt_english = sum( 1 for word in words if word in COMMON_ENGLISH ) return np.round( cnt_english / (len(words) + 0.001) * 10, 2) def _parse_education(doc: BeautifulSoup) -> List[Dict]: # %% edu_section = doc.find( 'section', {'id': 'education-section'} ) # %% if edu_section is None: return [] # %% summaries = edu_section.find_all('li', {'class': 'pv-education-entity'}) ret = [ proc_education_summary(summary) for summary in summaries ] # %% return ret # %% def _education_stats( edu_records: List[Dict[str, str]]): return {'has_studied_abroad': any(rec['is_abroad_school'] for rec in edu_records), 'max_degree': _max_degree(edu_records)} def proc_education_summary( summary: Tag ) -> Dict[str, str]: """Process one education summary and generate a record""" edu_record = dict() edu_record['school'] = summary.find('h3').text.strip() edu_record['is_abroad_school'] = _is_abroad_school( edu_record['school'] ) for parag in summary.find_all('p'): spans = [span.text.strip() for span in parag.find_all('span')] if len( spans ) == 2: fld_name, value = spans value = value.strip() elif len(spans) == 0: # print( 'education parag: ', parag ) edu_record['description'] = parag.text.strip() continue else: print( 'education spans: ', spans ) continue if fld_name == 'Nombre de la titulación': edu_record['degree_raw'] = value edu_record['degree'] = _classify_degree( value ) # print( 'degree: ', value, _classify_degree(value) ) elif fld_name == 'Disciplina académica': edu_record['field_raw'] = value elif fld_name == 'Nota': edu_record['grade_raw'] = value elif fld_name.startswith('Fechas de estudios'): edu_record['period_raw'] = value elif fld_name.startswith('Actividades y 
asociaciones'): edu_record['activities_raw'] = value else: print("proc_education_summary: ", fld_name, ' :: ', value) if edu_record.get('degree', 'Unknown') == 'Unknown': if re.search( 'Ingenier|Engineering', edu_record.get('field_raw', '') ): edu_record['degree'] = 'University' return edu_record # %% def _classify_degree( degree: str ) -> str: if re.search('Ingenier|Engineer', degree): return 'University' elif re.search('^Tecn.log', degree): return 'Tecnología' elif re.search('^Mae?ste?r', degree): return 'Master''s' elif re.search('^Dimplom', degree): return 'Diploma' elif re.search('^(Esp\.|Especializ)', degree): return 'Specialization' elif re.search('^Phd', degree, re.IGNORECASE): return 'PhD' else: return 'Unknown' DEGREE_LEVELS = {'Tecnología': 1, 'University': 2, 'Diploma': 3, 'Specialization': 4, 'Master''s': 5, 'PhD': 5, 'Unknown': -1} def _max_degree(edu_records: List[Dict[str, str]]) -> Optional[str] : levels = DEGREE_LEVELS if len(edu_records) > 0: return max( [rec.get('degree', 'Unknown') for rec in edu_records ], key=lambda x: levels[x]) else: return None def proc_skills_section( doc: BeautifulSoup ): # %% skills_section = doc.find('section', {'class': 'pv-skill-categories-section'}) if skills_section is None: return {} # %% divs = skills_section.find_all('div', {'class': 'pv-skill-category-entity__skill-wrapper'}) # %% ret = {} for div in divs: texts = [ span.text.strip() for span in div.find_all('span') if span.text.strip() != '' ] if len(texts) >= 1: key = texts[0] if len(texts) >= 3: mch = re.match(r'(\d+)', texts[2]) if mch: ret[key] = int( mch.group(1)) else: print( f"skills {len(texts)} spans: {texts}") ret[key] = None elif len(texts) == 1: ret[key] = 0 else: print(f"skills {len(texts)} spans: {texts}") # %% return ret
# pprint(dic['work_stats'])
random_line_split
extractor.py
from typing import Dict, Any, List, Optional import yaml import json from json import JSONEncoder import os import re import datetime as dt import pandas as pd from pprint import pprint from pathlib import Path from bs4 import BeautifulSoup from bs4.element import Tag import numpy as np HOME = Path( os.getenv('HOME') ) # TODO: # Logros # Destacado: cristian-david-montoya-saldarriaga-09638514a # Herramientas y tecnologías # TODO: features to extract # Whether has resume available # extract english level # https://www.linkedin.com/in/luis-mario-urrea-murillo/ MY_PATH = HOME / '_data/talent' SECS_IN_YEAR = 365.25 * 24 * 3600 COMMON_ENGLISH = {'the', 'with', 'on', 'and', 'I', 'am', 'is', 'my'} COMMON_SPANISH = {'y', 'el', 'la', 'de', 'los', 'las'} class _Config: raw_profiles_path = MY_PATH / 'linkedin_raw_profiles' profiles_yamls_path = MY_PATH / 'linkedin_yaml_profiles' CFG = _Config class DateTimeEncoder(JSONEncoder): """Override the default method""" def default(self, obj): """default formating as string""" if isinstance(obj, (dt.date, dt.datetime)): return obj.isoformat() yaml.SafeDumper.yaml_representers[None] = lambda self, data: \ yaml.representer.SafeRepresenter.represent_str( self, str(data) ) # %% def main(): """Read scraped profiles parse them and write to json and yamls""" # %% CFG.profiles_yamls_path.mkdir(parents=True, exist_ok=True) fpaths = list( _Config.raw_profiles_path.glob('*.html') ) print( f'{len(fpaths)} htmls found' ) # %% fpath = CFG.raw_profiles_path / 'luis-mario-urrea-murillo.html' # %% fpath = CFG.raw_profiles_path / 'cristian-david-montoya-saldarriaga-09638514a.html' # %% fpaths = [ CFG.raw_profiles_path / 'ricardo-alarcon-44079b105.html' ] # %% fpaths = [ Path('/home/teo/_data/talent/linkedin_raw_profiles/israellaguan.html')] # %% dics = {} # %% for i, fpath in enumerate(fpaths): if fpath in dics: continue with fpath.open('rt') as f_in: html = f_in.read() print( f'\n***{i+1}/{len(fpaths)} {fpath.name}:') dic = extract_one( html, fpath ) 
dic['linkedin_url'] = f"https://www.linkedin.com/in/{fpath.name.split('.')[0]}" dic['scraped_at'] = dt.datetime.fromtimestamp( fpath.stat().st_ctime ) # pprint(dic['work_stats']) dics[fpath] = dic dics_arr = list(dics.values()) # %% del dics # %% with (CFG.profiles_yamls_path / 'all_profiles.json').open('wt') as f_out: json.dump( dics_arr, f_out, cls=DateTimeEncoder, indent=4 ) # %% with (CFG.profiles_yamls_path / 'all_profiles.yaml').open('wt') as f_out: yaml.safe_dump( dics_arr, f_out ) # %% df = produce_summary_table( dics_arr ) df.to_excel( CFG.raw_profiles_path.parent / 'mined_ruby_candidates_sample.xlsx', index=False) # %% def _interactive_testing( dics_arr, fpaths, html: str ): # %% # noinspection PyUnresolvedReferences runfile('talent-miner/extractor.py') # %% pprint( dics_arr[4] ) # %% fpath = [ f for f in fpaths if str(f).find('israellaguan') >= 0 ][0] # %% doc = BeautifulSoup( html, features='html.parser' ) # %% _extract_accomplishments(doc) # %% def _extract_accomplishments( doc: BeautifulSoup ) -> Dict[str, List[str]]: accomps = doc.find_all('section', {'class': 'pv-accomplishments-block'}) # accomp0 = accomps[2] ret = {} for accomp in accomps: accomp_header = accomp.find_all('h3', {'class': 'pv-accomplishments-block__title'})[0].text accomp_vals = [ li_elem.text for li_elem in accomp.find_all('li') ] ret[accomp_header] = accomp_vals return ret # %% def produce_summary_table( dics: List[Dict[str, Any]]) -> pd.DataFrame: # %% recs = [] for dic in dics: try: w_stats = dic['work_stats'] edu_stats = dic['education_stats'] skills = dic['skills'] rec = dict( name=dic['name'], total_experience_yrs=w_stats['total_experience_yrs'], n_work_positions=w_stats['n_work_positions'], pos_lt1_year=w_stats['poss_lt1.2_years'], pos_lt2_year=w_stats['poss_lt2_years'], about=dic['about'], about_eng_ratio=dic['about_stats']['about_eng_ratio'], current_position=dic['current_position'], has_worked_abroad=w_stats['has_worked_abroad'], max_degree=edu_stats['max_degree'], 
studied_abroad=edu_stats['has_studied_abroad'], ruby=(skills.get('Ruby', -1) + 1) + (skills.get('Ruby on Rails', -1) + 1), python=skills.get('Python (Programming Language)', -1) + 1, java=skills.get('Java', -1) + 1, javascript=skills.get('JavaScript', -1) + 1, cpp=skills.get('C++', -1) + 1, csharp=skills.get('C#', -1) + 1, skills=skills, profile_text_length=dic['profile_text_stats']['length'], profile_eng_ratio=dic['profile_text_stats']['eng_ratio'] * 10.0, languages=",".join ( dic.get('accomplishments', {}).get('idiomas', []) ), num_contacts=dic['num_contacts'], location=dic['location'], linkedin_url=dic['linkedin_url'], scraped_at=dic['scraped_at']) except Exception as exc: pprint( dic ) raise exc recs.append(rec) df = pd.DataFrame( recs ) # %% return df # %% def extract_one( html: str, fpath: Path ): """Extract data from one scraped html""" # %% doc = BeautifulSoup( html, features='html.parser') ret = { 'linkedin_handle': fpath.name.split('.')[0] } _parse_top_card( ret, doc ) # %% ret['about'] = _extract_about( doc ) # if len(ret['about']) < 100 and ret['about'].find('ver más') > 0: # print( f"\nVer más detected: \nabout:{ret['about']} fpath={fpath}" ) ret['about_stats'] = {'about_eng_ratio': _common_english_ratio(ret['about'])} # %% ret['work_experience'] = _parse_experiences( doc ) ret['work_stats'] = calc_work_stats( ret['work_experience']) # %% ret['skills'] = proc_skills_section( doc ) ret['education'] = _parse_education( doc ) ret['education_stats'] = _education_stats( ret['education']) ret['accomplishments'] = _extract_accomplishments(doc) ret['profile_text_stats'] = profile_text_stats( doc ) # %% return ret # %% def calc_work_stats( work_xps: List[Dict[str, Any]] ): """Calculate total_experience_yrs and other stats""" durations = [ rec['duration'] for rec in work_xps if 'duration' in rec ] total_years = sum( durations ) if durations else None avg_years = np.round( total_years / len(durations), 2) if durations else None poss_lt2_years = sum( 1 for dur in 
durations if dur < 2.0 ) poss_lt1_2_years = sum(1 for dur in durations if dur < 1.2 ) has_worked_abroad = any( rec for rec in work_xps if _is_location_abroad( rec.get('location_raw') )) return { "total_experience_yrs": total_years, 'avg_years': avg_years, 'n_work_positions': len(durations), 'poss_lt2_years': poss_lt2_years, 'poss_lt1.2_years': poss_lt1_2_years, 'has_worked_abroad': has_worked_abroad } # %% def _is_location_abroad( location: Optional[str] ): if location is None or location.strip() == '': return False else: ret = not re.search( 'Colombia|Medell.n|Bogot.|Barranquilla|Cali|Pereira' '|Caldas|Cucuta|Dosquebradas|Antioquia|Remot[eo]', location, re.IGNORECASE) if ret: print( f'abroad location: {location}') return ret def _is_abroad_school( school: Optional[str] ): ret = re.search(r"(University|College|\bof\b)", school) if ret: print( f'abroad school: {school}') return ret def profile_text_stats( doc: BeautifulSoup ): """some metrics on the whole profile text""" text = doc.find('main', {'class': 'core-rail'}).text.strip() words = text.split() eng_ratio = sum(1 for word in words if word in COMMON_ENGLISH) * 10/ (len(words) + 0.001) return { 'length': len( text ), 'eng_ratio': np.round( eng_ratio, 2)} # %% def _extract_about( doc ) -> Optional[str]: about_section = doc.find('section', {'class': 'pv-about-section'}) if about_section is None: return None parts = about_section.find_all("p") return (" ".join( part.text.replace('\n', ' ').strip() for part in parts ) .replace( '... 
ver más', '') ) # %% def _parse_top_card( ret: Dict[ str, Any], doc: BeautifulSoup ): top_card_els = doc.find_all( "ul", {"class": "pv-top-card--list"} ) name_elem = top_card_els[0].find_all("li")[0] name = name_elem.text.strip() current_position = doc.find_all("h2", {"class": "mt1"})[0].text.strip() location = top_card_els[1].find_all( "li" )[0].text.strip() # %% num_contacts = _extract_num_contacts( top_card_els[1] ) # %% top_card_xp = doc.find_all('a', {"class": "pv-top-card--experience-list-item"}) main_school = top_card_xp[0].text.strip() if top_card_xp else None data = dict(name=name, current_position=current_position, location=location, num_contacts=num_contacts, main_school=main_school) ret.update(data) # %% def _extract_num_contacts( elem: Tag ): num_contacts_text = elem.find_all("li")[1].text.strip() mch = re.match(r'(\d+) contactos', num_contacts_text) if mch: return int(mch.group(1)) mch2 = re.search(r'Más de 500 contactos', num_contacts_text) if mch2: return 501 def _parse_experiences(doc: BeautifulSoup) -> List[Dict]: # %% xp_section = doc.find( 'section', {'id': 'experience-section'} ) if xp_section is None: return [] # %% summaries = xp_section.find_all('div', {'class': 'pv-entity__summary-info'}) ret = [ proc_employment_summary(summary) for summary in summaries ] return ret # %% def proc_employment_summary(summary: Tag) -> Dict: """process one employment summary and extract info from it""" xp_record = dict() xp_record['position'] = summary.find('h3').text.strip() company = summary.find_all('p', {'class': 'pv-entity__secondary-title'})[0] xp_record['company'] = "; ".join( [ line.strip() for line in company.text.split('\n') if line.strip() != ''] ) # %% for xp_line in summary.find_all('h4'): fld_name, value = [span.text.strip() for span in xp_line.find_all('span') ] if fld_name == 'Fechas de empleo': xp_record['period_raw'] = value period = _extract_period( value ) xp_record['period'] = period # print( period ) xp_record['duration'] = np.round( 
(period[1] - period[0]).total_seconds() / SECS_IN_YEAR, 2) elif fld_name == 'Duración del empleo': xp_record['duration_raw'] = value elif fld_name == 'Ubicación': xp_record['location_raw'] = value # print( f'location: {value}') elif fld_name.startswith('LinkedIn me ayud'): continue else: print( "proc_employment_summary: ", fld_name, value ) # %% # pprint( xp_record ) # %% return xp_record # %% def _extract_period( period_raw: str ): mch2 = re.match(r'(?P<mes1>[a-z]+)\. de (?P<year1>[0-9]+) . ' r'(?P<mes2>[a-z]+)\. de (?P<year2>[0-9]+)', period_raw) if mch2: # print('mch2', mch2, mch2.group("year1"), mch2.group("year2")) mes1, mes2 = _translate_mes(mch2.group("mes1")), _translate_mes(mch2.group("mes2")) return ( dt.date(int(mch2.group("year1")), int( mes1 ), 1), dt.date(int(mch2.group("year2")), int( mes2 ), 1) ) mch1 = re.match(r'(?P<mes>[a-z]+)\. de (?P<year>[0-9]+)( . actualidad)?', period_raw) if mch1: # print('mch1') mes = _translate_mes(mch1.group("mes")) return dt.date(int(mch1.group("year")), mes, 1), dt.date.today() mch2b = re.match(r'(?P<mes1>[a-z]+)\. de (?P<year1>[0-9]+) . (?P<year2>[0-9]{4})', period_raw) if mch2b: mes1 = _translate_mes(mch2b.group("mes1")) return ( dt.date(int(mch2b.group("year1")), int(mes1), 1), dt.date(int(mch2b.group("year2")), 1, 1) ) mch3 = re.match(r'(?P<year1>[0-9]{4}) . (?P<year2>[0-9]{4})', period_raw) if mch3: return (dt.date(int(mch3.group("year1")), 1, 1), dt.date(int(mch3.group("year2")), 1, 1)) mch4 = re.match(r'(?P<year1>[0-9]{4})', period_raw) if mch4: return (dt.date(int(mch4.group("year1")), 1, 1), dt.date(int(mch4.group("year1")) + 1, 1, 1)) assert False, period_raw # %% def _interactive_test(): # %% period_raw = 'ene. de 2015 – actualidad' # %% period_raw = 'ene. de 2015 – may. de 2015' print( _extract_period( period_raw ) ) # %% period_raw = 'ene. de 2012 – may. 
de 2013' print(_extract_period(period_raw)) # %% def _translate_mes( mes: str) -> int: return {'ene': 1, 'feb': 2, 'mar': 3, 'abr': 4, 'may': 5, 'jun': 6, 'jul': 7, 'ago': 8, 'sept': 9, 'oct': 10, 'nov': 11, 'dic': 12}[mes] def _common_english_ratio( a_text: str ) -> int: if a_text is None: return None words = a_text.split() cnt_english = sum( 1 for word in words if word in COMMON_ENGLISH ) return np.round( cnt_english / (len(words) + 0.001) * 10, 2) def _parse_education(doc: BeautifulSoup) -> List[Dict]: # %% edu_section = doc.find( 'section', {'id': 'education-section'} ) # %% if edu_section is None: return [] # %% summaries = edu_section.find_all('li', {'class': 'pv-education-entity'}) ret = [ proc_education_summary(summary) for summary in summaries ] # %% return ret # %% def _education_stats( edu_records: List[Dict[str, str]]): return {'has_studied_abroad': any(rec['is_abroad_school'] for rec in edu_records), 'max_degree': _max_degree(edu_records)} def proc_education_summary( summary: Tag ) -> Dict[str, str]: """Process one education summary and generate a record""" edu_record = dict() edu_record['school'] = summary.find('h3').text.strip() edu_record['is_abroad_school'] = _is_abroad_school( edu_record['school'] ) for parag in summary.find_all('p'): spans = [span.text.strip() for span in parag.find_all('span')] if len( spans ) == 2: fld_name, value = spans value = value.strip() elif len(spans) == 0: # print( 'education parag: ', parag ) edu_record['description'] = parag.text.strip() continue else: print( 'education spans: ', spans ) continue if fld_name == 'Nombre de la titulación': edu_record['degree_raw'] = value edu_record['degree'] = _classify_degree( value ) # print( 'degree: ', value, _classify_degree(value) ) elif fld_name == 'Disciplina académica': edu_record['field_raw'] = value elif fld_name == 'Nota': edu_record['grade_raw'] = value elif fld_name.startswith('Fechas de estudios'): edu_record['period_raw'] = value elif fld_name.startswith('Actividades y 
asociaciones'): edu_record['activities_raw'] = value else: print("proc_education_summary: ", fld_name, ' :: ', value) if edu_record.get('degree', 'Unknown') == 'Unknown': if re.search( 'Ingenier|Engineering', edu_record.get('field_raw', '') ): edu_record['degree'] = 'University' return edu_record # %% def _classify_degree( degree: str ) -> str: if re.search('Ingenier|Engineer', degree): return 'University' elif re.search('^Tecn.log', degree): return 'Tecnología' elif re.search('^Mae?ste?r', degree): return 'Master''s' elif re.search('^Dimplom', degree): return 'Diploma' elif re.search('^(Esp\.|Especializ)', degree): return 'Specialization' elif re.search('^Phd', degree, re.IGNORECASE): return 'PhD' else: return 'Unknown' DEGREE_LEVELS = {'Tecnología': 1, 'University': 2, 'Diploma': 3, 'Specialization': 4, 'Master''s': 5, 'PhD': 5, 'Unknown': -1} def _max_degree(edu_r
t[Dict[str, str]]) -> Optional[str] : levels = DEGREE_LEVELS if len(edu_records) > 0: return max( [rec.get('degree', 'Unknown') for rec in edu_records ], key=lambda x: levels[x]) else: return None def proc_skills_section( doc: BeautifulSoup ): # %% skills_section = doc.find('section', {'class': 'pv-skill-categories-section'}) if skills_section is None: return {} # %% divs = skills_section.find_all('div', {'class': 'pv-skill-category-entity__skill-wrapper'}) # %% ret = {} for div in divs: texts = [ span.text.strip() for span in div.find_all('span') if span.text.strip() != '' ] if len(texts) >= 1: key = texts[0] if len(texts) >= 3: mch = re.match(r'(\d+)', texts[2]) if mch: ret[key] = int( mch.group(1)) else: print( f"skills {len(texts)} spans: {texts}") ret[key] = None elif len(texts) == 1: ret[key] = 0 else: print(f"skills {len(texts)} spans: {texts}") # %% return ret
ecords: Lis
identifier_name
extractor.py
from typing import Dict, Any, List, Optional import yaml import json from json import JSONEncoder import os import re import datetime as dt import pandas as pd from pprint import pprint from pathlib import Path from bs4 import BeautifulSoup from bs4.element import Tag import numpy as np HOME = Path( os.getenv('HOME') ) # TODO: # Logros # Destacado: cristian-david-montoya-saldarriaga-09638514a # Herramientas y tecnologías # TODO: features to extract # Whether has resume available # extract english level # https://www.linkedin.com/in/luis-mario-urrea-murillo/ MY_PATH = HOME / '_data/talent' SECS_IN_YEAR = 365.25 * 24 * 3600 COMMON_ENGLISH = {'the', 'with', 'on', 'and', 'I', 'am', 'is', 'my'} COMMON_SPANISH = {'y', 'el', 'la', 'de', 'los', 'las'} class _Config: raw_profiles_path = MY_PATH / 'linkedin_raw_profiles' profiles_yamls_path = MY_PATH / 'linkedin_yaml_profiles' CFG = _Config class DateTimeEncoder(JSONEncoder): """Override the default method""" def default(self, obj): """default formating as string""" if isinstance(obj, (dt.date, dt.datetime)): return obj.isoformat() yaml.SafeDumper.yaml_representers[None] = lambda self, data: \ yaml.representer.SafeRepresenter.represent_str( self, str(data) ) # %% def main(): """Read scraped profiles parse them and write to json and yamls""" # %% CFG.profiles_yamls_path.mkdir(parents=True, exist_ok=True) fpaths = list( _Config.raw_profiles_path.glob('*.html') ) print( f'{len(fpaths)} htmls found' ) # %% fpath = CFG.raw_profiles_path / 'luis-mario-urrea-murillo.html' # %% fpath = CFG.raw_profiles_path / 'cristian-david-montoya-saldarriaga-09638514a.html' # %% fpaths = [ CFG.raw_profiles_path / 'ricardo-alarcon-44079b105.html' ] # %% fpaths = [ Path('/home/teo/_data/talent/linkedin_raw_profiles/israellaguan.html')] # %% dics = {} # %% for i, fpath in enumerate(fpaths): if fpath in dics: continue with fpath.open('rt') as f_in: html = f_in.read() print( f'\n***{i+1}/{len(fpaths)} {fpath.name}:') dic = extract_one( html, fpath ) 
dic['linkedin_url'] = f"https://www.linkedin.com/in/{fpath.name.split('.')[0]}" dic['scraped_at'] = dt.datetime.fromtimestamp( fpath.stat().st_ctime ) # pprint(dic['work_stats']) dics[fpath] = dic dics_arr = list(dics.values()) # %% del dics # %% with (CFG.profiles_yamls_path / 'all_profiles.json').open('wt') as f_out: json.dump( dics_arr, f_out, cls=DateTimeEncoder, indent=4 ) # %% with (CFG.profiles_yamls_path / 'all_profiles.yaml').open('wt') as f_out: yaml.safe_dump( dics_arr, f_out ) # %% df = produce_summary_table( dics_arr ) df.to_excel( CFG.raw_profiles_path.parent / 'mined_ruby_candidates_sample.xlsx', index=False) # %% def _interactive_testing( dics_arr, fpaths, html: str ): # %% # noinspection PyUnresolvedReferences runfile('talent-miner/extractor.py') # %% pprint( dics_arr[4] ) # %% fpath = [ f for f in fpaths if str(f).find('israellaguan') >= 0 ][0] # %% doc = BeautifulSoup( html, features='html.parser' ) # %% _extract_accomplishments(doc) # %% def _extract_accomplishments( doc: BeautifulSoup ) -> Dict[str, List[str]]: accomps = doc.find_all('section', {'class': 'pv-accomplishments-block'}) # accomp0 = accomps[2] ret = {} for accomp in accomps: accomp_header = accomp.find_all('h3', {'class': 'pv-accomplishments-block__title'})[0].text accomp_vals = [ li_elem.text for li_elem in accomp.find_all('li') ] ret[accomp_header] = accomp_vals return ret # %% def produce_summary_table( dics: List[Dict[str, Any]]) -> pd.DataFrame: # %% recs = [] for dic in dics: try: w_stats = dic['work_stats'] edu_stats = dic['education_stats'] skills = dic['skills'] rec = dict( name=dic['name'], total_experience_yrs=w_stats['total_experience_yrs'], n_work_positions=w_stats['n_work_positions'], pos_lt1_year=w_stats['poss_lt1.2_years'], pos_lt2_year=w_stats['poss_lt2_years'], about=dic['about'], about_eng_ratio=dic['about_stats']['about_eng_ratio'], current_position=dic['current_position'], has_worked_abroad=w_stats['has_worked_abroad'], max_degree=edu_stats['max_degree'], 
studied_abroad=edu_stats['has_studied_abroad'], ruby=(skills.get('Ruby', -1) + 1) + (skills.get('Ruby on Rails', -1) + 1), python=skills.get('Python (Programming Language)', -1) + 1, java=skills.get('Java', -1) + 1, javascript=skills.get('JavaScript', -1) + 1, cpp=skills.get('C++', -1) + 1, csharp=skills.get('C#', -1) + 1, skills=skills, profile_text_length=dic['profile_text_stats']['length'], profile_eng_ratio=dic['profile_text_stats']['eng_ratio'] * 10.0, languages=",".join ( dic.get('accomplishments', {}).get('idiomas', []) ), num_contacts=dic['num_contacts'], location=dic['location'], linkedin_url=dic['linkedin_url'], scraped_at=dic['scraped_at']) except Exception as exc: pprint( dic ) raise exc recs.append(rec) df = pd.DataFrame( recs ) # %% return df # %% def extract_one( html: str, fpath: Path ): """Extract data from one scraped html""" # %% doc = BeautifulSoup( html, features='html.parser') ret = { 'linkedin_handle': fpath.name.split('.')[0] } _parse_top_card( ret, doc ) # %% ret['about'] = _extract_about( doc ) # if len(ret['about']) < 100 and ret['about'].find('ver más') > 0: # print( f"\nVer más detected: \nabout:{ret['about']} fpath={fpath}" ) ret['about_stats'] = {'about_eng_ratio': _common_english_ratio(ret['about'])} # %% ret['work_experience'] = _parse_experiences( doc ) ret['work_stats'] = calc_work_stats( ret['work_experience']) # %% ret['skills'] = proc_skills_section( doc ) ret['education'] = _parse_education( doc ) ret['education_stats'] = _education_stats( ret['education']) ret['accomplishments'] = _extract_accomplishments(doc) ret['profile_text_stats'] = profile_text_stats( doc ) # %% return ret # %% def calc_work_stats( work_xps: List[Dict[str, Any]] ): """Calculate total_experience_yrs and other stats""" durations = [ rec['duration'] for rec in work_xps if 'duration' in rec ] total_years = sum( durations ) if durations else None avg_years = np.round( total_years / len(durations), 2) if durations else None poss_lt2_years = sum( 1 for dur in 
durations if dur < 2.0 ) poss_lt1_2_years = sum(1 for dur in durations if dur < 1.2 ) has_worked_abroad = any( rec for rec in work_xps if _is_location_abroad( rec.get('location_raw') )) return { "total_experience_yrs": total_years, 'avg_years': avg_years, 'n_work_positions': len(durations), 'poss_lt2_years': poss_lt2_years, 'poss_lt1.2_years': poss_lt1_2_years, 'has_worked_abroad': has_worked_abroad } # %% def _is_location_abroad( location: Optional[str] ): if location is None or location.strip() == '': return False else: ret = not re.search( 'Colombia|Medell.n|Bogot.|Barranquilla|Cali|Pereira' '|Caldas|Cucuta|Dosquebradas|Antioquia|Remot[eo]', location, re.IGNORECASE) if ret: print( f'abroad location: {location}') return ret def _is_abroad_school( school: Optional[str] ): ret = re.search(r"(University|College|\bof\b)", school) if ret: print( f'abroad school: {school}') return ret def profile_text_stats( doc: BeautifulSoup ): """some metrics on the whole profile text""" text = doc.find('main', {'class': 'core-rail'}).text.strip() words = text.split() eng_ratio = sum(1 for word in words if word in COMMON_ENGLISH) * 10/ (len(words) + 0.001) return { 'length': len( text ), 'eng_ratio': np.round( eng_ratio, 2)} # %% def _extract_about( doc ) -> Optional[str]: about_section = doc.find('section', {'class': 'pv-about-section'}) if about_section is None: return None parts = about_section.find_all("p") return (" ".join( part.text.replace('\n', ' ').strip() for part in parts ) .replace( '... 
ver más', '') ) # %% def _parse_top_card( ret: Dict[ str, Any], doc: BeautifulSoup ): top_card_els = doc.find_all( "ul", {"class": "pv-top-card--list"} ) name_elem = top_card_els[0].find_all("li")[0] name = name_elem.text.strip() current_position = doc.find_all("h2", {"class": "mt1"})[0].text.strip() location = top_card_els[1].find_all( "li" )[0].text.strip() # %% num_contacts = _extract_num_contacts( top_card_els[1] ) # %% top_card_xp = doc.find_all('a', {"class": "pv-top-card--experience-list-item"}) main_school = top_card_xp[0].text.strip() if top_card_xp else None data = dict(name=name, current_position=current_position, location=location, num_contacts=num_contacts, main_school=main_school) ret.update(data) # %% def _extract_num_contacts( elem: Tag ): num_contacts_text = elem.find_all("li")[1].text.strip() mch = re.match(r'(\d+) contactos', num_contacts_text) if mch: return int(mch.group(1)) mch2 = re.search(r'Más de 500 contactos', num_contacts_text) if mch2: return 501 def _parse_experiences(doc: BeautifulSoup) -> List[Dict]: # %% xp_section = doc.find( 'section', {'id': 'experience-section'} ) if xp_section is None: return [] # %% summaries = xp_section.find_all('div', {'class': 'pv-entity__summary-info'}) ret = [ proc_employment_summary(summary) for summary in summaries ] return ret # %% def proc_employment_summary(summary: Tag) -> Dict: """process one employment summary and extract info from it""" xp_record = dict() xp_record['position'] = summary.find('h3').text.strip() company = summary.find_all('p', {'class': 'pv-entity__secondary-title'})[0] xp_record['company'] = "; ".join( [ line.strip() for line in company.text.split('\n') if line.strip() != ''] ) # %% for xp_line in summary.find_all('h4'): fld_name, value = [span.text.strip() for span in xp_line.find_all('span') ] if fld_name == 'Fechas de empleo': xp_record['period_raw'] = value period = _extract_period( value ) xp_record['period'] = period # print( period ) xp_record['duration'] = np.round( 
(period[1] - period[0]).total_seconds() / SECS_IN_YEAR, 2) elif fld_name == 'Duración del empleo': xp_record['duration_raw'] = value elif fld_name == 'Ubicación': xp_record['location_raw'] = value # print( f'location: {value}') elif fld_name.startswith('LinkedIn me ayud'): continue else: print( "proc_employment_summary: ", fld_name, value ) # %% # pprint( xp_record ) # %% return xp_record # %% def _extract_period( period_raw: str ): mch2 = re.match(r'(?P<mes1>[a-z]+)\. de (?P<year1>[0-9]+) . ' r'(?P<mes2>[a-z]+)\. de (?P<year2>[0-9]+)', period_raw) if mch2: # print('mch2', mch2, mch2.group("year1"), mch2.group("year2")) mes1, mes2 = _translate_mes(mch2.group("mes1")), _translate_mes(mch2.group("mes2")) return ( dt.date(int(mch2.group("year1")), int( mes1 ), 1), dt.date(int(mch2.group("year2")), int( mes2 ), 1) ) mch1 = re.match(r'(?P<mes>[a-z]+)\. de (?P<year>[0-9]+)( . actualidad)?', period_raw) if mch1: # print('mch1') mes = _translate_mes(mch1.group("mes")) return dt.date(int(mch1.group("year")), mes, 1), dt.date.today() mch2b = re.match(r'(?P<mes1>[a-z]+)\. de (?P<year1>[0-9]+) . (?P<year2>[0-9]{4})', period_raw) if mch2b: mes1 = _translate_mes(mch2b.group("mes1")) return ( dt.date(int(mch2b.group("year1")), int(mes1), 1), dt.date(int(mch2b.group("year2")), 1, 1) ) mch3 = re.match(r'(?P<year1>[0-9]{4}) . (?P<year2>[0-9]{4})', period_raw) if mch3: return (dt.date(int(mch3.group("year1")), 1, 1), dt.date(int(mch3.group("year2")), 1, 1)) mch4 = re.match(r'(?P<year1>[0-9]{4})', period_raw) if mch4: return (dt.date(int(mch4.group("year1")), 1, 1), dt.date(int(mch4.group("year1")) + 1, 1, 1)) assert False, period_raw # %% def _interactive_test(): # %% period_raw = 'ene. de 2015 – actualidad' # %% period_raw = 'ene. de 2015 – may. de 2015' print( _extract_period( period_raw ) ) # %% period_raw = 'ene. de 2012 – may. 
de 2013' print(_extract_period(period_raw)) # %% def _translate_mes( mes: str) -> int: return {'ene': 1, 'feb': 2, 'mar': 3, 'abr': 4, 'may': 5, 'jun': 6, 'jul': 7, 'ago': 8, 'sept': 9, 'oct': 10, 'nov': 11, 'dic': 12}[mes] def _common_english_ratio( a_text: str ) -> int: if a_text is None: return None words = a_text.split() cnt_english = sum( 1 for word in words if word in COMMON_ENGLISH ) return np.round( cnt_english / (len(words) + 0.001) * 10, 2) def _parse_education(doc: BeautifulSoup) -> List[Dict]: # %% edu_section = doc.find( 'section', {'id': 'education-section'} ) # %% if edu_section is None: return [] # %% summaries = edu_section.find_all('li', {'class': 'pv-education-entity'}) ret = [ proc_education_summary(summary) for summary in summaries ] # %% return ret # %% def _education_stats( edu_records: List[Dict[str, str]]): return {'has_studied_abroad': any(rec['is_abroad_school'] for rec in edu_records), 'max_degree': _max_degree(edu_records)} def proc_education_summary( summary: Tag ) -> Dict[str, str]: """Process one education summary and generate a record""" edu_record = dict() edu_record['school'] = summary.find('h3').text.strip() edu_record['is_abroad_school'] = _is_abroad_school( edu_record['school'] ) for parag in summary.find_all('p'): spans = [span.text.strip() for span in parag.find_all('span')] if len( spans ) == 2: fld_name, value = spans value = value.strip() elif len(spans) == 0: # print( 'education parag: ', parag ) edu_record['description'] = parag.text.strip() continue else: print( 'education spans: ', spans ) continue if fld_name == 'Nombre de la titulación': edu_record['degree_raw'] = value edu_record['degree'] = _classify_degree( value ) # print( 'degree: ', value, _classify_degree(value) ) elif fld_name == 'Disciplina académica': edu_record['field_raw'] = value elif fld_name == 'Nota': edu_record['grade_raw'] = value elif fld_name.startswith('Fechas de estudios'): edu_record['period_raw'] = value elif fld_name.startswith('Actividades y 
asociaciones'): edu_record['activities_raw'] = value else: print("proc_education_summary: ", fld_name, ' :: ', value) if edu_record.get('degree', 'Unknown') == 'Unknown': if re.search( 'Ingenier|Engineering', edu_record.get('field_raw', '') ): edu_record['degree'] = 'University' return edu_record # %% def _classify_degree( degree: str ) -> str: if re.search('Ingenier|Engineer', degree): return 'University' elif re.search('^Tecn.log', degree): return 'Tecnología' elif re.search('^Mae?ste?r', degree): return 'Master''s' elif re.search('^Dimplom', degree): return 'Diploma' elif re.search('^(Esp\.|Especializ)', degree): return 'Specialization' elif re.search('^Phd', degree, re.IGNORECASE): return 'PhD' else: return 'Unknown' DEGREE_LEVELS = {'Tecnología': 1, 'University': 2, 'Diploma': 3, 'Specialization': 4, 'Master''s': 5, 'PhD': 5, 'Unknown': -1} def _max_degree(edu_records: List[Dict[str, str]]) -> Optional[str] : levels = DEGREE_LEVELS if len(edu_records) > 0: return max( [rec.get('degree', 'Unknown') for rec in edu_records ], key=lambda x: levels[x]) else: return None def proc_skills_section( doc: BeautifulSoup ): # %% skills_section = doc.find('section', {'class': 'pv-skill-categories-section'}) if skills_section is None: return {} # %% divs = skills_section.find_all('div', {'class': 'pv-skill-category-entity__skill-wrapper'}) # %% ret = {} for div in divs: texts = [ span.text.strip() for span in div.find_all('span') if span.text.strip() != '' ] if len(texts) >= 1: key = texts[0]
turn ret
if len(texts) >= 3: mch = re.match(r'(\d+)', texts[2]) if mch: ret[key] = int( mch.group(1)) else: print( f"skills {len(texts)} spans: {texts}") ret[key] = None elif len(texts) == 1: ret[key] = 0 else: print(f"skills {len(texts)} spans: {texts}") # %% re
conditional_block
index.rs
/*! Indexing within memory elements. This module provides types which guarantee certain properties about selecting bits within a memory element. These types enable their use sites to explicitly declare the indexing behavior they require, and move safety checks from runtime to compile time. # Bit Indexing The [`BitIdx`] type represents the semantic index of a bit within a memory element. It does not perform bit positioning, and cannot be used to create a shift instruction or mask value. It is transformed into a value which can do these things – [`BitPos`] – through the [`BitOrder::at`] function. # Region End Marker `bitvec` uses “half-open” ranges, described by a starting point and a count of members that are live. This means that the “end” of a range is not the last member that is *in*cluded in the range, but rather the first member that is *ex*cluded from it. This requires the [`BitTail` end marker to include in its range the width of the element type (`8` for `u8`, etc), in order to mark that a region includes the very last bit in the element (index `7` for `u8`, etc`). The starting number for a dead region cannot be used to perform bit selection, but is used to provide range computation, so it is kept distinct from the indexing types. # Bit Positioning The [`BitPos`] type corresponds directly to a bit position in a memory element. Its value can be used to create shift instructions which select part of memory. It is only ever created by the `BitOrder::at` function. # Bit Selection The [`BitSel`] type is a one-hot mask encoding for a memory element. Unlike the previous types, which are range-limited integers, this type is a wrapper over a memory element and guarantees that it can be used as a mask value in `&` and `|` operations to modify exactly one bit. It is equivalent to `1 << BitPos.value()`. # Bit Masking Lastly, the [`BitMask`] type is a bitmask that permits any number of bits to be set or cleared. 
It is provided as a type rather than a bare value in order to clearly communicate that there is no restriction on what this mask may affect. [`BitIdx`]: struct.BitIdx.html [`BitMask`]: struct.BitMask.html [`BitOrder::at`]: ../order/trait.BitOrder.html#method.at [`BitPos`]: struct.BitPos.html [`BitSel`]: struct.BitSel.html [`BitTail`]: struct.BitTail.html !*/ use crate::mem::BitMemory; use core::{ fmt::{ self, Binary, Formatter, }, iter::{ Product, Sum, }, marker::PhantomData, ops::{ BitAnd, BitOr, Deref, Not, }, }; #[cfg(feature = "serde")] use core::convert::TryFrom; /** Indicates a semantic index of a bit within a memory element. This is a counter in the domain `0 .. M::BITS`, and marks a semantic position in the ordering sequence described by a [`BitOrder`] implementation. It is used for both position computation through `BitOrder` and range computation in [`BitPtr`]. # Type Parameters - `M`: The memory element type controlled by this index. [`BitOrder`]: ../order/trait.BitOrder.html [`BitPtr`]: ../pointer/struct.BitPtr.html **/ // If Rust had user-provided ranged integers, this would be communicable to the // compiler: // #[rustc_layout_scalar_valid_range_end(M::BITS)] #[repr(transparent)] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitIdx<M> where M: BitMemory { /// Semantic index within an element. Constrained to `0 .. M::BITS`. idx: u8, /// Marker for the indexed type. _ty: PhantomData<M>, } impl<M> BitIdx<M> where M: BitMemory { /// The zero index. pub const ZERO: Self = Self { idx: 0, _ty: PhantomData, }; /// Wraps a counter value as a known-good index of the `M` element type. /// /// # Parameters /// /// - `idx`: A semantic index within a `M` memory element. /// /// # Returns /// /// If `idx` is within the range `0 .. M::BITS`, then this returns the index /// value wrapped in the index type; if `idx` exceeds this range, then this /// returns `None`. 
pub fn new(idx: u8) -> Option<Self> { if idx >= M::BITS { return None; } Some(unsafe { Self::new_unchecked(idx) }) } /// Wraps a counter value as a known-good index of the `M` element type. /// /// # Parameters /// /// - `idx`: A semantic index within a `M` memory element. It must be in the /// range `0 .. M::BITS`. /// /// # Safety /// /// If `idx` is outside the range, then the produced value will cause errors /// and memory unsafety when used. #[inline] pub unsafe fn new_unchecked(idx: u8) -> Self { debug_assert!( idx < M::BITS, "Bit index {} cannot exceed type width {}", idx, M::BITS, ); Self { idx, _ty: PhantomData, } } /// Finds the destination bit a certain distance away from a starting bit. /// /// This produces the number of elements to move from the starting point, /// and then the bit index of the destination bit in the destination /// element. /// /// # Parameters /// /// - `self`: A bit index in some memory element, used as the starting /// position for the offset calculation. /// - `by`: The number of bits by which to move. Negative values move /// downwards in memory: towards index zero, then counting from index /// `M::MASK` to index zero in the next element lower in memory, repeating /// until arrival. Positive values move upwards in memory: towards index /// `M::MASK`, then counting from index zero to index `M::MASK` in the /// next element higher in memory, repeating until arrival. /// /// # Returns /// /// - `.0`: The number of elements by which to offset the caller’s element /// cursor. This value can be passed directly into [`ptr::offset`]. /// - `.1`: The bit index of the destination bit in the element selected by /// applying the `.0` pointer offset. /// /// # Safety /// /// `by` must not be far enough to cause the returned element offset value /// to, when applied to the original memory address via [`ptr::offset`], /// produce a reference out of bounds of the original allocation. This /// method has no way of checking this requirement. 
/// /// [`ptr::offset`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.offset pub(crate) fn offset(self, by: isize) -> (isize, Self) { let val = *self; /* Signed-add `*self` and the jump distance. Overflowing is the unlikely branch. The result is a bit index, and an overflow marker. `far` is permitted to be negative; this means that it is lower in memory than the origin bit. The number line has its origin at the front edge of the origin element, so `-1` is the *last* bit of the prior memory element. */ let (far, ovf) = by.overflowing_add(val as isize); // If the `isize` addition does not overflow, then the sum can be used // directly. if !ovf { // If `far` is in the origin element, then the jump moves zero // elements and produces `far` as an absolute index directly. if (0 .. M::BITS as isize).contains(&far) { (0, (far as u8).idx()) } /* Otherwise, downshift the bit distance to compute the number of elements moved in either direction, and mask to compute the absolute bit index in the destination element. */ else { (far >> M::INDX, (far as u8 & M::MASK).idx()) } } else { /* Overflowing `isize` addition happens to produce ordinary `usize` addition. In point of fact, `isize` addition and `usize` addition are the same machine instruction to perform the sum; it is merely the signed interpretation of the sum that differs. The sum can be recast back to `usize` without issue. */ let far = far as usize; // This is really only needed in order to prevent sign-extension of // the downshift; once shifted, the value can be safely re-signed. ((far >> M::INDX) as isize, (far as u8 & M::MASK).idx()) } } /// Computes the size of a span from `self` for `len` bits. /// /// Spans always extend upwards in memory. /// /// # Parameters /// /// - `self`: The starting bit position of the span. /// - `len`: The number of bits to include in the span. /// /// # Returns /// /// - `.0`: The number of elements of `M` included in the span. 
If `len` is /// `0`, this will be `0`; otherwise, it will be at least one. /// - `.1`: The index of the first dead bit *after* the span. If `self` and /// `len` are both `0`, this will be `0`; otherwise, it will be in the /// domain `1 ..= M::BITS`. /// /// # Notes /// /// This defers to [`BitTail::span`], because `BitTail` is a strict superset /// of `BitIdx` (it is `{ BitIdx | M::BITS }`), and spans frequently begin /// from the tail of a slice in this crate. The `offset` function is *not* /// implemented on `BitTail`, and remains on `BitIdx` because offsets can /// only be computed from bit addresses that exist. It does not make sense /// to compute the offset from a `M::BITS` tail. /// /// [`BitTail::span`]: struct.BitTail.html#method.span #[inline] pub(crate) fn span(self, len: usize) -> (usize, BitTail<M>) { unsafe { BitTail::new_unchecked(*self) }.span(len) } } impl<M> Binary for BitIdx<M> where M: BitMemory { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { write!(fmt, "0b{:0>1$b}", self.idx, M::INDX as usize) } } impl<M> Deref for BitIdx<M> where M: BitMemory { type Target = u8; fn deref(&self) -> &Self::Target { &self.idx } } #[cfg(feature = "serde")] impl<M> TryFrom<u8> for BitIdx<M> where M: BitMemory { type Error = &'static str; fn try_from(idx: u8) -> Result<Self, Self::Error> { Self::new(idx).ok_or( "Attempted to construct a `BitIdx` with an index out of range", ) } } /** Indicates a semantic index of a dead bit *beyond* a memory element. This type is equivalent to `BitIdx<M>`, except that it includes `M::BITS` in its domain. Instances of this type will only ever contain `0` when the span they describe is *empty*. Non-empty spans always cycle through the domain `1 ..= M::BITS`. This type cannot be used for indexing, and does not translate to `BitPos<M>`. This type has no behavior other than viewing its internal `u8` for arithmetic. # Type Parameters - `M`: The memory element type controlled by this tail. 
**/ // #[rustc_layout_scalar_valid_range_end(M::BITS + 1)] #[repr(transparent)] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitTail<M> where M: BitMemory { /// Semantic index *after* an element. Constrained to `0 ..= M::BITS`. end: u8, /// Marker for the tailed type. _ty: PhantomData<M>, } impl<M> BitTail<M> where M: BitMemory { /// The termination index. pub const END: Self = Self { end: M::BITS, _ty: PhantomData, }; /// Mark that `end` is a tail index for a type. /// /// # Parameters /// /// - `end` must be in the range `0 ..= M::BITS`. pub(crate) unsafe fn new_unchecked(end: u8) -> Self { debug_assert!( end <= M::BITS, "Bit tail {} cannot surpass type width {}", end, M::BITS, ); Self { end, _ty: PhantomData, } } pub(crate) fn span(self, len: usize) -> (usize, Self) { let val = *self; debug_assert!( val <= M::BITS, "Tail out of range: {} overflows type width {}", val, M::BITS, ); if len == 0 { return (0, self); } let head = val & M::MASK; let bits_in_head = (M::BITS - head) as usize; if len <= bits_in_head { return (1, (head + len as u8).tail()); } let bits_after_head = len - bits_in_head; let elts = bits_after_head >> M::INDX; let tail = bits_after_head as u8 & M::MASK; let is_zero = (tail == 0) as u8; let edges = 2 - is_zero as usize; (elts + edges, ((is_zero << M::INDX) | tail).tail()) /* The above expression is the branchless equivalent of this structure: if tail == 0 { (elts + 1, M::BITS.tail()) } else { (elts + 2, tail.tail()) } */ } } impl<M> Deref for BitTail<M> where M: BitMemory { type Target = u8; fn deref(&self) -> &Self::Target { &self.end } } /** Indicates a real electrical index within an element. This type is produced by [`BitOrder`] implementors, and marks a specific electrical bit within a memory element, rather than [`BitIdx`]’s semantic bit. # Type Parameters - `M`: A `BitMemory` element which provides bounds-checking information. 
The [`new`] constructor uses [`M::BITS`] to ensure that constructed `BitPos` instances are always valid to use within `M` elements. [`BitIdx`]: struct.BitIdx.html [`BitOrder`]: ../order/trait.BitOrder.html [`M::BITS`]: ../mem/trait.BitMemory.html#associatedconstant.BITS [`new`]: #method.new **/ // #[rustc_layout_scalar_valid_range_end(M::BITS)] #[repr(transparent)] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitPos<M> where M: BitMemory { /// Electrical position within an element. Constrained to `0 .. M::BITS`. pos: u8, /// Marker for the positioned type. _ty: PhantomData<M>, } impl<M> BitPos<M> where M: BitMemory { /// Produce a new bit position marker at a valid position value. /// /// `BitOrder` implementations should prefer this method, but *may* use /// [`::new_unchecked`] if they can guarantee that the range invariant is /// upheld. /// /// # Parameters /// /// - `pos`: The bit position value to encode. It must be in the range `0 .. /// M::BITS`. /// /// # Panics /// /// This function panics if `pos` is greater than or equal to `M::BITS`. /// /// [`::new_unchecked`]: #method.new_unchecked #[inline] pub fn new(pos: u8) -> Self { assert!( pos < M::BITS, "Bit position {} cannot exceed type width {}", pos, M::BITS, ); Self { pos, _ty: PhantomData, } } /// Produce a new bit position marker at any position value. /// /// # Safety /// /// The caller *must* ensure that `pos` is less than `M::BITS`. `BitOrder` /// implementations should prefer [`::new`], which panics on range failure. /// /// # Parameters /// /// - `pos`: The bit position value to encode. This must be in the range `0 /// .. M::BITS`. /// /// # Returns /// /// `pos` wrapped in the `BitPos` marker type. /// /// # Panics /// /// This function panics if `pos` is greater than or equal to `M::BITS`, but /// only in debug builds. It does not inspect `pos` in release builds. 
/// /// [`::new`]: #method.new #[inline] pub unsafe fn new_unchecked(pos: u8) -> Self { debug_assert!( pos < M::BITS, "Bit position {} cannot exceed type width {}", pos, M::BITS, ); Self { pos, _ty: PhantomData, } } /// Produces a one-hot selector mask from a position value. /// /// This is equivalent to `1 << *self`. /// /// # Parameters /// /// - `self` /// /// # Returns /// /// A one-hot selector mask with the bit at `*self` set. #[inline] pub fn select(self) -> BitSel<M> { unsafe { BitSel::new_unchecked(M::ONE << *self) } } } impl<M> Deref for BitPos<M> where M: BitMemory { type Target = u8; fn deref(&self) -> &Self::Target { &self.pos } } /** Wrapper type indicating a one-hot encoding of a bit mask for an element. This type is produced by [`BitOrder`] implementations to speed up access to the underlying memory. It ensures that masks have exactly one set bit, and can safely be used as a mask for read/write access to memory. # Type Parameters - `M`: The storage type being masked. [`BitOrder`]: ../order/trait.BitOrder.html **/ #[repr(transparent)] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitSel<M> where M: BitMemory { /// Mask value. sel: M, } impl<M> BitSel<M> where M: BitMemory { /// Produce a new bit-mask wrapper around a one-hot mask value. /// /// `BitOrder` implementations should prefer this method, but *may* use /// [`::new_unchecked`] if they can guarantee that the one-hot invariant is /// upheld. /// /// # Parameters /// /// - `mask`: The mask value to encode. This **must** have exactly one bit /// set high, and all others set low. /// /// # Returns /// /// `mask` wrapped in the `BitMask` marker type. /// /// # Panics /// /// This function unconditionally panics if `mask` has zero or multiple bits /// set high. 
/// /// [`::new_unchecked`]: #method.new_unchecked #[inline] pub fn new(sel: M) -> Self { assert!( sel.count_ones() == 1, "Masks are required to have exactly one set bit: {:0>1$b}", sel, M::BITS as usize, ); Self { sel } } /// Produce a new bit-mask wrapper around any value. /// /// # Safety /// /// The caller *must* ensure that `mask` has exactly one bit set. `BitOrder` /// implementations should prefer [`::new`], which always panics on failure. /// /// # Parameters /// /// - `mask`: The mask value to encode. This must have exactly one bit set. /// Failure to uphold this requirement will introduce uncontrolled state /// contamination. /// /// # Returns /// /// `mask` wrapped in the `BitMask` marker type. /// /// # Panics /// /// This function panics if `mask` has zero or multiple bits set, only in /// debug builds. It does not inspect `mask` in release builds. /// /// [`::new`]: #method.new #[inline] pub unsafe fn new_unchecked(sel: M) -> Self { debug_assert!( sel.count_ones() == 1, "Masks are required to have exactly one set bit: {:0>1$b}", sel, M::BITS as usize, ); Self { sel } } } impl<M> Deref for BitSel<M> where M: BitMemory { type Target = M; fn deref(&self) -> &Self::Target { &self.sel } } /** A multi-bit selector mask. Unlike [`BitSel`], which enforces a strict one-hot mask encoding, this mask type permits any number of bits to be set or unset. This is used to combine batch operations in an element. It is only constructed by accumulating [`BitPos`] or [`BitSel`] values. As `BitSel` is only constructed from `BitPos`, and `BitPos` is only constructed from [`BitIdx`] and [`BitOrder`], this enforces a chain of responsibility to prove that a given multimask is safe. 
[`BitIdx`]: struct.BitIdx.html [`BitOrder`]: ../order/trait.BitOrder.html [`BitPos`]: struct.BitPos.html [`BitSel`]: struct.BitSel.html **/ #[repr(transparent)] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitMask<M> where M: BitMemory { /// A mask of any number of bits to modify. mask: M, } impl<M> BitMask<M> where M: BitMemory { /// A full mask. pub const ALL: Self = Self { mask: M::ALL }; /// An empty mask. pub const ZERO: Self = Self { mask: M::ZERO }; /// Wraps a value as a bitmask. /// /// # Safety /// /// The caller must ensure that the mask value is correct in the caller’s /// provenance. /// /// # Parameters /// /// - `mask`: Any integer, to be reïnterpreted as a bitmask. /// /// # Returns /// /// The `mask` value as a bitmask. pub fn new(mask: M) -> Self { Self { mask } } } impl<M> Product<BitPos<M>> for BitMask<M> where M: BitMemory { fn product<I>(iter: I) -> Self where I: Iterator<Item = BitPos<M>> { iter.map(BitPos::select).product() } } impl<M> Product<BitSel<M>> for BitMask<M> where M: BitMemory { fn product<I>(iter: I) -> Self where I: Iterator<Item = BitSel<M>> { iter.fold(Self::ALL, BitAnd::bitand) } } /// Enable accumulation of a multi-bit mask from a sequence of position values. impl<M> Sum<BitPos<M>> for BitMask<M> where M: BitMemory { fn sum<I>(iter: I) -> Self where I: Iterator<Item = BitPos<M>> { iter.map(BitPos::select).sum() } } /// Enable accumulation of a multi-bit mask from a sequence of selector masks. 
impl<M> Sum<BitSel<M>> for BitMask<M> where M: BitMemory { fn sum<I>(iter: I) -> Self where I: Iterator<Item = BitSel<M>> { iter.fold(Self::ZERO, BitOr::bitor) } } impl<M> BitAnd<M> for BitMask<M> where M: BitMemory { type Output = Self; fn bitand(self, rhs: M) -> Self { Self { mask: self.mask & rhs, } } } impl<M> BitAnd<BitPos<M>> for BitMask<M> where M: BitMemory { type Output = Self; fn bitand(self, rhs: BitPos<M>) -> Self { self & rhs.select() } } impl<M> BitAnd<BitSel<M>> for BitMask<M> where M: BitMemory { type Output = Self; fn bitand(self, rhs: BitSel<M>) -> Self { Self { mask: self.mask & rhs.sel, } } } impl<M> BitOr<M> for BitMask<M> where M: BitMemory { type Output = Self; fn bitor(self, rhs: M) -> Self { Self { mask: self.mask | rhs, } } } /// Insert a position value into a multimask. impl<M> BitOr<BitPos<M>> for BitMask<M> where M: BitMemory { type Output = Self; fn bitor(self, rhs: BitPos<M>) -> Self { self | rhs.select() } } /// Insert a single selector into a multimask. impl<M> BitOr<BitSel<M>> for BitMask<M> where M: BitMemory { type Output = Self; fn bitor(self, rhs: BitSel<M>) -> Self { Self { mask: self.mask | rhs.sel, } } } impl<M> Deref for BitMask<M> where M: BitMemory { type Target = M; fn deref(&self) -> &Self::Target { &self.mask } } impl<M> Not for BitMask<M> where M: BitMemory { type Output = Self; fn not(self) -> Self { Self { mask: !self.mask } } } /** Internal convenience trait for wrapping numbers with appropriate markers. This trait must only be used on values that are known to be valid for their context. It provides an internal-only shorthand for wrapping integer literals and known-good values in marker types. It is only implemented on `u8`. **/ pub(crate) trait Indexable { /// Wraps a value as a `BitIdx<M>`. fn idx<M>(self) -> BitIdx<M> where M: BitMemory; /// Wraps a value as a `BitTail<M>`. fn tail<M>(self) -> BitTail<M> where M: BitMemory; /// Wraps a value as a `BitPos<M>`. 
fn pos<M>(self) -> BitPos<M> where M: BitMemory; } impl Indexable for u8 { fn idx<M>(self) -> BitIdx<M> where M: BitMemory { unsafe { BitIdx::<M>::new_unchecked(self) } } fn tail<M>(self) -> BitTail<M> where M: BitMemory { unsafe { BitTai
-> BitPos<M> where M: BitMemory { unsafe { BitPos::<M>::new_unchecked(self) } } } #[cfg(test)] mod tests { use super::*; #[test] fn jump_far_up() { // isize::max_value() is 0x7f...ff, so the result bit will be one less // than the start bit. for n in 1 .. 8 { let (elt, bit) = n.idx::<u8>().offset(isize::max_value()); assert_eq!(elt, (isize::max_value() >> u8::INDX) + 1); assert_eq!(*bit, n - 1); } let (elt, bit) = 0u8.idx::<u8>().offset(isize::max_value()); assert_eq!(elt, isize::max_value() >> u8::INDX); assert_eq!(*bit, 7); } #[test] fn jump_far_down() { // isize::min_value() is 0x80...00, so the result bit will be equal to // the start bit for n in 0 .. 8 { let (elt, bit) = n.idx::<u8>().offset(isize::min_value()); assert_eq!(elt, isize::min_value() >> u8::INDX); assert_eq!(*bit, n); } } }
l::<M>::new_unchecked(self) } } fn pos<M>(self)
identifier_body
index.rs
/*! Indexing within memory elements. This module provides types which guarantee certain properties about selecting bits within a memory element. These types enable their use sites to explicitly declare the indexing behavior they require, and move safety checks from runtime to compile time. # Bit Indexing The [`BitIdx`] type represents the semantic index of a bit within a memory element. It does not perform bit positioning, and cannot be used to create a shift instruction or mask value. It is transformed into a value which can do these things – [`BitPos`] – through the [`BitOrder::at`] function. # Region End Marker `bitvec` uses “half-open” ranges, described by a starting point and a count of members that are live. This means that the “end” of a range is not the last member that is *in*cluded in the range, but rather the first member that is *ex*cluded from it. This requires the [`BitTail` end marker to include in its range the width of the element type (`8` for `u8`, etc), in order to mark that a region includes the very last bit in the element (index `7` for `u8`, etc`). The starting number for a dead region cannot be used to perform bit selection, but is used to provide range computation, so it is kept distinct from the indexing types. # Bit Positioning The [`BitPos`] type corresponds directly to a bit position in a memory element. Its value can be used to create shift instructions which select part of memory. It is only ever created by the `BitOrder::at` function. # Bit Selection The [`BitSel`] type is a one-hot mask encoding for a memory element. Unlike the previous types, which are range-limited integers, this type is a wrapper over a memory element and guarantees that it can be used as a mask value in `&` and `|` operations to modify exactly one bit. It is equivalent to `1 << BitPos.value()`. # Bit Masking Lastly, the [`BitMask`] type is a bitmask that permits any number of bits to be set or cleared. 
It is provided as a type rather than a bare value in order to clearly communicate that there is no restriction on what this mask may affect. [`BitIdx`]: struct.BitIdx.html [`BitMask`]: struct.BitMask.html [`BitOrder::at`]: ../order/trait.BitOrder.html#method.at [`BitPos`]: struct.BitPos.html [`BitSel`]: struct.BitSel.html [`BitTail`]: struct.BitTail.html !*/ use crate::mem::BitMemory; use core::{ fmt::{ self, Binary, Formatter, }, iter::{ Product, Sum, }, marker::PhantomData, ops::{ BitAnd, BitOr, Deref, Not, }, }; #[cfg(feature = "serde")] use core::convert::TryFrom; /** Indicates a semantic index of a bit within a memory element. This is a counter in the domain `0 .. M::BITS`, and marks a semantic position in the ordering sequence described by a [`BitOrder`] implementation. It is used for both position computation through `BitOrder` and range computation in [`BitPtr`]. # Type Parameters - `M`: The memory element type controlled by this index. [`BitOrder`]: ../order/trait.BitOrder.html [`BitPtr`]: ../pointer/struct.BitPtr.html **/ // If Rust had user-provided ranged integers, this would be communicable to the // compiler: // #[rustc_layout_scalar_valid_range_end(M::BITS)] #[repr(transparent)] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitIdx<M> where M: BitMemory { /// Semantic index within an element. Constrained to `0 .. M::BITS`. idx: u8, /// Marker for the indexed type. _ty: PhantomData<M>, } impl<M> BitIdx<M> where M: BitMemory { /// The zero index. pub const ZERO: Self = Self { idx: 0, _ty: PhantomData, }; /// Wraps a counter value as a known-good index of the `M` element type. /// /// # Parameters /// /// - `idx`: A semantic index within a `M` memory element. /// /// # Returns /// /// If `idx` is within the range `0 .. M::BITS`, then this returns the index /// value wrapped in the index type; if `idx` exceeds this range, then this /// returns `None`. 
pub fn new(idx: u8) -> Option<Self> { if idx >= M::BITS { return None; } Some(unsafe { Self::new_unchecked(idx) }) } /// Wraps a counter value as a known-good index of the `M` element type. /// /// # Parameters /// /// - `idx`: A semantic index within a `M` memory element. It must be in the /// range `0 .. M::BITS`. /// /// # Safety /// /// If `idx` is outside the range, then the produced value will cause errors /// and memory unsafety when used. #[inline] pub unsafe fn new_unchecked(idx: u8) -> Self { debug_assert!( idx < M::BITS, "Bit index {} cannot exceed type width {}", idx, M::BITS, ); Self { idx, _ty: PhantomData, } } /// Finds the destination bit a certain distance away from a starting bit. /// /// This produces the number of elements to move from the starting point, /// and then the bit index of the destination bit in the destination /// element. /// /// # Parameters /// /// - `self`: A bit index in some memory element, used as the starting /// position for the offset calculation. /// - `by`: The number of bits by which to move. Negative values move /// downwards in memory: towards index zero, then counting from index /// `M::MASK` to index zero in the next element lower in memory, repeating /// until arrival. Positive values move upwards in memory: towards index /// `M::MASK`, then counting from index zero to index `M::MASK` in the /// next element higher in memory, repeating until arrival. /// /// # Returns /// /// - `.0`: The number of elements by which to offset the caller’s element /// cursor. This value can be passed directly into [`ptr::offset`]. /// - `.1`: The bit index of the destination bit in the element selected by /// applying the `.0` pointer offset. /// /// # Safety /// /// `by` must not be far enough to cause the returned element offset value /// to, when applied to the original memory address via [`ptr::offset`], /// produce a reference out of bounds of the original allocation. This /// method has no way of checking this requirement. 
/// /// [`ptr::offset`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.offset pub(crate) fn offset(self, by: isize) -> (isize, Self) { let val = *self; /* Signed-add `*self` and the jump distance. Overflowing is the unlikely branch. The result is a bit index, and an overflow marker. `far` is permitted to be negative; this means that it is lower in memory than the origin bit. The number line has its origin at the front edge of the origin element, so `-1` is the *last* bit of the prior memory element. */ let (far, ovf) = by.overflowing_add(val as isize); // If the `isize` addition does not overflow, then the sum can be used // directly. if !ovf { // If `far` is in the origin element, then the jump moves zero // elements and produces `far` as an absolute index directly. if (0 .. M::BITS as isize).contains(&far) { (0, (far
se, downshift the bit distance to compute the number of elements moved in either direction, and mask to compute the absolute bit index in the destination element. */ else { (far >> M::INDX, (far as u8 & M::MASK).idx()) } } else { /* Overflowing `isize` addition happens to produce ordinary `usize` addition. In point of fact, `isize` addition and `usize` addition are the same machine instruction to perform the sum; it is merely the signed interpretation of the sum that differs. The sum can be recast back to `usize` without issue. */ let far = far as usize; // This is really only needed in order to prevent sign-extension of // the downshift; once shifted, the value can be safely re-signed. ((far >> M::INDX) as isize, (far as u8 & M::MASK).idx()) } } /// Computes the size of a span from `self` for `len` bits. /// /// Spans always extend upwards in memory. /// /// # Parameters /// /// - `self`: The starting bit position of the span. /// - `len`: The number of bits to include in the span. /// /// # Returns /// /// - `.0`: The number of elements of `M` included in the span. If `len` is /// `0`, this will be `0`; otherwise, it will be at least one. /// - `.1`: The index of the first dead bit *after* the span. If `self` and /// `len` are both `0`, this will be `0`; otherwise, it will be in the /// domain `1 ..= M::BITS`. /// /// # Notes /// /// This defers to [`BitTail::span`], because `BitTail` is a strict superset /// of `BitIdx` (it is `{ BitIdx | M::BITS }`), and spans frequently begin /// from the tail of a slice in this crate. The `offset` function is *not* /// implemented on `BitTail`, and remains on `BitIdx` because offsets can /// only be computed from bit addresses that exist. It does not make sense /// to compute the offset from a `M::BITS` tail. 
/// /// [`BitTail::span`]: struct.BitTail.html#method.span #[inline] pub(crate) fn span(self, len: usize) -> (usize, BitTail<M>) { unsafe { BitTail::new_unchecked(*self) }.span(len) } } impl<M> Binary for BitIdx<M> where M: BitMemory { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { write!(fmt, "0b{:0>1$b}", self.idx, M::INDX as usize) } } impl<M> Deref for BitIdx<M> where M: BitMemory { type Target = u8; fn deref(&self) -> &Self::Target { &self.idx } } #[cfg(feature = "serde")] impl<M> TryFrom<u8> for BitIdx<M> where M: BitMemory { type Error = &'static str; fn try_from(idx: u8) -> Result<Self, Self::Error> { Self::new(idx).ok_or( "Attempted to construct a `BitIdx` with an index out of range", ) } } /** Indicates a semantic index of a dead bit *beyond* a memory element. This type is equivalent to `BitIdx<M>`, except that it includes `M::BITS` in its domain. Instances of this type will only ever contain `0` when the span they describe is *empty*. Non-empty spans always cycle through the domain `1 ..= M::BITS`. This type cannot be used for indexing, and does not translate to `BitPos<M>`. This type has no behavior other than viewing its internal `u8` for arithmetic. # Type Parameters - `M`: The memory element type controlled by this tail. **/ // #[rustc_layout_scalar_valid_range_end(M::BITS + 1)] #[repr(transparent)] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitTail<M> where M: BitMemory { /// Semantic index *after* an element. Constrained to `0 ..= M::BITS`. end: u8, /// Marker for the tailed type. _ty: PhantomData<M>, } impl<M> BitTail<M> where M: BitMemory { /// The termination index. pub const END: Self = Self { end: M::BITS, _ty: PhantomData, }; /// Mark that `end` is a tail index for a type. /// /// # Parameters /// /// - `end` must be in the range `0 ..= M::BITS`. 
pub(crate) unsafe fn new_unchecked(end: u8) -> Self { debug_assert!( end <= M::BITS, "Bit tail {} cannot surpass type width {}", end, M::BITS, ); Self { end, _ty: PhantomData, } } pub(crate) fn span(self, len: usize) -> (usize, Self) { let val = *self; debug_assert!( val <= M::BITS, "Tail out of range: {} overflows type width {}", val, M::BITS, ); if len == 0 { return (0, self); } let head = val & M::MASK; let bits_in_head = (M::BITS - head) as usize; if len <= bits_in_head { return (1, (head + len as u8).tail()); } let bits_after_head = len - bits_in_head; let elts = bits_after_head >> M::INDX; let tail = bits_after_head as u8 & M::MASK; let is_zero = (tail == 0) as u8; let edges = 2 - is_zero as usize; (elts + edges, ((is_zero << M::INDX) | tail).tail()) /* The above expression is the branchless equivalent of this structure: if tail == 0 { (elts + 1, M::BITS.tail()) } else { (elts + 2, tail.tail()) } */ } } impl<M> Deref for BitTail<M> where M: BitMemory { type Target = u8; fn deref(&self) -> &Self::Target { &self.end } } /** Indicates a real electrical index within an element. This type is produced by [`BitOrder`] implementors, and marks a specific electrical bit within a memory element, rather than [`BitIdx`]’s semantic bit. # Type Parameters - `M`: A `BitMemory` element which provides bounds-checking information. The [`new`] constructor uses [`M::BITS`] to ensure that constructed `BitPos` instances are always valid to use within `M` elements. [`BitIdx`]: struct.BitIdx.html [`BitOrder`]: ../order/trait.BitOrder.html [`M::BITS`]: ../mem/trait.BitMemory.html#associatedconstant.BITS [`new`]: #method.new **/ // #[rustc_layout_scalar_valid_range_end(M::BITS)] #[repr(transparent)] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitPos<M> where M: BitMemory { /// Electrical position within an element. Constrained to `0 .. M::BITS`. pos: u8, /// Marker for the positioned type. 
_ty: PhantomData<M>, } impl<M> BitPos<M> where M: BitMemory { /// Produce a new bit position marker at a valid position value. /// /// `BitOrder` implementations should prefer this method, but *may* use /// [`::new_unchecked`] if they can guarantee that the range invariant is /// upheld. /// /// # Parameters /// /// - `pos`: The bit position value to encode. It must be in the range `0 .. /// M::BITS`. /// /// # Panics /// /// This function panics if `pos` is greater than or equal to `M::BITS`. /// /// [`::new_unchecked`]: #method.new_unchecked #[inline] pub fn new(pos: u8) -> Self { assert!( pos < M::BITS, "Bit position {} cannot exceed type width {}", pos, M::BITS, ); Self { pos, _ty: PhantomData, } } /// Produce a new bit position marker at any position value. /// /// # Safety /// /// The caller *must* ensure that `pos` is less than `M::BITS`. `BitOrder` /// implementations should prefer [`::new`], which panics on range failure. /// /// # Parameters /// /// - `pos`: The bit position value to encode. This must be in the range `0 /// .. M::BITS`. /// /// # Returns /// /// `pos` wrapped in the `BitPos` marker type. /// /// # Panics /// /// This function panics if `pos` is greater than or equal to `M::BITS`, but /// only in debug builds. It does not inspect `pos` in release builds. /// /// [`::new`]: #method.new #[inline] pub unsafe fn new_unchecked(pos: u8) -> Self { debug_assert!( pos < M::BITS, "Bit position {} cannot exceed type width {}", pos, M::BITS, ); Self { pos, _ty: PhantomData, } } /// Produces a one-hot selector mask from a position value. /// /// This is equivalent to `1 << *self`. /// /// # Parameters /// /// - `self` /// /// # Returns /// /// A one-hot selector mask with the bit at `*self` set. 
#[inline] pub fn select(self) -> BitSel<M> { unsafe { BitSel::new_unchecked(M::ONE << *self) } } } impl<M> Deref for BitPos<M> where M: BitMemory { type Target = u8; fn deref(&self) -> &Self::Target { &self.pos } } /** Wrapper type indicating a one-hot encoding of a bit mask for an element. This type is produced by [`BitOrder`] implementations to speed up access to the underlying memory. It ensures that masks have exactly one set bit, and can safely be used as a mask for read/write access to memory. # Type Parameters - `M`: The storage type being masked. [`BitOrder`]: ../order/trait.BitOrder.html **/ #[repr(transparent)] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitSel<M> where M: BitMemory { /// Mask value. sel: M, } impl<M> BitSel<M> where M: BitMemory { /// Produce a new bit-mask wrapper around a one-hot mask value. /// /// `BitOrder` implementations should prefer this method, but *may* use /// [`::new_unchecked`] if they can guarantee that the one-hot invariant is /// upheld. /// /// # Parameters /// /// - `mask`: The mask value to encode. This **must** have exactly one bit /// set high, and all others set low. /// /// # Returns /// /// `mask` wrapped in the `BitMask` marker type. /// /// # Panics /// /// This function unconditionally panics if `mask` has zero or multiple bits /// set high. /// /// [`::new_unchecked`]: #method.new_unchecked #[inline] pub fn new(sel: M) -> Self { assert!( sel.count_ones() == 1, "Masks are required to have exactly one set bit: {:0>1$b}", sel, M::BITS as usize, ); Self { sel } } /// Produce a new bit-mask wrapper around any value. /// /// # Safety /// /// The caller *must* ensure that `mask` has exactly one bit set. `BitOrder` /// implementations should prefer [`::new`], which always panics on failure. /// /// # Parameters /// /// - `mask`: The mask value to encode. This must have exactly one bit set. 
/// Failure to uphold this requirement will introduce uncontrolled state /// contamination. /// /// # Returns /// /// `mask` wrapped in the `BitMask` marker type. /// /// # Panics /// /// This function panics if `mask` has zero or multiple bits set, only in /// debug builds. It does not inspect `mask` in release builds. /// /// [`::new`]: #method.new #[inline] pub unsafe fn new_unchecked(sel: M) -> Self { debug_assert!( sel.count_ones() == 1, "Masks are required to have exactly one set bit: {:0>1$b}", sel, M::BITS as usize, ); Self { sel } } } impl<M> Deref for BitSel<M> where M: BitMemory { type Target = M; fn deref(&self) -> &Self::Target { &self.sel } } /** A multi-bit selector mask. Unlike [`BitSel`], which enforces a strict one-hot mask encoding, this mask type permits any number of bits to be set or unset. This is used to combine batch operations in an element. It is only constructed by accumulating [`BitPos`] or [`BitSel`] values. As `BitSel` is only constructed from `BitPos`, and `BitPos` is only constructed from [`BitIdx`] and [`BitOrder`], this enforces a chain of responsibility to prove that a given multimask is safe. [`BitIdx`]: struct.BitIdx.html [`BitOrder`]: ../order/trait.BitOrder.html [`BitPos`]: struct.BitPos.html [`BitSel`]: struct.BitSel.html **/ #[repr(transparent)] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitMask<M> where M: BitMemory { /// A mask of any number of bits to modify. mask: M, } impl<M> BitMask<M> where M: BitMemory { /// A full mask. pub const ALL: Self = Self { mask: M::ALL }; /// An empty mask. pub const ZERO: Self = Self { mask: M::ZERO }; /// Wraps a value as a bitmask. /// /// # Safety /// /// The caller must ensure that the mask value is correct in the caller’s /// provenance. /// /// # Parameters /// /// - `mask`: Any integer, to be reïnterpreted as a bitmask. /// /// # Returns /// /// The `mask` value as a bitmask. 
pub fn new(mask: M) -> Self { Self { mask } } } impl<M> Product<BitPos<M>> for BitMask<M> where M: BitMemory { fn product<I>(iter: I) -> Self where I: Iterator<Item = BitPos<M>> { iter.map(BitPos::select).product() } } impl<M> Product<BitSel<M>> for BitMask<M> where M: BitMemory { fn product<I>(iter: I) -> Self where I: Iterator<Item = BitSel<M>> { iter.fold(Self::ALL, BitAnd::bitand) } } /// Enable accumulation of a multi-bit mask from a sequence of position values. impl<M> Sum<BitPos<M>> for BitMask<M> where M: BitMemory { fn sum<I>(iter: I) -> Self where I: Iterator<Item = BitPos<M>> { iter.map(BitPos::select).sum() } } /// Enable accumulation of a multi-bit mask from a sequence of selector masks. impl<M> Sum<BitSel<M>> for BitMask<M> where M: BitMemory { fn sum<I>(iter: I) -> Self where I: Iterator<Item = BitSel<M>> { iter.fold(Self::ZERO, BitOr::bitor) } } impl<M> BitAnd<M> for BitMask<M> where M: BitMemory { type Output = Self; fn bitand(self, rhs: M) -> Self { Self { mask: self.mask & rhs, } } } impl<M> BitAnd<BitPos<M>> for BitMask<M> where M: BitMemory { type Output = Self; fn bitand(self, rhs: BitPos<M>) -> Self { self & rhs.select() } } impl<M> BitAnd<BitSel<M>> for BitMask<M> where M: BitMemory { type Output = Self; fn bitand(self, rhs: BitSel<M>) -> Self { Self { mask: self.mask & rhs.sel, } } } impl<M> BitOr<M> for BitMask<M> where M: BitMemory { type Output = Self; fn bitor(self, rhs: M) -> Self { Self { mask: self.mask | rhs, } } } /// Insert a position value into a multimask. impl<M> BitOr<BitPos<M>> for BitMask<M> where M: BitMemory { type Output = Self; fn bitor(self, rhs: BitPos<M>) -> Self { self | rhs.select() } } /// Insert a single selector into a multimask. 
impl<M> BitOr<BitSel<M>> for BitMask<M> where M: BitMemory { type Output = Self; fn bitor(self, rhs: BitSel<M>) -> Self { Self { mask: self.mask | rhs.sel, } } } impl<M> Deref for BitMask<M> where M: BitMemory { type Target = M; fn deref(&self) -> &Self::Target { &self.mask } } impl<M> Not for BitMask<M> where M: BitMemory { type Output = Self; fn not(self) -> Self { Self { mask: !self.mask } } } /** Internal convenience trait for wrapping numbers with appropriate markers. This trait must only be used on values that are known to be valid for their context. It provides an internal-only shorthand for wrapping integer literals and known-good values in marker types. It is only implemented on `u8`. **/ pub(crate) trait Indexable { /// Wraps a value as a `BitIdx<M>`. fn idx<M>(self) -> BitIdx<M> where M: BitMemory; /// Wraps a value as a `BitTail<M>`. fn tail<M>(self) -> BitTail<M> where M: BitMemory; /// Wraps a value as a `BitPos<M>`. fn pos<M>(self) -> BitPos<M> where M: BitMemory; } impl Indexable for u8 { fn idx<M>(self) -> BitIdx<M> where M: BitMemory { unsafe { BitIdx::<M>::new_unchecked(self) } } fn tail<M>(self) -> BitTail<M> where M: BitMemory { unsafe { BitTail::<M>::new_unchecked(self) } } fn pos<M>(self) -> BitPos<M> where M: BitMemory { unsafe { BitPos::<M>::new_unchecked(self) } } } #[cfg(test)] mod tests { use super::*; #[test] fn jump_far_up() { // isize::max_value() is 0x7f...ff, so the result bit will be one less // than the start bit. for n in 1 .. 8 { let (elt, bit) = n.idx::<u8>().offset(isize::max_value()); assert_eq!(elt, (isize::max_value() >> u8::INDX) + 1); assert_eq!(*bit, n - 1); } let (elt, bit) = 0u8.idx::<u8>().offset(isize::max_value()); assert_eq!(elt, isize::max_value() >> u8::INDX); assert_eq!(*bit, 7); } #[test] fn jump_far_down() { // isize::min_value() is 0x80...00, so the result bit will be equal to // the start bit for n in 0 .. 
8 { let (elt, bit) = n.idx::<u8>().offset(isize::min_value()); assert_eq!(elt, isize::min_value() >> u8::INDX); assert_eq!(*bit, n); } } }
as u8).idx()) } /* Otherwi
conditional_block
index.rs
/*! Indexing within memory elements. This module provides types which guarantee certain properties about selecting bits within a memory element. These types enable their use sites to explicitly declare the indexing behavior they require, and move safety checks from runtime to compile time. # Bit Indexing The [`BitIdx`] type represents the semantic index of a bit within a memory element. It does not perform bit positioning, and cannot be used to create a shift instruction or mask value. It is transformed into a value which can do these things – [`BitPos`] – through the [`BitOrder::at`] function. # Region End Marker `bitvec` uses “half-open” ranges, described by a starting point and a count of members that are live. This means that the “end” of a range is not the last member that is *in*cluded in the range, but rather the first member that is *ex*cluded from it. This requires the [`BitTail` end marker to include in its range the width of the element type (`8` for `u8`, etc), in order to mark that a region includes the very last bit in the element (index `7` for `u8`, etc`). The starting number for a dead region cannot be used to perform bit selection, but is used to provide range computation, so it is kept distinct from the indexing types. # Bit Positioning The [`BitPos`] type corresponds directly to a bit position in a memory element. Its value can be used to create shift instructions which select part of memory. It is only ever created by the `BitOrder::at` function. # Bit Selection The [`BitSel`] type is a one-hot mask encoding for a memory element. Unlike the previous types, which are range-limited integers, this type is a wrapper over a memory element and guarantees that it can be used as a mask value in `&` and `|` operations to modify exactly one bit. It is equivalent to `1 << BitPos.value()`. # Bit Masking Lastly, the [`BitMask`] type is a bitmask that permits any number of bits to be set or cleared. 
It is provided as a type rather than a bare value in order to clearly communicate that there is no restriction on what this mask may affect. [`BitIdx`]: struct.BitIdx.html [`BitMask`]: struct.BitMask.html [`BitOrder::at`]: ../order/trait.BitOrder.html#method.at [`BitPos`]: struct.BitPos.html [`BitSel`]: struct.BitSel.html [`BitTail`]: struct.BitTail.html !*/ use crate::mem::BitMemory; use core::{ fmt::{ self, Binary, Formatter, }, iter::{ Product, Sum, }, marker::PhantomData, ops::{ BitAnd, BitOr, Deref, Not, }, }; #[cfg(feature = "serde")] use core::convert::TryFrom; /** Indicates a semantic index of a bit within a memory element. This is a counter in the domain `0 .. M::BITS`, and marks a semantic position in the ordering sequence described by a [`BitOrder`] implementation. It is used for both position computation through `BitOrder` and range computation in [`BitPtr`]. # Type Parameters - `M`: The memory element type controlled by this index. [`BitOrder`]: ../order/trait.BitOrder.html [`BitPtr`]: ../pointer/struct.BitPtr.html **/ // If Rust had user-provided ranged integers, this would be communicable to the // compiler: // #[rustc_layout_scalar_valid_range_end(M::BITS)] #[repr(transparent)] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitIdx<M> where M: BitMemory { /// Semantic index within an element. Constrained to `0 .. M::BITS`. idx: u8, /// Marker for the indexed type. _ty: PhantomData<M>, } impl<M> BitIdx<M> where M: BitMemory { /// The zero index. pub const ZERO: Self = Self { idx: 0, _ty: PhantomData, }; /// Wraps a counter value as a known-good index of the `M` element type. /// /// # Parameters /// /// - `idx`: A semantic index within a `M` memory element. /// /// # Returns /// /// If `idx` is within the range `0 .. M::BITS`, then this returns the index /// value wrapped in the index type; if `idx` exceeds this range, then this /// returns `None`. 
pub fn new(idx: u8) -> Option<Self> { if idx >= M::BITS { return None; } Some(unsafe { Self::new_unchecked(idx) }) } /// Wraps a counter value as a known-good index of the `M` element type. /// /// # Parameters /// /// - `idx`: A semantic index within a `M` memory element. It must be in the /// range `0 .. M::BITS`. /// /// # Safety /// /// If `idx` is outside the range, then the produced value will cause errors /// and memory unsafety when used. #[inline] pub unsafe fn new_unchecked(idx: u8) -> Self { debug_assert!( idx < M::BITS, "Bit index {} cannot exceed type width {}", idx, M::BITS, ); Self { idx, _ty: PhantomData, } } /// Finds the destination bit a certain distance away from a starting bit. /// /// This produces the number of elements to move from the starting point, /// and then the bit index of the destination bit in the destination /// element. /// /// # Parameters /// /// - `self`: A bit index in some memory element, used as the starting /// position for the offset calculation. /// - `by`: The number of bits by which to move. Negative values move /// downwards in memory: towards index zero, then counting from index /// `M::MASK` to index zero in the next element lower in memory, repeating /// until arrival. Positive values move upwards in memory: towards index /// `M::MASK`, then counting from index zero to index `M::MASK` in the /// next element higher in memory, repeating until arrival. /// /// # Returns /// /// - `.0`: The number of elements by which to offset the caller’s element /// cursor. This value can be passed directly into [`ptr::offset`]. /// - `.1`: The bit index of the destination bit in the element selected by /// applying the `.0` pointer offset. /// /// # Safety /// /// `by` must not be far enough to cause the returned element offset value /// to, when applied to the original memory address via [`ptr::offset`], /// produce a reference out of bounds of the original allocation. This /// method has no way of checking this requirement. 
/// /// [`ptr::offset`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.offset pub(crate) fn offset(self, by: isize) -> (isize, Self) { let val = *self; /* Signed-add `*self` and the jump distance. Overflowing is the unlikely branch. The result is a bit index, and an overflow marker. `far` is permitted to be negative; this means that it is lower in memory than the origin bit. The number line has its origin at the front edge of the origin element, so `-1` is the *last* bit of the prior memory element. */ let (far, ovf) = by.overflowing_add(val as isize); // If the `isize` addition does not overflow, then the sum can be used // directly. if !ovf { // If `far` is in the origin element, then the jump moves zero // elements and produces `far` as an absolute index directly. if (0 .. M::BITS as isize).contains(&far) { (0, (far as u8).idx()) } /* Otherwise, downshift the bit distance to compute the number of elements moved in either direction, and mask to compute the absolute bit index in the destination element. */ else { (far >> M::INDX, (far as u8 & M::MASK).idx()) } } else { /* Overflowing `isize` addition happens to produce ordinary `usize` addition. In point of fact, `isize` addition and `usize` addition are the same machine instruction to perform the sum; it is merely the signed interpretation of the sum that differs. The sum can be recast back to `usize` without issue. */ let far = far as usize; // This is really only needed in order to prevent sign-extension of // the downshift; once shifted, the value can be safely re-signed. ((far >> M::INDX) as isize, (far as u8 & M::MASK).idx()) } } /// Computes the size of a span from `self` for `len` bits. /// /// Spans always extend upwards in memory. /// /// # Parameters /// /// - `self`: The starting bit position of the span. /// - `len`: The number of bits to include in the span. /// /// # Returns /// /// - `.0`: The number of elements of `M` included in the span. 
If `len` is /// `0`, this will be `0`; otherwise, it will be at least one. /// - `.1`: The index of the first dead bit *after* the span. If `self` and /// `len` are both `0`, this will be `0`; otherwise, it will be in the /// domain `1 ..= M::BITS`. /// /// # Notes /// /// This defers to [`BitTail::span`], because `BitTail` is a strict superset /// of `BitIdx` (it is `{ BitIdx | M::BITS }`), and spans frequently begin /// from the tail of a slice in this crate. The `offset` function is *not* /// implemented on `BitTail`, and remains on `BitIdx` because offsets can /// only be computed from bit addresses that exist. It does not make sense /// to compute the offset from a `M::BITS` tail. /// /// [`BitTail::span`]: struct.BitTail.html#method.span #[inline] pub(crate) fn span(self, len: usize) -> (usize, BitTail<M>) { unsafe { BitTail::new_unchecked(*self) }.span(len) } } impl<M> Binary for BitIdx<M> where M: BitMemory { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { write!(fmt, "0b{:0>1$b}", self.idx, M::INDX as usize) } } impl<M> Deref for BitIdx<M> where M: BitMemory { type Target = u8; fn deref(&self) -> &Self::Target { &self.idx } } #[cfg(feature = "serde")] impl<M> TryFrom<u8> for BitIdx<M> where M: BitMemory { type Error = &'static str; fn try_from(idx: u8) -> Result<Self, Self::Error> { Self::new(idx).ok_or( "Attempted to construct a `BitIdx` with an index out of range", ) } } /** Indicates a semantic index of a dead bit *beyond* a memory element. This type is equivalent to `BitIdx<M>`, except that it includes `M::BITS` in its domain. Instances of this type will only ever contain `0` when the span they describe is *empty*. Non-empty spans always cycle through the domain `1 ..= M::BITS`. This type cannot be used for indexing, and does not translate to `BitPos<M>`. This type has no behavior other than viewing its internal `u8` for arithmetic. # Type Parameters - `M`: The memory element type controlled by this tail. 
**/ // #[rustc_layout_scalar_valid_range_end(M::BITS + 1)] #[repr(transparent)] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitTail<M> where M: BitMemory { /// Semantic index *after* an element. Constrained to `0 ..= M::BITS`. end: u8, /// Marker for the tailed type. _ty: PhantomData<M>, } impl<M> BitTail<M> where M: BitMemory { /// The termination index. pub const END: Self = Self { end: M::BITS, _ty: PhantomData, }; /// Mark that `end` is a tail index for a type. /// /// # Parameters /// /// - `end` must be in the range `0 ..= M::BITS`. pub(crate) unsafe fn new_unchecked(end: u8) -> Self { debug_assert!( end <= M::BITS, "Bit tail {} cannot surpass type width {}", end, M::BITS, ); Self { end, _ty: PhantomData, } } pub(crate) fn span(self, len: usize) -> (usize, Self) { let val = *self; debug_assert!( val <= M::BITS, "Tail out of range: {} overflows type width {}", val, M::BITS, ); if len == 0 { return (0, self); } let head = val & M::MASK; let bits_in_head = (M::BITS - head) as usize; if len <= bits_in_head { return (1, (head + len as u8).tail()); } let bits_after_head = len - bits_in_head; let elts = bits_after_head >> M::INDX; let tail = bits_after_head as u8 & M::MASK; let is_zero = (tail == 0) as u8; let edges = 2 - is_zero as usize; (elts + edges, ((is_zero << M::INDX) | tail).tail()) /* The above expression is the branchless equivalent of this structure: if tail == 0 { (elts + 1, M::BITS.tail()) } else { (elts + 2, tail.tail()) } */ } } impl<M> Deref for BitTail<M> where M: BitMemory { type Target = u8; fn deref(&self) -> &Self::Target { &self.end } } /** Indicates a real electrical index within an element. This type is produced by [`BitOrder`] implementors, and marks a specific electrical bit within a memory element, rather than [`BitIdx`]’s semantic bit. # Type Parameters - `M`: A `BitMemory` element which provides bounds-checking information. 
The [`new`] constructor uses [`M::BITS`] to ensure that constructed `BitPos` instances are always valid to use within `M` elements. [`BitIdx`]: struct.BitIdx.html [`BitOrder`]: ../order/trait.BitOrder.html [`M::BITS`]: ../mem/trait.BitMemory.html#associatedconstant.BITS [`new`]: #method.new **/ // #[rustc_layout_scalar_valid_range_end(M::BITS)] #[repr(transparent)] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitPos<M> where M: BitMemory { /// Electrical position within an element. Constrained to `0 .. M::BITS`. pos: u8, /// Marker for the positioned type. _ty: PhantomData<M>, } impl<M> BitPos<M> where M: BitMemory { /// Produce a new bit position marker at a valid position value. /// /// `BitOrder` implementations should prefer this method, but *may* use /// [`::new_unchecked`] if they can guarantee that the range invariant is /// upheld. /// /// # Parameters /// /// - `pos`: The bit position value to encode. It must be in the range `0 .. /// M::BITS`. /// /// # Panics /// /// This function panics if `pos` is greater than or equal to `M::BITS`. /// /// [`::new_unchecked`]: #method.new_unchecked #[inline] pub fn new(pos: u8) -> Self { assert!( pos < M::BITS, "Bit position {} cannot exceed type width {}", pos, M::BITS, ); Self { pos, _ty: PhantomData, } } /// Produce a new bit position marker at any position value. /// /// # Safety /// /// The caller *must* ensure that `pos` is less than `M::BITS`. `BitOrder` /// implementations should prefer [`::new`], which panics on range failure. /// /// # Parameters /// /// - `pos`: The bit position value to encode. This must be in the range `0 /// .. M::BITS`. /// /// # Returns /// /// `pos` wrapped in the `BitPos` marker type. /// /// # Panics /// /// This function panics if `pos` is greater than or equal to `M::BITS`, but /// only in debug builds. It does not inspect `pos` in release builds. 
/// /// [`::new`]: #method.new #[inline] pub unsafe fn new_unchecked(pos: u8) -> Self { debug_assert!( pos < M::BITS, "Bit position {} cannot exceed type width {}", pos, M::BITS, ); Self { pos, _ty: PhantomData, } } /// Produces a one-hot selector mask from a position value. /// /// This is equivalent to `1 << *self`. /// /// # Parameters /// /// - `self` /// /// # Returns /// /// A one-hot selector mask with the bit at `*self` set. #[inline] pub fn select(self) -> BitSel<M> { unsafe { BitSel::new_unchecked(M::ONE << *self) } } } impl<M> Deref for BitPos<M> where M: BitMemory { type Target = u8; fn deref(&self) -> &Self::Target { &self.pos } } /** Wrapper type indicating a one-hot encoding of a bit mask for an element. This type is produced by [`BitOrder`] implementations to speed up access to the underlying memory. It ensures that masks have exactly one set bit, and can safely be used as a mask for read/write access to memory. # Type Parameters - `M`: The storage type being masked. [`BitOrder`]: ../order/trait.BitOrder.html **/ #[repr(transparent)] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitSel<M> where M: BitMemory { /// Mask value. sel: M, } impl<M> BitSel<M> where M: BitMemory { /// Produce a new bit-mask wrapper around a one-hot mask value. /// /// `BitOrder` implementations should prefer this method, but *may* use /// [`::new_unchecked`] if they can guarantee that the one-hot invariant is /// upheld. /// /// # Parameters /// /// - `mask`: The mask value to encode. This **must** have exactly one bit /// set high, and all others set low. /// /// # Returns /// /// `mask` wrapped in the `BitMask` marker type. /// /// # Panics /// /// This function unconditionally panics if `mask` has zero or multiple bits /// set high. 
/// /// [`::new_unchecked`]: #method.new_unchecked #[inline] pub fn new(sel: M) -> Self { assert!( sel.count_ones() == 1, "Masks are required to have exactly one set bit: {:0>1$b}", sel, M::BITS as usize, ); Self { sel } } /// Produce a new bit-mask wrapper around any value. /// /// # Safety /// /// The caller *must* ensure that `mask` has exactly one bit set. `BitOrder` /// implementations should prefer [`::new`], which always panics on failure. /// /// # Parameters /// /// - `mask`: The mask value to encode. This must have exactly one bit set. /// Failure to uphold this requirement will introduce uncontrolled state /// contamination. /// /// # Returns /// /// `mask` wrapped in the `BitMask` marker type. /// /// # Panics /// /// This function panics if `mask` has zero or multiple bits set, only in /// debug builds. It does not inspect `mask` in release builds. /// /// [`::new`]: #method.new #[inline] pub unsafe fn new_unchecked(sel: M) -> Self { debug_assert!( sel.count_ones() == 1, "Masks are required to have exactly one set bit: {:0>1$b}", sel, M::BITS as usize, ); Self { sel } } } impl<M> Deref for BitSel<M> where M: BitMemory { type Target = M; fn deref(&self) ->
::Target { &self.sel } } /** A multi-bit selector mask. Unlike [`BitSel`], which enforces a strict one-hot mask encoding, this mask type permits any number of bits to be set or unset. This is used to combine batch operations in an element. It is only constructed by accumulating [`BitPos`] or [`BitSel`] values. As `BitSel` is only constructed from `BitPos`, and `BitPos` is only constructed from [`BitIdx`] and [`BitOrder`], this enforces a chain of responsibility to prove that a given multimask is safe. [`BitIdx`]: struct.BitIdx.html [`BitOrder`]: ../order/trait.BitOrder.html [`BitPos`]: struct.BitPos.html [`BitSel`]: struct.BitSel.html **/ #[repr(transparent)] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitMask<M> where M: BitMemory { /// A mask of any number of bits to modify. mask: M, } impl<M> BitMask<M> where M: BitMemory { /// A full mask. pub const ALL: Self = Self { mask: M::ALL }; /// An empty mask. pub const ZERO: Self = Self { mask: M::ZERO }; /// Wraps a value as a bitmask. /// /// # Safety /// /// The caller must ensure that the mask value is correct in the caller’s /// provenance. /// /// # Parameters /// /// - `mask`: Any integer, to be reïnterpreted as a bitmask. /// /// # Returns /// /// The `mask` value as a bitmask. pub fn new(mask: M) -> Self { Self { mask } } } impl<M> Product<BitPos<M>> for BitMask<M> where M: BitMemory { fn product<I>(iter: I) -> Self where I: Iterator<Item = BitPos<M>> { iter.map(BitPos::select).product() } } impl<M> Product<BitSel<M>> for BitMask<M> where M: BitMemory { fn product<I>(iter: I) -> Self where I: Iterator<Item = BitSel<M>> { iter.fold(Self::ALL, BitAnd::bitand) } } /// Enable accumulation of a multi-bit mask from a sequence of position values. 
impl<M> Sum<BitPos<M>> for BitMask<M> where M: BitMemory { fn sum<I>(iter: I) -> Self where I: Iterator<Item = BitPos<M>> { iter.map(BitPos::select).sum() } } /// Enable accumulation of a multi-bit mask from a sequence of selector masks. impl<M> Sum<BitSel<M>> for BitMask<M> where M: BitMemory { fn sum<I>(iter: I) -> Self where I: Iterator<Item = BitSel<M>> { iter.fold(Self::ZERO, BitOr::bitor) } } impl<M> BitAnd<M> for BitMask<M> where M: BitMemory { type Output = Self; fn bitand(self, rhs: M) -> Self { Self { mask: self.mask & rhs, } } } impl<M> BitAnd<BitPos<M>> for BitMask<M> where M: BitMemory { type Output = Self; fn bitand(self, rhs: BitPos<M>) -> Self { self & rhs.select() } } impl<M> BitAnd<BitSel<M>> for BitMask<M> where M: BitMemory { type Output = Self; fn bitand(self, rhs: BitSel<M>) -> Self { Self { mask: self.mask & rhs.sel, } } } impl<M> BitOr<M> for BitMask<M> where M: BitMemory { type Output = Self; fn bitor(self, rhs: M) -> Self { Self { mask: self.mask | rhs, } } } /// Insert a position value into a multimask. impl<M> BitOr<BitPos<M>> for BitMask<M> where M: BitMemory { type Output = Self; fn bitor(self, rhs: BitPos<M>) -> Self { self | rhs.select() } } /// Insert a single selector into a multimask. impl<M> BitOr<BitSel<M>> for BitMask<M> where M: BitMemory { type Output = Self; fn bitor(self, rhs: BitSel<M>) -> Self { Self { mask: self.mask | rhs.sel, } } } impl<M> Deref for BitMask<M> where M: BitMemory { type Target = M; fn deref(&self) -> &Self::Target { &self.mask } } impl<M> Not for BitMask<M> where M: BitMemory { type Output = Self; fn not(self) -> Self { Self { mask: !self.mask } } } /** Internal convenience trait for wrapping numbers with appropriate markers. This trait must only be used on values that are known to be valid for their context. It provides an internal-only shorthand for wrapping integer literals and known-good values in marker types. It is only implemented on `u8`. 
**/ pub(crate) trait Indexable { /// Wraps a value as a `BitIdx<M>`. fn idx<M>(self) -> BitIdx<M> where M: BitMemory; /// Wraps a value as a `BitTail<M>`. fn tail<M>(self) -> BitTail<M> where M: BitMemory; /// Wraps a value as a `BitPos<M>`. fn pos<M>(self) -> BitPos<M> where M: BitMemory; } impl Indexable for u8 { fn idx<M>(self) -> BitIdx<M> where M: BitMemory { unsafe { BitIdx::<M>::new_unchecked(self) } } fn tail<M>(self) -> BitTail<M> where M: BitMemory { unsafe { BitTail::<M>::new_unchecked(self) } } fn pos<M>(self) -> BitPos<M> where M: BitMemory { unsafe { BitPos::<M>::new_unchecked(self) } } } #[cfg(test)] mod tests { use super::*; #[test] fn jump_far_up() { // isize::max_value() is 0x7f...ff, so the result bit will be one less // than the start bit. for n in 1 .. 8 { let (elt, bit) = n.idx::<u8>().offset(isize::max_value()); assert_eq!(elt, (isize::max_value() >> u8::INDX) + 1); assert_eq!(*bit, n - 1); } let (elt, bit) = 0u8.idx::<u8>().offset(isize::max_value()); assert_eq!(elt, isize::max_value() >> u8::INDX); assert_eq!(*bit, 7); } #[test] fn jump_far_down() { // isize::min_value() is 0x80...00, so the result bit will be equal to // the start bit for n in 0 .. 8 { let (elt, bit) = n.idx::<u8>().offset(isize::min_value()); assert_eq!(elt, isize::min_value() >> u8::INDX); assert_eq!(*bit, n); } } }
&Self
identifier_name
index.rs
/*! Indexing within memory elements. This module provides types which guarantee certain properties about selecting bits within a memory element. These types enable their use sites to explicitly declare the indexing behavior they require, and move safety checks from runtime to compile time. # Bit Indexing The [`BitIdx`] type represents the semantic index of a bit within a memory element. It does not perform bit positioning, and cannot be used to create a shift instruction or mask value. It is transformed into a value which can do these things – [`BitPos`] – through the [`BitOrder::at`] function. # Region End Marker `bitvec` uses “half-open” ranges, described by a starting point and a count of members that are live. This means that the “end” of a range is not the last member that is *in*cluded in the range, but rather the first member that is *ex*cluded from it. This requires the [`BitTail` end marker to include in its range the width of the element type (`8` for `u8`, etc), in order to mark that a region includes the very last bit in the element (index `7` for `u8`, etc`). The starting number for a dead region cannot be used to perform bit selection, but is used to provide range computation, so it is kept distinct from the indexing types. # Bit Positioning The [`BitPos`] type corresponds directly to a bit position in a memory element. Its value can be used to create shift instructions which select part of memory. It is only ever created by the `BitOrder::at` function. # Bit Selection The [`BitSel`] type is a one-hot mask encoding for a memory element. Unlike the previous types, which are range-limited integers, this type is a wrapper over a memory element and guarantees that it can be used as a mask value in `&` and `|` operations to modify exactly one bit. It is equivalent to `1 << BitPos.value()`. # Bit Masking Lastly, the [`BitMask`] type is a bitmask that permits any number of bits to be set or cleared. 
It is provided as a type rather than a bare value in order to clearly communicate that there is no restriction on what this mask may affect. [`BitIdx`]: struct.BitIdx.html [`BitMask`]: struct.BitMask.html [`BitOrder::at`]: ../order/trait.BitOrder.html#method.at [`BitPos`]: struct.BitPos.html [`BitSel`]: struct.BitSel.html [`BitTail`]: struct.BitTail.html !*/ use crate::mem::BitMemory; use core::{ fmt::{ self, Binary, Formatter, }, iter::{ Product, Sum, }, marker::PhantomData, ops::{ BitAnd, BitOr, Deref, Not, }, }; #[cfg(feature = "serde")] use core::convert::TryFrom; /** Indicates a semantic index of a bit within a memory element. This is a counter in the domain `0 .. M::BITS`, and marks a semantic position in the ordering sequence described by a [`BitOrder`] implementation. It is used for both position computation through `BitOrder` and range computation in [`BitPtr`]. # Type Parameters - `M`: The memory element type controlled by this index. [`BitOrder`]: ../order/trait.BitOrder.html [`BitPtr`]: ../pointer/struct.BitPtr.html **/ // If Rust had user-provided ranged integers, this would be communicable to the // compiler: // #[rustc_layout_scalar_valid_range_end(M::BITS)] #[repr(transparent)] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitIdx<M> where M: BitMemory { /// Semantic index within an element. Constrained to `0 .. M::BITS`. idx: u8, /// Marker for the indexed type. _ty: PhantomData<M>, } impl<M> BitIdx<M> where M: BitMemory { /// The zero index. pub const ZERO: Self = Self { idx: 0, _ty: PhantomData, }; /// Wraps a counter value as a known-good index of the `M` element type. /// /// # Parameters /// /// - `idx`: A semantic index within a `M` memory element. /// /// # Returns /// /// If `idx` is within the range `0 .. M::BITS`, then this returns the index /// value wrapped in the index type; if `idx` exceeds this range, then this /// returns `None`. 
pub fn new(idx: u8) -> Option<Self> { if idx >= M::BITS { return None; } Some(unsafe { Self::new_unchecked(idx) }) } /// Wraps a counter value as a known-good index of the `M` element type. /// /// # Parameters /// /// - `idx`: A semantic index within a `M` memory element. It must be in the /// range `0 .. M::BITS`. /// /// # Safety /// /// If `idx` is outside the range, then the produced value will cause errors /// and memory unsafety when used. #[inline] pub unsafe fn new_unchecked(idx: u8) -> Self { debug_assert!( idx < M::BITS, "Bit index {} cannot exceed type width {}", idx, M::BITS, ); Self { idx, _ty: PhantomData, } } /// Finds the destination bit a certain distance away from a starting bit. /// /// This produces the number of elements to move from the starting point, /// and then the bit index of the destination bit in the destination /// element. /// /// # Parameters /// /// - `self`: A bit index in some memory element, used as the starting /// position for the offset calculation. /// - `by`: The number of bits by which to move. Negative values move /// downwards in memory: towards index zero, then counting from index /// `M::MASK` to index zero in the next element lower in memory, repeating /// until arrival. Positive values move upwards in memory: towards index /// `M::MASK`, then counting from index zero to index `M::MASK` in the /// next element higher in memory, repeating until arrival. /// /// # Returns /// /// - `.0`: The number of elements by which to offset the caller’s element /// cursor. This value can be passed directly into [`ptr::offset`]. /// - `.1`: The bit index of the destination bit in the element selected by /// applying the `.0` pointer offset. /// /// # Safety /// /// `by` must not be far enough to cause the returned element offset value /// to, when applied to the original memory address via [`ptr::offset`], /// produce a reference out of bounds of the original allocation. This /// method has no way of checking this requirement. 
/// /// [`ptr::offset`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.offset pub(crate) fn offset(self, by: isize) -> (isize, Self) { let val = *self; /* Signed-add `*self` and the jump distance. Overflowing is the unlikely branch. The result is a bit index, and an overflow marker. `far` is permitted to be negative; this means that it is lower in memory than the origin bit. The number line has its origin at the front edge of the origin element, so `-1` is the *last* bit of the prior memory element. */ let (far, ovf) = by.overflowing_add(val as isize); // If the `isize` addition does not overflow, then the sum can be used // directly. if !ovf { // If `far` is in the origin element, then the jump moves zero // elements and produces `far` as an absolute index directly. if (0 .. M::BITS as isize).contains(&far) { (0, (far as u8).idx()) } /* Otherwise, downshift the bit distance to compute the number of elements moved in either direction, and mask to compute the absolute bit index in the destination element. */ else { (far >> M::INDX, (far as u8 & M::MASK).idx()) } } else { /* Overflowing `isize` addition happens to produce ordinary `usize` addition. In point of fact, `isize` addition and `usize` addition are the same machine instruction to perform the sum; it is merely the signed interpretation of the sum that differs. The sum can be recast back to `usize` without issue. */ let far = far as usize; // This is really only needed in order to prevent sign-extension of // the downshift; once shifted, the value can be safely re-signed. ((far >> M::INDX) as isize, (far as u8 & M::MASK).idx()) } } /// Computes the size of a span from `self` for `len` bits. /// /// Spans always extend upwards in memory. /// /// # Parameters /// /// - `self`: The starting bit position of the span. /// - `len`: The number of bits to include in the span. /// /// # Returns /// /// - `.0`: The number of elements of `M` included in the span. 
If `len` is /// `0`, this will be `0`; otherwise, it will be at least one. /// - `.1`: The index of the first dead bit *after* the span. If `self` and /// `len` are both `0`, this will be `0`; otherwise, it will be in the /// domain `1 ..= M::BITS`. /// /// # Notes /// /// This defers to [`BitTail::span`], because `BitTail` is a strict superset /// of `BitIdx` (it is `{ BitIdx | M::BITS }`), and spans frequently begin /// from the tail of a slice in this crate. The `offset` function is *not* /// implemented on `BitTail`, and remains on `BitIdx` because offsets can /// only be computed from bit addresses that exist. It does not make sense /// to compute the offset from a `M::BITS` tail. /// /// [`BitTail::span`]: struct.BitTail.html#method.span #[inline] pub(crate) fn span(self, len: usize) -> (usize, BitTail<M>) { unsafe { BitTail::new_unchecked(*self) }.span(len) } } impl<M> Binary for BitIdx<M> where M: BitMemory { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { write!(fmt, "0b{:0>1$b}", self.idx, M::INDX as usize) } } impl<M> Deref for BitIdx<M> where M: BitMemory { type Target = u8; fn deref(&self) -> &Self::Target { &self.idx } } #[cfg(feature = "serde")] impl<M> TryFrom<u8> for BitIdx<M> where M: BitMemory { type Error = &'static str; fn try_from(idx: u8) -> Result<Self, Self::Error> { Self::new(idx).ok_or( "Attempted to construct a `BitIdx` with an index out of range", ) } } /** Indicates a semantic index of a dead bit *beyond* a memory element. This type is equivalent to `BitIdx<M>`, except that it includes `M::BITS` in its domain. Instances of this type will only ever contain `0` when the span they describe is *empty*. Non-empty spans always cycle through the domain `1 ..= M::BITS`. This type cannot be used for indexing, and does not translate to `BitPos<M>`. This type has no behavior other than viewing its internal `u8` for arithmetic. # Type Parameters - `M`: The memory element type controlled by this tail. 
**/ // #[rustc_layout_scalar_valid_range_end(M::BITS + 1)] #[repr(transparent)] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitTail<M> where M: BitMemory { /// Semantic index *after* an element. Constrained to `0 ..= M::BITS`. end: u8, /// Marker for the tailed type. _ty: PhantomData<M>, } impl<M> BitTail<M> where M: BitMemory
{ /// The termination index. pub const END: Self = Self { end: M::BITS, _ty: PhantomData, }; /// Mark that `end` is a tail index for a type. /// /// # Parameters /// /// - `end` must be in the range `0 ..= M::BITS`. pub(crate) unsafe fn new_unchecked(end: u8) -> Self { debug_assert!( end <= M::BITS, "Bit tail {} cannot surpass type width {}", end, M::BITS, ); Self { end, _ty: PhantomData, } } pub(crate) fn span(self, len: usize) -> (usize, Self) { let val = *self; debug_assert!( val <= M::BITS, "Tail out of range: {} overflows type width {}", val, M::BITS, ); if len == 0 { return (0, self); } let head = val & M::MASK; let bits_in_head = (M::BITS - head) as usize; if len <= bits_in_head { return (1, (head + len as u8).tail()); } let bits_after_head = len - bits_in_head; let elts = bits_after_head >> M::INDX; let tail = bits_after_head as u8 & M::MASK; let is_zero = (tail == 0) as u8; let edges = 2 - is_zero as usize; (elts + edges, ((is_zero << M::INDX) | tail).tail()) /* The above expression is the branchless equivalent of this structure: if tail == 0 { (elts + 1, M::BITS.tail()) } else { (elts + 2, tail.tail()) } */ } } impl<M> Deref for BitTail<M> where M: BitMemory { type Target = u8; fn deref(&self) -> &Self::Target { &self.end } } /** Indicates a real electrical index within an element. This type is produced by [`BitOrder`] implementors, and marks a specific electrical bit within a memory element, rather than [`BitIdx`]’s semantic bit. # Type Parameters - `M`: A `BitMemory` element which provides bounds-checking information. The [`new`] constructor uses [`M::BITS`] to ensure that constructed `BitPos` instances are always valid to use within `M` elements. 
[`BitIdx`]: struct.BitIdx.html [`BitOrder`]: ../order/trait.BitOrder.html [`M::BITS`]: ../mem/trait.BitMemory.html#associatedconstant.BITS [`new`]: #method.new **/ // #[rustc_layout_scalar_valid_range_end(M::BITS)] #[repr(transparent)] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitPos<M> where M: BitMemory { /// Electrical position within an element. Constrained to `0 .. M::BITS`. pos: u8, /// Marker for the positioned type. _ty: PhantomData<M>, } impl<M> BitPos<M> where M: BitMemory { /// Produce a new bit position marker at a valid position value. /// /// `BitOrder` implementations should prefer this method, but *may* use /// [`::new_unchecked`] if they can guarantee that the range invariant is /// upheld. /// /// # Parameters /// /// - `pos`: The bit position value to encode. It must be in the range `0 .. /// M::BITS`. /// /// # Panics /// /// This function panics if `pos` is greater than or equal to `M::BITS`. /// /// [`::new_unchecked`]: #method.new_unchecked #[inline] pub fn new(pos: u8) -> Self { assert!( pos < M::BITS, "Bit position {} cannot exceed type width {}", pos, M::BITS, ); Self { pos, _ty: PhantomData, } } /// Produce a new bit position marker at any position value. /// /// # Safety /// /// The caller *must* ensure that `pos` is less than `M::BITS`. `BitOrder` /// implementations should prefer [`::new`], which panics on range failure. /// /// # Parameters /// /// - `pos`: The bit position value to encode. This must be in the range `0 /// .. M::BITS`. /// /// # Returns /// /// `pos` wrapped in the `BitPos` marker type. /// /// # Panics /// /// This function panics if `pos` is greater than or equal to `M::BITS`, but /// only in debug builds. It does not inspect `pos` in release builds. 
/// /// [`::new`]: #method.new #[inline] pub unsafe fn new_unchecked(pos: u8) -> Self { debug_assert!( pos < M::BITS, "Bit position {} cannot exceed type width {}", pos, M::BITS, ); Self { pos, _ty: PhantomData, } } /// Produces a one-hot selector mask from a position value. /// /// This is equivalent to `1 << *self`. /// /// # Parameters /// /// - `self` /// /// # Returns /// /// A one-hot selector mask with the bit at `*self` set. #[inline] pub fn select(self) -> BitSel<M> { unsafe { BitSel::new_unchecked(M::ONE << *self) } } } impl<M> Deref for BitPos<M> where M: BitMemory { type Target = u8; fn deref(&self) -> &Self::Target { &self.pos } } /** Wrapper type indicating a one-hot encoding of a bit mask for an element. This type is produced by [`BitOrder`] implementations to speed up access to the underlying memory. It ensures that masks have exactly one set bit, and can safely be used as a mask for read/write access to memory. # Type Parameters - `M`: The storage type being masked. [`BitOrder`]: ../order/trait.BitOrder.html **/ #[repr(transparent)] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitSel<M> where M: BitMemory { /// Mask value. sel: M, } impl<M> BitSel<M> where M: BitMemory { /// Produce a new bit-mask wrapper around a one-hot mask value. /// /// `BitOrder` implementations should prefer this method, but *may* use /// [`::new_unchecked`] if they can guarantee that the one-hot invariant is /// upheld. /// /// # Parameters /// /// - `mask`: The mask value to encode. This **must** have exactly one bit /// set high, and all others set low. /// /// # Returns /// /// `mask` wrapped in the `BitMask` marker type. /// /// # Panics /// /// This function unconditionally panics if `mask` has zero or multiple bits /// set high. 
/// /// [`::new_unchecked`]: #method.new_unchecked #[inline] pub fn new(sel: M) -> Self { assert!( sel.count_ones() == 1, "Masks are required to have exactly one set bit: {:0>1$b}", sel, M::BITS as usize, ); Self { sel } } /// Produce a new bit-mask wrapper around any value. /// /// # Safety /// /// The caller *must* ensure that `mask` has exactly one bit set. `BitOrder` /// implementations should prefer [`::new`], which always panics on failure. /// /// # Parameters /// /// - `mask`: The mask value to encode. This must have exactly one bit set. /// Failure to uphold this requirement will introduce uncontrolled state /// contamination. /// /// # Returns /// /// `mask` wrapped in the `BitMask` marker type. /// /// # Panics /// /// This function panics if `mask` has zero or multiple bits set, only in /// debug builds. It does not inspect `mask` in release builds. /// /// [`::new`]: #method.new #[inline] pub unsafe fn new_unchecked(sel: M) -> Self { debug_assert!( sel.count_ones() == 1, "Masks are required to have exactly one set bit: {:0>1$b}", sel, M::BITS as usize, ); Self { sel } } } impl<M> Deref for BitSel<M> where M: BitMemory { type Target = M; fn deref(&self) -> &Self::Target { &self.sel } } /** A multi-bit selector mask. Unlike [`BitSel`], which enforces a strict one-hot mask encoding, this mask type permits any number of bits to be set or unset. This is used to combine batch operations in an element. It is only constructed by accumulating [`BitPos`] or [`BitSel`] values. As `BitSel` is only constructed from `BitPos`, and `BitPos` is only constructed from [`BitIdx`] and [`BitOrder`], this enforces a chain of responsibility to prove that a given multimask is safe. 
[`BitIdx`]: struct.BitIdx.html [`BitOrder`]: ../order/trait.BitOrder.html [`BitPos`]: struct.BitPos.html [`BitSel`]: struct.BitSel.html **/ #[repr(transparent)] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitMask<M> where M: BitMemory { /// A mask of any number of bits to modify. mask: M, } impl<M> BitMask<M> where M: BitMemory { /// A full mask. pub const ALL: Self = Self { mask: M::ALL }; /// An empty mask. pub const ZERO: Self = Self { mask: M::ZERO }; /// Wraps a value as a bitmask. /// /// # Safety /// /// The caller must ensure that the mask value is correct in the caller’s /// provenance. /// /// # Parameters /// /// - `mask`: Any integer, to be reïnterpreted as a bitmask. /// /// # Returns /// /// The `mask` value as a bitmask. pub fn new(mask: M) -> Self { Self { mask } } } impl<M> Product<BitPos<M>> for BitMask<M> where M: BitMemory { fn product<I>(iter: I) -> Self where I: Iterator<Item = BitPos<M>> { iter.map(BitPos::select).product() } } impl<M> Product<BitSel<M>> for BitMask<M> where M: BitMemory { fn product<I>(iter: I) -> Self where I: Iterator<Item = BitSel<M>> { iter.fold(Self::ALL, BitAnd::bitand) } } /// Enable accumulation of a multi-bit mask from a sequence of position values. impl<M> Sum<BitPos<M>> for BitMask<M> where M: BitMemory { fn sum<I>(iter: I) -> Self where I: Iterator<Item = BitPos<M>> { iter.map(BitPos::select).sum() } } /// Enable accumulation of a multi-bit mask from a sequence of selector masks. 
impl<M> Sum<BitSel<M>> for BitMask<M> where M: BitMemory { fn sum<I>(iter: I) -> Self where I: Iterator<Item = BitSel<M>> { iter.fold(Self::ZERO, BitOr::bitor) } } impl<M> BitAnd<M> for BitMask<M> where M: BitMemory { type Output = Self; fn bitand(self, rhs: M) -> Self { Self { mask: self.mask & rhs, } } } impl<M> BitAnd<BitPos<M>> for BitMask<M> where M: BitMemory { type Output = Self; fn bitand(self, rhs: BitPos<M>) -> Self { self & rhs.select() } } impl<M> BitAnd<BitSel<M>> for BitMask<M> where M: BitMemory { type Output = Self; fn bitand(self, rhs: BitSel<M>) -> Self { Self { mask: self.mask & rhs.sel, } } } impl<M> BitOr<M> for BitMask<M> where M: BitMemory { type Output = Self; fn bitor(self, rhs: M) -> Self { Self { mask: self.mask | rhs, } } } /// Insert a position value into a multimask. impl<M> BitOr<BitPos<M>> for BitMask<M> where M: BitMemory { type Output = Self; fn bitor(self, rhs: BitPos<M>) -> Self { self | rhs.select() } } /// Insert a single selector into a multimask. impl<M> BitOr<BitSel<M>> for BitMask<M> where M: BitMemory { type Output = Self; fn bitor(self, rhs: BitSel<M>) -> Self { Self { mask: self.mask | rhs.sel, } } } impl<M> Deref for BitMask<M> where M: BitMemory { type Target = M; fn deref(&self) -> &Self::Target { &self.mask } } impl<M> Not for BitMask<M> where M: BitMemory { type Output = Self; fn not(self) -> Self { Self { mask: !self.mask } } } /** Internal convenience trait for wrapping numbers with appropriate markers. This trait must only be used on values that are known to be valid for their context. It provides an internal-only shorthand for wrapping integer literals and known-good values in marker types. It is only implemented on `u8`. **/ pub(crate) trait Indexable { /// Wraps a value as a `BitIdx<M>`. fn idx<M>(self) -> BitIdx<M> where M: BitMemory; /// Wraps a value as a `BitTail<M>`. fn tail<M>(self) -> BitTail<M> where M: BitMemory; /// Wraps a value as a `BitPos<M>`. 
fn pos<M>(self) -> BitPos<M> where M: BitMemory; } impl Indexable for u8 { fn idx<M>(self) -> BitIdx<M> where M: BitMemory { unsafe { BitIdx::<M>::new_unchecked(self) } } fn tail<M>(self) -> BitTail<M> where M: BitMemory { unsafe { BitTail::<M>::new_unchecked(self) } } fn pos<M>(self) -> BitPos<M> where M: BitMemory { unsafe { BitPos::<M>::new_unchecked(self) } } } #[cfg(test)] mod tests { use super::*; #[test] fn jump_far_up() { // isize::max_value() is 0x7f...ff, so the result bit will be one less // than the start bit. for n in 1 .. 8 { let (elt, bit) = n.idx::<u8>().offset(isize::max_value()); assert_eq!(elt, (isize::max_value() >> u8::INDX) + 1); assert_eq!(*bit, n - 1); } let (elt, bit) = 0u8.idx::<u8>().offset(isize::max_value()); assert_eq!(elt, isize::max_value() >> u8::INDX); assert_eq!(*bit, 7); } #[test] fn jump_far_down() { // isize::min_value() is 0x80...00, so the result bit will be equal to // the start bit for n in 0 .. 8 { let (elt, bit) = n.idx::<u8>().offset(isize::min_value()); assert_eq!(elt, isize::min_value() >> u8::INDX); assert_eq!(*bit, n); } } }
random_line_split
transaction.rs
use std::{ collections::HashMap, future::Future, pin::Pin, sync::Arc, task::{Context, Poll}, }; use futures::{ stream::{FuturesUnordered, StreamExt}, FutureExt, }; use tower::{Service, ServiceExt}; use tracing::Instrument; use zebra_chain::{ block, parameters::{Network, NetworkUpgrade}, transaction::{self, HashType, Transaction}, transparent, }; use zebra_script::CachedFfiTransaction; use zebra_state as zs; use crate::{error::TransactionError, primitives, script, BoxError}; mod check; /// Asynchronous transaction verification. #[derive(Debug, Clone)] pub struct Verifier<ZS> { network: Network, script_verifier: script::Verifier<ZS>, // spend_verifier: groth16::Verifier, // output_verifier: groth16::Verifier, // joinsplit_verifier: groth16::Verifier, } impl<ZS> Verifier<ZS> where ZS: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static, ZS::Future: Send + 'static, { // XXX: how should this struct be constructed? pub fn new(network: Network, script_verifier: script::Verifier<ZS>) -> Self { // let (spend_verifier, output_verifier, joinsplit_verifier) = todo!(); Self { network, script_verifier, // spend_verifier, // output_verifier, // joinsplit_verifier, } } } /// Specifies whether a transaction should be verified as part of a block or as /// part of the mempool. /// /// Transaction verification has slightly different consensus rules, depending on /// whether the transaction is to be included in a block on in the mempool. #[allow(dead_code)] pub enum Request { /// Verify the supplied transaction as part of a block. Block { /// The transaction itself. transaction: Arc<Transaction>, /// Additional UTXOs which are known at the time of verification. known_utxos: Arc<HashMap<transparent::OutPoint, zs::Utxo>>, /// The height of the block containing this transaction, used to /// determine the applicable network upgrade. height: block::Height, }, /// Verify the supplied transaction as part of the mempool. 
Mempool { /// The transaction itself. transaction: Arc<Transaction>, /// Additional UTXOs which are known at the time of verification. known_utxos: Arc<HashMap<transparent::OutPoint, zs::Utxo>>, /// Bug: this field should be the next block height, because some /// consensus rules depend on the exact height. See #1683. upgrade: NetworkUpgrade, }, } impl<ZS> Service<Request> for Verifier<ZS> where ZS: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static, ZS::Future: Send + 'static, { type Response = transaction::Hash; type Error = TransactionError; type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>; fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { Poll::Ready(Ok(())) } // TODO: break up each chunk into its own method fn call(&mut self, req: Request) -> Self::Future
{ let is_mempool = match req { Request::Block { .. } => false, Request::Mempool { .. } => true, }; if is_mempool { // XXX determine exactly which rules apply to mempool transactions unimplemented!(); } let (tx, known_utxos, upgrade) = match req { Request::Block { transaction, known_utxos, height, } => { let upgrade = NetworkUpgrade::current(self.network, height); (transaction, known_utxos, upgrade) } Request::Mempool { transaction, known_utxos, upgrade, } => (transaction, known_utxos, upgrade), }; let mut spend_verifier = primitives::groth16::SPEND_VERIFIER.clone(); let mut output_verifier = primitives::groth16::OUTPUT_VERIFIER.clone(); let mut ed25519_verifier = primitives::ed25519::VERIFIER.clone(); let mut redjubjub_verifier = primitives::redjubjub::VERIFIER.clone(); let mut script_verifier = self.script_verifier.clone(); let span = tracing::debug_span!("tx", hash = %tx.hash()); async move { tracing::trace!(?tx); match &*tx { Transaction::V1 { .. } | Transaction::V2 { .. } | Transaction::V3 { .. } => { tracing::debug!(?tx, "got transaction with wrong version"); Err(TransactionError::WrongVersion) } Transaction::V4 { inputs, // outputs, // lock_time, // expiry_height, joinsplit_data, sapling_shielded_data, .. } => { // A set of asynchronous checks which must all succeed. // We finish by waiting on these below. let mut async_checks = FuturesUnordered::new(); // Do basic checks first check::has_inputs_and_outputs(&tx)?; // Handle transparent inputs and outputs. 
if tx.is_coinbase() { check::coinbase_tx_no_joinsplit_or_spend(&tx)?; } else { // feed all of the inputs to the script and shielded verifiers // the script_verifier also checks transparent sighashes, using its own implementation let cached_ffi_transaction = Arc::new(CachedFfiTransaction::new(tx.clone())); for input_index in 0..inputs.len() { let rsp = script_verifier.ready_and().await?.call(script::Request { upgrade, known_utxos: known_utxos.clone(), cached_ffi_transaction: cached_ffi_transaction.clone(), input_index, }); async_checks.push(rsp); } } let shielded_sighash = tx.sighash( upgrade, HashType::ALL, None, ); if let Some(joinsplit_data) = joinsplit_data { // XXX create a method on JoinSplitData // that prepares groth16::Items with the correct proofs // and proof inputs, handling interstitial treestates // correctly. // Then, pass those items to self.joinsplit to verify them. // Consensus rule: The joinSplitSig MUST represent a // valid signature, under joinSplitPubKey, of the // sighash. // // Queue the validation of the JoinSplit signature while // adding the resulting future to our collection of // async checks that (at a minimum) must pass for the // transaction to verify. // // https://zips.z.cash/protocol/protocol.pdf#sproutnonmalleability // https://zips.z.cash/protocol/protocol.pdf#txnencodingandconsensus let rsp = ed25519_verifier .ready_and() .await? .call((joinsplit_data.pub_key, joinsplit_data.sig, &shielded_sighash).into()); async_checks.push(rsp.boxed()); } if let Some(shielded_data) = sapling_shielded_data { check::shielded_balances_match(&shielded_data)?; for spend in shielded_data.spends_per_anchor() { // Consensus rule: cv and rk MUST NOT be of small // order, i.e. [h_J]cv MUST NOT be 𝒪_J and [h_J]rk // MUST NOT be 𝒪_J. 
// // https://zips.z.cash/protocol/protocol.pdf#spenddesc check::spend_cv_rk_not_small_order(&spend)?; // Consensus rule: The proof π_ZKSpend MUST be valid // given a primary input formed from the other // fields except spendAuthSig. // // Queue the verification of the Groth16 spend proof // for each Spend description while adding the // resulting future to our collection of async // checks that (at a minimum) must pass for the // transaction to verify. let spend_rsp = spend_verifier .ready_and() .await? .call(primitives::groth16::ItemWrapper::from(&spend).into()); async_checks.push(spend_rsp.boxed()); // Consensus rule: The spend authorization signature // MUST be a valid SpendAuthSig signature over // SigHash using rk as the validating key. // // Queue the validation of the RedJubjub spend // authorization signature for each Spend // description while adding the resulting future to // our collection of async checks that (at a // minimum) must pass for the transaction to verify. let rsp = redjubjub_verifier .ready_and() .await? .call((spend.rk, spend.spend_auth_sig, &shielded_sighash).into()); async_checks.push(rsp.boxed()); } for output in shielded_data.outputs() { // Consensus rule: cv and wpk MUST NOT be of small // order, i.e. [h_J]cv MUST NOT be 𝒪_J and [h_J]wpk // MUST NOT be 𝒪_J. // // https://zips.z.cash/protocol/protocol.pdf#outputdesc check::output_cv_epk_not_small_order(output)?; // Consensus rule: The proof π_ZKOutput MUST be // valid given a primary input formed from the other // fields except C^enc and C^out. // // Queue the verification of the Groth16 output // proof for each Output description while adding // the resulting future to our collection of async // checks that (at a minimum) must pass for the // transaction to verify. let output_rsp = output_verifier .ready_and() .await? 
.call(primitives::groth16::ItemWrapper::from(output).into()); async_checks.push(output_rsp.boxed()); } let bvk = shielded_data.binding_verification_key(); // TODO: enable async verification and remove this block - #1939 { let item: zebra_chain::primitives::redjubjub::batch::Item = (bvk, shielded_data.binding_sig, &shielded_sighash).into(); item.verify_single().unwrap_or_else(|binding_sig_error| { let binding_sig_error = binding_sig_error.to_string(); tracing::warn!(%binding_sig_error, "ignoring"); metrics::counter!("zebra.error.sapling.binding", 1, "kind" => binding_sig_error); }); // Ignore errors until binding signatures are fixed //.map_err(|e| BoxError::from(Box::new(e)))?; } let _rsp = redjubjub_verifier .ready_and() .await? .call((bvk, shielded_data.binding_sig, &shielded_sighash).into()) .boxed(); // TODO: stop ignoring binding signature errors - #1939 // async_checks.push(rsp); } // Finally, wait for all asynchronous checks to complete // successfully, or fail verification if they error. while let Some(check) = async_checks.next().await { tracing::trace!(?check, remaining = async_checks.len()); check?; } Ok(tx.hash()) } Transaction::V5 { .. } => { unimplemented!("v5 transaction validation as specified in ZIP-216, ZIP-224, ZIP-225, and ZIP-244") } } } .instrument(span) .boxed() } }
identifier_body
transaction.rs
use std::{ collections::HashMap, future::Future, pin::Pin, sync::Arc, task::{Context, Poll}, }; use futures::{ stream::{FuturesUnordered, StreamExt}, FutureExt, }; use tower::{Service, ServiceExt}; use tracing::Instrument; use zebra_chain::{ block, parameters::{Network, NetworkUpgrade}, transaction::{self, HashType, Transaction}, transparent, }; use zebra_script::CachedFfiTransaction; use zebra_state as zs; use crate::{error::TransactionError, primitives, script, BoxError}; mod check; /// Asynchronous transaction verification. #[derive(Debug, Clone)] pub struct Verifier<ZS> { network: Network, script_verifier: script::Verifier<ZS>, // spend_verifier: groth16::Verifier, // output_verifier: groth16::Verifier, // joinsplit_verifier: groth16::Verifier, } impl<ZS> Verifier<ZS> where ZS: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static, ZS::Future: Send + 'static, { // XXX: how should this struct be constructed? pub fn new(network: Network, script_verifier: script::Verifier<ZS>) -> Self { // let (spend_verifier, output_verifier, joinsplit_verifier) = todo!(); Self { network, script_verifier, // spend_verifier, // output_verifier, // joinsplit_verifier, } } } /// Specifies whether a transaction should be verified as part of a block or as /// part of the mempool. /// /// Transaction verification has slightly different consensus rules, depending on /// whether the transaction is to be included in a block on in the mempool. #[allow(dead_code)] pub enum Request { /// Verify the supplied transaction as part of a block. Block { /// The transaction itself. transaction: Arc<Transaction>, /// Additional UTXOs which are known at the time of verification. known_utxos: Arc<HashMap<transparent::OutPoint, zs::Utxo>>, /// The height of the block containing this transaction, used to /// determine the applicable network upgrade. height: block::Height, }, /// Verify the supplied transaction as part of the mempool. 
Mempool { /// The transaction itself. transaction: Arc<Transaction>, /// Additional UTXOs which are known at the time of verification. known_utxos: Arc<HashMap<transparent::OutPoint, zs::Utxo>>, /// Bug: this field should be the next block height, because some /// consensus rules depend on the exact height. See #1683. upgrade: NetworkUpgrade, }, } impl<ZS> Service<Request> for Verifier<ZS> where ZS: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static, ZS::Future: Send + 'static, { type Response = transaction::Hash; type Error = TransactionError; type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>; fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { Poll::Ready(Ok(())) } // TODO: break up each chunk into its own method fn call(&mut self, req: Request) -> Self::Future { let is_mempool = match req { Request::Block { .. } => false, Request::Mempool { .. } => true, }; if is_mempool { // XXX determine exactly which rules apply to mempool transactions unimplemented!(); } let (tx, known_utxos, upgrade) = match req { Request::Block { transaction, known_utxos, height, } => { let upgrade = NetworkUpgrade::current(self.network, height); (transaction, known_utxos, upgrade) } Request::Mempool { transaction, known_utxos, upgrade, } => (transaction, known_utxos, upgrade), }; let mut spend_verifier = primitives::groth16::SPEND_VERIFIER.clone(); let mut output_verifier = primitives::groth16::OUTPUT_VERIFIER.clone(); let mut ed25519_verifier = primitives::ed25519::VERIFIER.clone(); let mut redjubjub_verifier = primitives::redjubjub::VERIFIER.clone(); let mut script_verifier = self.script_verifier.clone(); let span = tracing::debug_span!("tx", hash = %tx.hash()); async move { tracing::trace!(?tx); match &*tx { Transaction::V1 { .. } | Transaction::V2 { .. } | Transaction::V3 { .. 
} => { tracing::debug!(?tx, "got transaction with wrong version"); Err(TransactionError::WrongVersion) } Transaction::V4 { inputs, // outputs, // lock_time, // expiry_height, joinsplit_data, sapling_shielded_data, .. } => { // A set of asynchronous checks which must all succeed. // We finish by waiting on these below. let mut async_checks = FuturesUnordered::new(); // Do basic checks first check::has_inputs_and_outputs(&tx)?; // Handle transparent inputs and outputs. if tx.is_coinbase() { check::coinbase_tx_no_joinsplit_or_spend(&tx)?; } else { // feed all of the inputs to the script and shielded verifiers // the script_verifier also checks transparent sighashes, using its own implementation let cached_ffi_transaction = Arc::new(CachedFfiTransaction::new(tx.clone())); for input_index in 0..inputs.len() { let rsp = script_verifier.ready_and().await?.call(script::Request { upgrade, known_utxos: known_utxos.clone(), cached_ffi_transaction: cached_ffi_transaction.clone(), input_index, }); async_checks.push(rsp); } } let shielded_sighash = tx.sighash( upgrade, HashType::ALL, None, ); if let Some(joinsplit_data) = joinsplit_data { // XXX create a method on JoinSplitData // that prepares groth16::Items with the correct proofs // and proof inputs, handling interstitial treestates // correctly. // Then, pass those items to self.joinsplit to verify them. // Consensus rule: The joinSplitSig MUST represent a // valid signature, under joinSplitPubKey, of the // sighash. // // Queue the validation of the JoinSplit signature while
// https://zips.z.cash/protocol/protocol.pdf#sproutnonmalleability // https://zips.z.cash/protocol/protocol.pdf#txnencodingandconsensus let rsp = ed25519_verifier .ready_and() .await? .call((joinsplit_data.pub_key, joinsplit_data.sig, &shielded_sighash).into()); async_checks.push(rsp.boxed()); } if let Some(shielded_data) = sapling_shielded_data { check::shielded_balances_match(&shielded_data)?; for spend in shielded_data.spends_per_anchor() { // Consensus rule: cv and rk MUST NOT be of small // order, i.e. [h_J]cv MUST NOT be 𝒪_J and [h_J]rk // MUST NOT be 𝒪_J. // // https://zips.z.cash/protocol/protocol.pdf#spenddesc check::spend_cv_rk_not_small_order(&spend)?; // Consensus rule: The proof π_ZKSpend MUST be valid // given a primary input formed from the other // fields except spendAuthSig. // // Queue the verification of the Groth16 spend proof // for each Spend description while adding the // resulting future to our collection of async // checks that (at a minimum) must pass for the // transaction to verify. let spend_rsp = spend_verifier .ready_and() .await? .call(primitives::groth16::ItemWrapper::from(&spend).into()); async_checks.push(spend_rsp.boxed()); // Consensus rule: The spend authorization signature // MUST be a valid SpendAuthSig signature over // SigHash using rk as the validating key. // // Queue the validation of the RedJubjub spend // authorization signature for each Spend // description while adding the resulting future to // our collection of async checks that (at a // minimum) must pass for the transaction to verify. let rsp = redjubjub_verifier .ready_and() .await? .call((spend.rk, spend.spend_auth_sig, &shielded_sighash).into()); async_checks.push(rsp.boxed()); } for output in shielded_data.outputs() { // Consensus rule: cv and wpk MUST NOT be of small // order, i.e. [h_J]cv MUST NOT be 𝒪_J and [h_J]wpk // MUST NOT be 𝒪_J. 
// // https://zips.z.cash/protocol/protocol.pdf#outputdesc check::output_cv_epk_not_small_order(output)?; // Consensus rule: The proof π_ZKOutput MUST be // valid given a primary input formed from the other // fields except C^enc and C^out. // // Queue the verification of the Groth16 output // proof for each Output description while adding // the resulting future to our collection of async // checks that (at a minimum) must pass for the // transaction to verify. let output_rsp = output_verifier .ready_and() .await? .call(primitives::groth16::ItemWrapper::from(output).into()); async_checks.push(output_rsp.boxed()); } let bvk = shielded_data.binding_verification_key(); // TODO: enable async verification and remove this block - #1939 { let item: zebra_chain::primitives::redjubjub::batch::Item = (bvk, shielded_data.binding_sig, &shielded_sighash).into(); item.verify_single().unwrap_or_else(|binding_sig_error| { let binding_sig_error = binding_sig_error.to_string(); tracing::warn!(%binding_sig_error, "ignoring"); metrics::counter!("zebra.error.sapling.binding", 1, "kind" => binding_sig_error); }); // Ignore errors until binding signatures are fixed //.map_err(|e| BoxError::from(Box::new(e)))?; } let _rsp = redjubjub_verifier .ready_and() .await? .call((bvk, shielded_data.binding_sig, &shielded_sighash).into()) .boxed(); // TODO: stop ignoring binding signature errors - #1939 // async_checks.push(rsp); } // Finally, wait for all asynchronous checks to complete // successfully, or fail verification if they error. while let Some(check) = async_checks.next().await { tracing::trace!(?check, remaining = async_checks.len()); check?; } Ok(tx.hash()) } Transaction::V5 { .. } => { unimplemented!("v5 transaction validation as specified in ZIP-216, ZIP-224, ZIP-225, and ZIP-244") } } } .instrument(span) .boxed() } }
// adding the resulting future to our collection of // async checks that (at a minimum) must pass for the // transaction to verify. //
random_line_split
transaction.rs
use std::{ collections::HashMap, future::Future, pin::Pin, sync::Arc, task::{Context, Poll}, }; use futures::{ stream::{FuturesUnordered, StreamExt}, FutureExt, }; use tower::{Service, ServiceExt}; use tracing::Instrument; use zebra_chain::{ block, parameters::{Network, NetworkUpgrade}, transaction::{self, HashType, Transaction}, transparent, }; use zebra_script::CachedFfiTransaction; use zebra_state as zs; use crate::{error::TransactionError, primitives, script, BoxError}; mod check; /// Asynchronous transaction verification. #[derive(Debug, Clone)] pub struct Verifier<ZS> { network: Network, script_verifier: script::Verifier<ZS>, // spend_verifier: groth16::Verifier, // output_verifier: groth16::Verifier, // joinsplit_verifier: groth16::Verifier, } impl<ZS> Verifier<ZS> where ZS: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static, ZS::Future: Send + 'static, { // XXX: how should this struct be constructed? pub fn new(network: Network, script_verifier: script::Verifier<ZS>) -> Self { // let (spend_verifier, output_verifier, joinsplit_verifier) = todo!(); Self { network, script_verifier, // spend_verifier, // output_verifier, // joinsplit_verifier, } } } /// Specifies whether a transaction should be verified as part of a block or as /// part of the mempool. /// /// Transaction verification has slightly different consensus rules, depending on /// whether the transaction is to be included in a block on in the mempool. #[allow(dead_code)] pub enum
{ /// Verify the supplied transaction as part of a block. Block { /// The transaction itself. transaction: Arc<Transaction>, /// Additional UTXOs which are known at the time of verification. known_utxos: Arc<HashMap<transparent::OutPoint, zs::Utxo>>, /// The height of the block containing this transaction, used to /// determine the applicable network upgrade. height: block::Height, }, /// Verify the supplied transaction as part of the mempool. Mempool { /// The transaction itself. transaction: Arc<Transaction>, /// Additional UTXOs which are known at the time of verification. known_utxos: Arc<HashMap<transparent::OutPoint, zs::Utxo>>, /// Bug: this field should be the next block height, because some /// consensus rules depend on the exact height. See #1683. upgrade: NetworkUpgrade, }, } impl<ZS> Service<Request> for Verifier<ZS> where ZS: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static, ZS::Future: Send + 'static, { type Response = transaction::Hash; type Error = TransactionError; type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>; fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { Poll::Ready(Ok(())) } // TODO: break up each chunk into its own method fn call(&mut self, req: Request) -> Self::Future { let is_mempool = match req { Request::Block { .. } => false, Request::Mempool { .. 
} => true, }; if is_mempool { // XXX determine exactly which rules apply to mempool transactions unimplemented!(); } let (tx, known_utxos, upgrade) = match req { Request::Block { transaction, known_utxos, height, } => { let upgrade = NetworkUpgrade::current(self.network, height); (transaction, known_utxos, upgrade) } Request::Mempool { transaction, known_utxos, upgrade, } => (transaction, known_utxos, upgrade), }; let mut spend_verifier = primitives::groth16::SPEND_VERIFIER.clone(); let mut output_verifier = primitives::groth16::OUTPUT_VERIFIER.clone(); let mut ed25519_verifier = primitives::ed25519::VERIFIER.clone(); let mut redjubjub_verifier = primitives::redjubjub::VERIFIER.clone(); let mut script_verifier = self.script_verifier.clone(); let span = tracing::debug_span!("tx", hash = %tx.hash()); async move { tracing::trace!(?tx); match &*tx { Transaction::V1 { .. } | Transaction::V2 { .. } | Transaction::V3 { .. } => { tracing::debug!(?tx, "got transaction with wrong version"); Err(TransactionError::WrongVersion) } Transaction::V4 { inputs, // outputs, // lock_time, // expiry_height, joinsplit_data, sapling_shielded_data, .. } => { // A set of asynchronous checks which must all succeed. // We finish by waiting on these below. let mut async_checks = FuturesUnordered::new(); // Do basic checks first check::has_inputs_and_outputs(&tx)?; // Handle transparent inputs and outputs. 
if tx.is_coinbase() { check::coinbase_tx_no_joinsplit_or_spend(&tx)?; } else { // feed all of the inputs to the script and shielded verifiers // the script_verifier also checks transparent sighashes, using its own implementation let cached_ffi_transaction = Arc::new(CachedFfiTransaction::new(tx.clone())); for input_index in 0..inputs.len() { let rsp = script_verifier.ready_and().await?.call(script::Request { upgrade, known_utxos: known_utxos.clone(), cached_ffi_transaction: cached_ffi_transaction.clone(), input_index, }); async_checks.push(rsp); } } let shielded_sighash = tx.sighash( upgrade, HashType::ALL, None, ); if let Some(joinsplit_data) = joinsplit_data { // XXX create a method on JoinSplitData // that prepares groth16::Items with the correct proofs // and proof inputs, handling interstitial treestates // correctly. // Then, pass those items to self.joinsplit to verify them. // Consensus rule: The joinSplitSig MUST represent a // valid signature, under joinSplitPubKey, of the // sighash. // // Queue the validation of the JoinSplit signature while // adding the resulting future to our collection of // async checks that (at a minimum) must pass for the // transaction to verify. // // https://zips.z.cash/protocol/protocol.pdf#sproutnonmalleability // https://zips.z.cash/protocol/protocol.pdf#txnencodingandconsensus let rsp = ed25519_verifier .ready_and() .await? .call((joinsplit_data.pub_key, joinsplit_data.sig, &shielded_sighash).into()); async_checks.push(rsp.boxed()); } if let Some(shielded_data) = sapling_shielded_data { check::shielded_balances_match(&shielded_data)?; for spend in shielded_data.spends_per_anchor() { // Consensus rule: cv and rk MUST NOT be of small // order, i.e. [h_J]cv MUST NOT be 𝒪_J and [h_J]rk // MUST NOT be 𝒪_J. 
// // https://zips.z.cash/protocol/protocol.pdf#spenddesc check::spend_cv_rk_not_small_order(&spend)?; // Consensus rule: The proof π_ZKSpend MUST be valid // given a primary input formed from the other // fields except spendAuthSig. // // Queue the verification of the Groth16 spend proof // for each Spend description while adding the // resulting future to our collection of async // checks that (at a minimum) must pass for the // transaction to verify. let spend_rsp = spend_verifier .ready_and() .await? .call(primitives::groth16::ItemWrapper::from(&spend).into()); async_checks.push(spend_rsp.boxed()); // Consensus rule: The spend authorization signature // MUST be a valid SpendAuthSig signature over // SigHash using rk as the validating key. // // Queue the validation of the RedJubjub spend // authorization signature for each Spend // description while adding the resulting future to // our collection of async checks that (at a // minimum) must pass for the transaction to verify. let rsp = redjubjub_verifier .ready_and() .await? .call((spend.rk, spend.spend_auth_sig, &shielded_sighash).into()); async_checks.push(rsp.boxed()); } for output in shielded_data.outputs() { // Consensus rule: cv and wpk MUST NOT be of small // order, i.e. [h_J]cv MUST NOT be 𝒪_J and [h_J]wpk // MUST NOT be 𝒪_J. // // https://zips.z.cash/protocol/protocol.pdf#outputdesc check::output_cv_epk_not_small_order(output)?; // Consensus rule: The proof π_ZKOutput MUST be // valid given a primary input formed from the other // fields except C^enc and C^out. // // Queue the verification of the Groth16 output // proof for each Output description while adding // the resulting future to our collection of async // checks that (at a minimum) must pass for the // transaction to verify. let output_rsp = output_verifier .ready_and() .await? 
.call(primitives::groth16::ItemWrapper::from(output).into()); async_checks.push(output_rsp.boxed()); } let bvk = shielded_data.binding_verification_key(); // TODO: enable async verification and remove this block - #1939 { let item: zebra_chain::primitives::redjubjub::batch::Item = (bvk, shielded_data.binding_sig, &shielded_sighash).into(); item.verify_single().unwrap_or_else(|binding_sig_error| { let binding_sig_error = binding_sig_error.to_string(); tracing::warn!(%binding_sig_error, "ignoring"); metrics::counter!("zebra.error.sapling.binding", 1, "kind" => binding_sig_error); }); // Ignore errors until binding signatures are fixed //.map_err(|e| BoxError::from(Box::new(e)))?; } let _rsp = redjubjub_verifier .ready_and() .await? .call((bvk, shielded_data.binding_sig, &shielded_sighash).into()) .boxed(); // TODO: stop ignoring binding signature errors - #1939 // async_checks.push(rsp); } // Finally, wait for all asynchronous checks to complete // successfully, or fail verification if they error. while let Some(check) = async_checks.next().await { tracing::trace!(?check, remaining = async_checks.len()); check?; } Ok(tx.hash()) } Transaction::V5 { .. } => { unimplemented!("v5 transaction validation as specified in ZIP-216, ZIP-224, ZIP-225, and ZIP-244") } } } .instrument(span) .boxed() } }
Request
identifier_name
ReservoirChainGen.py
''' Created on Jan 2, 2018 @author: dduque Generates an instance of the hydro scheduling problem for a chain of reservoirs. Outputs: Autoregressive matrix for each time period ''' if __name__ == '__main__': from os import path import sys sys.path.append(path.abspath('/Users/dduque/Dropbox/WORKSPACE/SDDP')) sys.path.append(path.abspath('/Users/dduque/Dropbox/WORKSPACE/SDDP/HydroExamples')) import numpy as np import scipy.sparse as sp import matplotlib.pyplot as plt import pickle from HydroModel import hydro_path def gen_instance(num_reservoirs=1000, up_stream_dep=1, T=12, lag=1, num_outcomes=30, simulate=False): ''' Generate a random instance consisting of: - Autoregresive matrices (stored as dictionaries) - Initial inflow vector (matrix for lag > 0) - Innovations of the autoregressive process ''' np.random.seed(0) season = 12 R_matrices = {t: {l: {i: {} for i in range(num_reservoirs)} for l in range(1, lag + 1)} for t in range(0, T)} for t in range(T): for l in range(1, lag + 1): for i in range(num_reservoirs): for j in range(up_stream_dep + 1): if i - j >= 0: if (t < season): #var = 0.2 if i>num_reservoirs/2 else 0.6 #R_matrices[t][l][i][i-j]=np.random.normal(0, var) #for nr=10 experiments
else: R_matrices[t][l][i][i - j] = R_matrices[t - season][l][i][i - j] print(R_matrices[2][1]) np.random.seed(1234) inflow_t0 = [[np.around(np.random.uniform(0, 30), 3) for i in range(num_reservoirs)] for l in range(lag + 1)] print(np.array(inflow_t0)[:, 0:5]) #=========================================================================== # RHS_noise = np.zeros(shape=(num_reservoirs,num_outcomes)) # #mu_s = np.random.uniform(5,10,num_reservoirs) # mu_s = np.random.uniform(0.5,1.5,num_reservoirs) # sig_s = np.random.uniform(0.2,1.2,num_reservoirs) # for i in range(num_reservoirs): # #RHS_noise[i] = np.sort(np.random.normal(mu_s[i],mu_s[i]/3,num_outcomes)) # #RHS_noise[i] = np.random.uniform(-5,10, num_outcomes) # #RHS_noise[i] = np.random.normal(8,np.log(num_reservoirs-i)+1, num_outcomes) # RHS_noise[i] = np.random.lognormal(mu_s[i],sig_s[i],num_outcomes) #nr 10 and nr 100 # #RHS_noise[i] = np.sort(np.random.lognormal(mu_s[i],0.5,num_outcomes)) # print(np.max(RHS_noise, 1)) #=========================================================================== RHS_noise = np.zeros(shape=(num_reservoirs, num_outcomes, T)) #reservoirs_mean = np.random.uniform(.50,1.1,size=num_reservoirs) reservoirs_mean = np.random.uniform(5, 20, size=num_reservoirs) reservoirs_mean_shift = reservoirs_mean * 0.5 r_CV = np.random.uniform(0.2, 0.5, size=num_reservoirs) print(reservoirs_mean) print(r_CV) for t in range(T): #mean_t = np.minimum(np.array([1.5 - round(0.1 * np.sin(0.5 * (t - 6)), 2) for i in range(num_reservoirs)]),1.5) #sig_t = np.array([1 + round(0.3 * np.sin(0.5 * (t - 5)), 2) for i in range(num_reservoirs)]) r_CV = np.random.uniform(0.7, 2, size=num_reservoirs) mean_t = np.minimum( np.array([ reservoirs_mean[i] - round(reservoirs_mean_shift[i] * np.sin(0.5 * (t - 6)), 2) for i in range(num_reservoirs) ]), 100) sig_t = r_CV * mean_t #sig_t = np.array([10 + round(5 * np.sin(0.5 * (t - 2)), 2) for i in range(num_reservoirs)]) cov_mat = np.zeros((nr, nr)) for i in range(nr): for j in 
range(nr): if i == j: cov_mat[i, j] = sig_t[i]**2 else: cov_mat[i, j] = sig_t[i] * sig_t[j] * np.random.uniform(0.3, 0.95) reservoirs_mu = np.random.uniform(2, 20, size=num_reservoirs) #RHS_corralated = reservoirs_mu+ np.exp(np.random.multivariate_normal(mean_t, cov_mat, size= num_outcomes)) RHS_corralated = np.random.multivariate_normal(mean_t, cov_mat, size=num_outcomes) if t < season: RHS_noise[:, :, t] = RHS_corralated.transpose() else: RHS_noise[:, :, t] = RHS_noise[:, :, t - season] print(t, ': ', mean_t[0:10], ' ', sig_t[0:10]) #======================================================================= # mu_s = np.random.uniform(mean_t , mean_t, num_reservoirs) # sig_s = np.random.uniform(sig_t*0.5 , sig_t, num_reservoirs) # #loc_s = np.exp(mu_s+0.5*sig_s**2) # for i in range(num_reservoirs): # if t<season: # #RHS_noise[i,:,t] = np.around(np.random.lognormal(mu_s[i],sig_s[i],num_outcomes),3) #nr 10 and nr 100 # mu_i = np.exp(mu_s[i]+0.5*sig_s[i]**2) # var_i = mu_i*sig_s[i] # RHS_noise[i,:,t] = (np.random.normal(mu_i, var_i,num_outcomes)) #nr 10 and nr 100 # else: # RHS_noise[i,:,t] = RHS_noise[i,:,t-season] #======================================================================= if simulate: simulate_AR_model(R_matrices, inflow_t0, RHS_noise, T, num_reservoirs, lag) instance = HydroRndInstance(R_matrices, inflow_t0, RHS_noise) return instance def simulate_AR_model(R_matrices, inflow_t0, RHS_noise, T, nr, lag): prices = [18 + round(5 * np.sin(0.5 * (x - 2)), 2) for x in range(0, T)] num_reservoirs = nr plt.figure(1) num_reps = 500 res_ref = [0, 1, 2] np.random.seed(res_ref) mean_res_ref = {rr: np.zeros((T)) for rr in res_ref} for replica in range(num_reps): plot_data = {rr: [inflow_t0[-1][rr]] for rr in res_ref} inflows = list(inflow_t0) for t in range(1, T): #innovation = np.random.triangular(-1, mu_ref, 4) outcome = np.random.randint(len(RHS_noise[0, :, t])) new_inflows = [0] * num_reservoirs for l in range(1, lag + 1): for i in range(num_reservoirs): 
new_inflows[i] += RHS_noise[i, outcome, t] for j in range(num_reservoirs): if (j in R_matrices[t][l][i]): new_inflows[i] += R_matrices[t][l][i][j] * inflows[-l][j] inflows.append(new_inflows) inflows.pop(0) for rr in res_ref: plot_data[rr].append(inflows[-1][rr]) for (i, rr) in enumerate(res_ref): mean_res_ref[rr] = mean_res_ref[rr] + np.array(plot_data[rr]) plotpos = int('%i1%i' % (len(res_ref), i + 1)) plt.subplot(plotpos) plt.plot(plot_data[rr], alpha=0.5) data_replica = np.array([plot_data[r1] for r1 in res_ref]) for (i, rr) in enumerate(res_ref): mean_res_ref[rr] = mean_res_ref[rr] / num_reps plotpos = int('%i1%i' % (len(res_ref), i + 1)) plt.subplot(plotpos) plt.plot(mean_res_ref[rr], linewidth=2, color='black', linestyle='--') plt.plot(prices, linewidth=2, color='red', linestyle='--') plt.grid() plt.show() class HydroRndInstance(): def __init__(self, ar_matrices, initial_inflows, RHS_noise): self.ar_matrices = ar_matrices self.inital_inflows = initial_inflows self.RHS_noise = RHS_noise def read_instance(file_name='hydro_rnd_instance_R200_UD1_T120_LAG1_OUT10K_AR.pkl', lag=None): ''' Read instance from file and returns a HydroRndInstance object. 
''' file_name_path = hydro_path + '/data/' + file_name with open(file_name_path, 'rb') as input: instance = pickle.load(input) return instance if __name__ == '__main__': nr = 30 ud = 0 T = 48 file_name_pat = None # for lag in [1]:#range(1,2): # file_name_path = hydro_path+'/data/hydro_rnd_instance_R%i_UD%i_T%i_LAG%i_OUT10K_AR1.pkl' %(nr,ud,T,lag) # print(file_name_path) # with open(file_name_path, 'wb') as output: # instance = gen_instance(num_reservoirs=nr, up_stream_dep=ud, T=T, lag = lag, num_outcomes=10000, simulate=True) # pickle.dump(instance, output, pickle.HIGHEST_PROTOCOL) #instance = gen_instance(num_reservoirs=nr, up_stream_dep=ud, T=T, lag = 1, num_outcomes= 10000, simulate= True) hydro_instance = read_instance('hydro_rnd_instance_R%i_UD%i_T%i_LAG%i_OUT10K_AR1.pkl' % (nr, ud, T, 1), lag=1) matrix = hydro_instance.ar_matrices RHSnoise_density = hydro_instance.RHS_noise inflow_t0 = hydro_instance.inital_inflows simulate_AR_model(matrix, inflow_t0, RHSnoise_density, 24, 10, 1)
R_matrices[t][l][i][i - j] = np.round(np.random.normal(1.0, 0.3), 3) #for nr=30 experiments R_matrices[t][l][i][i - j] = 0.5 + 0.5 * np.sin(t) #for nr=30 experiments #R_matrices[t][l][i][i-j]=np.random.normal(0.1, (1.0/(lag*up_stream_dep+1))) #R_matrices[t][l][i][i-j]=np.random.uniform(0,0.3) #R_matrices[t][l][i][i-j]=np.random.uniform(-1/(up_stream_dep+lag),1/(up_stream_dep+lag)) #for nr=100 #=================================================== # if t>0: # R_matrices[t][l][i][i-j]=np.abs(np.random.normal(0.01, (1.0/(lag*up_stream_dep+1)))) # #R_matrices[t][l][i][i-j]=np.random.uniform(0.0/(up_stream_dep+1)**lag,(1/(up_stream_dep+1))/lag) # #R_matrices[t][l][i][i-j]=(np.random.normal(0.0, (1.0/(up_stream_dep+1))/lag) + R_matrices[t-1][l][i][i-j])/2.0 # else: # R_matrices[t][l][i][i-j]=np.random.normal(0.01, (1.0/(lag*up_stream_dep+1))) #===================================================
conditional_block
ReservoirChainGen.py
''' Created on Jan 2, 2018 @author: dduque Generates an instance of the hydro scheduling problem for a chain of reservoirs. Outputs: Autoregressive matrix for each time period ''' if __name__ == '__main__': from os import path import sys sys.path.append(path.abspath('/Users/dduque/Dropbox/WORKSPACE/SDDP')) sys.path.append(path.abspath('/Users/dduque/Dropbox/WORKSPACE/SDDP/HydroExamples')) import numpy as np import scipy.sparse as sp import matplotlib.pyplot as plt import pickle from HydroModel import hydro_path def gen_instance(num_reservoirs=1000, up_stream_dep=1, T=12, lag=1, num_outcomes=30, simulate=False): ''' Generate a random instance consisting of: - Autoregresive matrices (stored as dictionaries) - Initial inflow vector (matrix for lag > 0) - Innovations of the autoregressive process ''' np.random.seed(0) season = 12 R_matrices = {t: {l: {i: {} for i in range(num_reservoirs)} for l in range(1, lag + 1)} for t in range(0, T)} for t in range(T): for l in range(1, lag + 1): for i in range(num_reservoirs): for j in range(up_stream_dep + 1): if i - j >= 0: if (t < season): #var = 0.2 if i>num_reservoirs/2 else 0.6 #R_matrices[t][l][i][i-j]=np.random.normal(0, var) #for nr=10 experiments R_matrices[t][l][i][i - j] = np.round(np.random.normal(1.0, 0.3), 3) #for nr=30 experiments R_matrices[t][l][i][i - j] = 0.5 + 0.5 * np.sin(t) #for nr=30 experiments #R_matrices[t][l][i][i-j]=np.random.normal(0.1, (1.0/(lag*up_stream_dep+1))) #R_matrices[t][l][i][i-j]=np.random.uniform(0,0.3) #R_matrices[t][l][i][i-j]=np.random.uniform(-1/(up_stream_dep+lag),1/(up_stream_dep+lag)) #for nr=100 #=================================================== # if t>0: # R_matrices[t][l][i][i-j]=np.abs(np.random.normal(0.01, (1.0/(lag*up_stream_dep+1)))) # #R_matrices[t][l][i][i-j]=np.random.uniform(0.0/(up_stream_dep+1)**lag,(1/(up_stream_dep+1))/lag) # #R_matrices[t][l][i][i-j]=(np.random.normal(0.0, (1.0/(up_stream_dep+1))/lag) + R_matrices[t-1][l][i][i-j])/2.0 # else: # 
R_matrices[t][l][i][i-j]=np.random.normal(0.01, (1.0/(lag*up_stream_dep+1))) #=================================================== else: R_matrices[t][l][i][i - j] = R_matrices[t - season][l][i][i - j] print(R_matrices[2][1]) np.random.seed(1234) inflow_t0 = [[np.around(np.random.uniform(0, 30), 3) for i in range(num_reservoirs)] for l in range(lag + 1)] print(np.array(inflow_t0)[:, 0:5]) #=========================================================================== # RHS_noise = np.zeros(shape=(num_reservoirs,num_outcomes)) # #mu_s = np.random.uniform(5,10,num_reservoirs) # mu_s = np.random.uniform(0.5,1.5,num_reservoirs) # sig_s = np.random.uniform(0.2,1.2,num_reservoirs) # for i in range(num_reservoirs): # #RHS_noise[i] = np.sort(np.random.normal(mu_s[i],mu_s[i]/3,num_outcomes))
# RHS_noise[i] = np.random.lognormal(mu_s[i],sig_s[i],num_outcomes) #nr 10 and nr 100 # #RHS_noise[i] = np.sort(np.random.lognormal(mu_s[i],0.5,num_outcomes)) # print(np.max(RHS_noise, 1)) #=========================================================================== RHS_noise = np.zeros(shape=(num_reservoirs, num_outcomes, T)) #reservoirs_mean = np.random.uniform(.50,1.1,size=num_reservoirs) reservoirs_mean = np.random.uniform(5, 20, size=num_reservoirs) reservoirs_mean_shift = reservoirs_mean * 0.5 r_CV = np.random.uniform(0.2, 0.5, size=num_reservoirs) print(reservoirs_mean) print(r_CV) for t in range(T): #mean_t = np.minimum(np.array([1.5 - round(0.1 * np.sin(0.5 * (t - 6)), 2) for i in range(num_reservoirs)]),1.5) #sig_t = np.array([1 + round(0.3 * np.sin(0.5 * (t - 5)), 2) for i in range(num_reservoirs)]) r_CV = np.random.uniform(0.7, 2, size=num_reservoirs) mean_t = np.minimum( np.array([ reservoirs_mean[i] - round(reservoirs_mean_shift[i] * np.sin(0.5 * (t - 6)), 2) for i in range(num_reservoirs) ]), 100) sig_t = r_CV * mean_t #sig_t = np.array([10 + round(5 * np.sin(0.5 * (t - 2)), 2) for i in range(num_reservoirs)]) cov_mat = np.zeros((nr, nr)) for i in range(nr): for j in range(nr): if i == j: cov_mat[i, j] = sig_t[i]**2 else: cov_mat[i, j] = sig_t[i] * sig_t[j] * np.random.uniform(0.3, 0.95) reservoirs_mu = np.random.uniform(2, 20, size=num_reservoirs) #RHS_corralated = reservoirs_mu+ np.exp(np.random.multivariate_normal(mean_t, cov_mat, size= num_outcomes)) RHS_corralated = np.random.multivariate_normal(mean_t, cov_mat, size=num_outcomes) if t < season: RHS_noise[:, :, t] = RHS_corralated.transpose() else: RHS_noise[:, :, t] = RHS_noise[:, :, t - season] print(t, ': ', mean_t[0:10], ' ', sig_t[0:10]) #======================================================================= # mu_s = np.random.uniform(mean_t , mean_t, num_reservoirs) # sig_s = np.random.uniform(sig_t*0.5 , sig_t, num_reservoirs) # #loc_s = np.exp(mu_s+0.5*sig_s**2) # for i in 
range(num_reservoirs): # if t<season: # #RHS_noise[i,:,t] = np.around(np.random.lognormal(mu_s[i],sig_s[i],num_outcomes),3) #nr 10 and nr 100 # mu_i = np.exp(mu_s[i]+0.5*sig_s[i]**2) # var_i = mu_i*sig_s[i] # RHS_noise[i,:,t] = (np.random.normal(mu_i, var_i,num_outcomes)) #nr 10 and nr 100 # else: # RHS_noise[i,:,t] = RHS_noise[i,:,t-season] #======================================================================= if simulate: simulate_AR_model(R_matrices, inflow_t0, RHS_noise, T, num_reservoirs, lag) instance = HydroRndInstance(R_matrices, inflow_t0, RHS_noise) return instance def simulate_AR_model(R_matrices, inflow_t0, RHS_noise, T, nr, lag): prices = [18 + round(5 * np.sin(0.5 * (x - 2)), 2) for x in range(0, T)] num_reservoirs = nr plt.figure(1) num_reps = 500 res_ref = [0, 1, 2] np.random.seed(res_ref) mean_res_ref = {rr: np.zeros((T)) for rr in res_ref} for replica in range(num_reps): plot_data = {rr: [inflow_t0[-1][rr]] for rr in res_ref} inflows = list(inflow_t0) for t in range(1, T): #innovation = np.random.triangular(-1, mu_ref, 4) outcome = np.random.randint(len(RHS_noise[0, :, t])) new_inflows = [0] * num_reservoirs for l in range(1, lag + 1): for i in range(num_reservoirs): new_inflows[i] += RHS_noise[i, outcome, t] for j in range(num_reservoirs): if (j in R_matrices[t][l][i]): new_inflows[i] += R_matrices[t][l][i][j] * inflows[-l][j] inflows.append(new_inflows) inflows.pop(0) for rr in res_ref: plot_data[rr].append(inflows[-1][rr]) for (i, rr) in enumerate(res_ref): mean_res_ref[rr] = mean_res_ref[rr] + np.array(plot_data[rr]) plotpos = int('%i1%i' % (len(res_ref), i + 1)) plt.subplot(plotpos) plt.plot(plot_data[rr], alpha=0.5) data_replica = np.array([plot_data[r1] for r1 in res_ref]) for (i, rr) in enumerate(res_ref): mean_res_ref[rr] = mean_res_ref[rr] / num_reps plotpos = int('%i1%i' % (len(res_ref), i + 1)) plt.subplot(plotpos) plt.plot(mean_res_ref[rr], linewidth=2, color='black', linestyle='--') plt.plot(prices, linewidth=2, color='red', 
linestyle='--') plt.grid() plt.show() class HydroRndInstance(): def __init__(self, ar_matrices, initial_inflows, RHS_noise): self.ar_matrices = ar_matrices self.inital_inflows = initial_inflows self.RHS_noise = RHS_noise def read_instance(file_name='hydro_rnd_instance_R200_UD1_T120_LAG1_OUT10K_AR.pkl', lag=None): ''' Read instance from file and returns a HydroRndInstance object. ''' file_name_path = hydro_path + '/data/' + file_name with open(file_name_path, 'rb') as input: instance = pickle.load(input) return instance if __name__ == '__main__': nr = 30 ud = 0 T = 48 file_name_pat = None # for lag in [1]:#range(1,2): # file_name_path = hydro_path+'/data/hydro_rnd_instance_R%i_UD%i_T%i_LAG%i_OUT10K_AR1.pkl' %(nr,ud,T,lag) # print(file_name_path) # with open(file_name_path, 'wb') as output: # instance = gen_instance(num_reservoirs=nr, up_stream_dep=ud, T=T, lag = lag, num_outcomes=10000, simulate=True) # pickle.dump(instance, output, pickle.HIGHEST_PROTOCOL) #instance = gen_instance(num_reservoirs=nr, up_stream_dep=ud, T=T, lag = 1, num_outcomes= 10000, simulate= True) hydro_instance = read_instance('hydro_rnd_instance_R%i_UD%i_T%i_LAG%i_OUT10K_AR1.pkl' % (nr, ud, T, 1), lag=1) matrix = hydro_instance.ar_matrices RHSnoise_density = hydro_instance.RHS_noise inflow_t0 = hydro_instance.inital_inflows simulate_AR_model(matrix, inflow_t0, RHSnoise_density, 24, 10, 1)
# #RHS_noise[i] = np.random.uniform(-5,10, num_outcomes) # #RHS_noise[i] = np.random.normal(8,np.log(num_reservoirs-i)+1, num_outcomes)
random_line_split
ReservoirChainGen.py
''' Created on Jan 2, 2018 @author: dduque Generates an instance of the hydro scheduling problem for a chain of reservoirs. Outputs: Autoregressive matrix for each time period ''' if __name__ == '__main__': from os import path import sys sys.path.append(path.abspath('/Users/dduque/Dropbox/WORKSPACE/SDDP')) sys.path.append(path.abspath('/Users/dduque/Dropbox/WORKSPACE/SDDP/HydroExamples')) import numpy as np import scipy.sparse as sp import matplotlib.pyplot as plt import pickle from HydroModel import hydro_path def gen_instance(num_reservoirs=1000, up_stream_dep=1, T=12, lag=1, num_outcomes=30, simulate=False):
def simulate_AR_model(R_matrices, inflow_t0, RHS_noise, T, nr, lag): prices = [18 + round(5 * np.sin(0.5 * (x - 2)), 2) for x in range(0, T)] num_reservoirs = nr plt.figure(1) num_reps = 500 res_ref = [0, 1, 2] np.random.seed(res_ref) mean_res_ref = {rr: np.zeros((T)) for rr in res_ref} for replica in range(num_reps): plot_data = {rr: [inflow_t0[-1][rr]] for rr in res_ref} inflows = list(inflow_t0) for t in range(1, T): #innovation = np.random.triangular(-1, mu_ref, 4) outcome = np.random.randint(len(RHS_noise[0, :, t])) new_inflows = [0] * num_reservoirs for l in range(1, lag + 1): for i in range(num_reservoirs): new_inflows[i] += RHS_noise[i, outcome, t] for j in range(num_reservoirs): if (j in R_matrices[t][l][i]): new_inflows[i] += R_matrices[t][l][i][j] * inflows[-l][j] inflows.append(new_inflows) inflows.pop(0) for rr in res_ref: plot_data[rr].append(inflows[-1][rr]) for (i, rr) in enumerate(res_ref): mean_res_ref[rr] = mean_res_ref[rr] + np.array(plot_data[rr]) plotpos = int('%i1%i' % (len(res_ref), i + 1)) plt.subplot(plotpos) plt.plot(plot_data[rr], alpha=0.5) data_replica = np.array([plot_data[r1] for r1 in res_ref]) for (i, rr) in enumerate(res_ref): mean_res_ref[rr] = mean_res_ref[rr] / num_reps plotpos = int('%i1%i' % (len(res_ref), i + 1)) plt.subplot(plotpos) plt.plot(mean_res_ref[rr], linewidth=2, color='black', linestyle='--') plt.plot(prices, linewidth=2, color='red', linestyle='--') plt.grid() plt.show() class HydroRndInstance(): def __init__(self, ar_matrices, initial_inflows, RHS_noise): self.ar_matrices = ar_matrices self.inital_inflows = initial_inflows self.RHS_noise = RHS_noise def read_instance(file_name='hydro_rnd_instance_R200_UD1_T120_LAG1_OUT10K_AR.pkl', lag=None): ''' Read instance from file and returns a HydroRndInstance object. 
''' file_name_path = hydro_path + '/data/' + file_name with open(file_name_path, 'rb') as input: instance = pickle.load(input) return instance if __name__ == '__main__': nr = 30 ud = 0 T = 48 file_name_pat = None # for lag in [1]:#range(1,2): # file_name_path = hydro_path+'/data/hydro_rnd_instance_R%i_UD%i_T%i_LAG%i_OUT10K_AR1.pkl' %(nr,ud,T,lag) # print(file_name_path) # with open(file_name_path, 'wb') as output: # instance = gen_instance(num_reservoirs=nr, up_stream_dep=ud, T=T, lag = lag, num_outcomes=10000, simulate=True) # pickle.dump(instance, output, pickle.HIGHEST_PROTOCOL) #instance = gen_instance(num_reservoirs=nr, up_stream_dep=ud, T=T, lag = 1, num_outcomes= 10000, simulate= True) hydro_instance = read_instance('hydro_rnd_instance_R%i_UD%i_T%i_LAG%i_OUT10K_AR1.pkl' % (nr, ud, T, 1), lag=1) matrix = hydro_instance.ar_matrices RHSnoise_density = hydro_instance.RHS_noise inflow_t0 = hydro_instance.inital_inflows simulate_AR_model(matrix, inflow_t0, RHSnoise_density, 24, 10, 1)
''' Generate a random instance consisting of: - Autoregresive matrices (stored as dictionaries) - Initial inflow vector (matrix for lag > 0) - Innovations of the autoregressive process ''' np.random.seed(0) season = 12 R_matrices = {t: {l: {i: {} for i in range(num_reservoirs)} for l in range(1, lag + 1)} for t in range(0, T)} for t in range(T): for l in range(1, lag + 1): for i in range(num_reservoirs): for j in range(up_stream_dep + 1): if i - j >= 0: if (t < season): #var = 0.2 if i>num_reservoirs/2 else 0.6 #R_matrices[t][l][i][i-j]=np.random.normal(0, var) #for nr=10 experiments R_matrices[t][l][i][i - j] = np.round(np.random.normal(1.0, 0.3), 3) #for nr=30 experiments R_matrices[t][l][i][i - j] = 0.5 + 0.5 * np.sin(t) #for nr=30 experiments #R_matrices[t][l][i][i-j]=np.random.normal(0.1, (1.0/(lag*up_stream_dep+1))) #R_matrices[t][l][i][i-j]=np.random.uniform(0,0.3) #R_matrices[t][l][i][i-j]=np.random.uniform(-1/(up_stream_dep+lag),1/(up_stream_dep+lag)) #for nr=100 #=================================================== # if t>0: # R_matrices[t][l][i][i-j]=np.abs(np.random.normal(0.01, (1.0/(lag*up_stream_dep+1)))) # #R_matrices[t][l][i][i-j]=np.random.uniform(0.0/(up_stream_dep+1)**lag,(1/(up_stream_dep+1))/lag) # #R_matrices[t][l][i][i-j]=(np.random.normal(0.0, (1.0/(up_stream_dep+1))/lag) + R_matrices[t-1][l][i][i-j])/2.0 # else: # R_matrices[t][l][i][i-j]=np.random.normal(0.01, (1.0/(lag*up_stream_dep+1))) #=================================================== else: R_matrices[t][l][i][i - j] = R_matrices[t - season][l][i][i - j] print(R_matrices[2][1]) np.random.seed(1234) inflow_t0 = [[np.around(np.random.uniform(0, 30), 3) for i in range(num_reservoirs)] for l in range(lag + 1)] print(np.array(inflow_t0)[:, 0:5]) #=========================================================================== # RHS_noise = np.zeros(shape=(num_reservoirs,num_outcomes)) # #mu_s = np.random.uniform(5,10,num_reservoirs) # mu_s = np.random.uniform(0.5,1.5,num_reservoirs) # sig_s = 
np.random.uniform(0.2,1.2,num_reservoirs) # for i in range(num_reservoirs): # #RHS_noise[i] = np.sort(np.random.normal(mu_s[i],mu_s[i]/3,num_outcomes)) # #RHS_noise[i] = np.random.uniform(-5,10, num_outcomes) # #RHS_noise[i] = np.random.normal(8,np.log(num_reservoirs-i)+1, num_outcomes) # RHS_noise[i] = np.random.lognormal(mu_s[i],sig_s[i],num_outcomes) #nr 10 and nr 100 # #RHS_noise[i] = np.sort(np.random.lognormal(mu_s[i],0.5,num_outcomes)) # print(np.max(RHS_noise, 1)) #=========================================================================== RHS_noise = np.zeros(shape=(num_reservoirs, num_outcomes, T)) #reservoirs_mean = np.random.uniform(.50,1.1,size=num_reservoirs) reservoirs_mean = np.random.uniform(5, 20, size=num_reservoirs) reservoirs_mean_shift = reservoirs_mean * 0.5 r_CV = np.random.uniform(0.2, 0.5, size=num_reservoirs) print(reservoirs_mean) print(r_CV) for t in range(T): #mean_t = np.minimum(np.array([1.5 - round(0.1 * np.sin(0.5 * (t - 6)), 2) for i in range(num_reservoirs)]),1.5) #sig_t = np.array([1 + round(0.3 * np.sin(0.5 * (t - 5)), 2) for i in range(num_reservoirs)]) r_CV = np.random.uniform(0.7, 2, size=num_reservoirs) mean_t = np.minimum( np.array([ reservoirs_mean[i] - round(reservoirs_mean_shift[i] * np.sin(0.5 * (t - 6)), 2) for i in range(num_reservoirs) ]), 100) sig_t = r_CV * mean_t #sig_t = np.array([10 + round(5 * np.sin(0.5 * (t - 2)), 2) for i in range(num_reservoirs)]) cov_mat = np.zeros((nr, nr)) for i in range(nr): for j in range(nr): if i == j: cov_mat[i, j] = sig_t[i]**2 else: cov_mat[i, j] = sig_t[i] * sig_t[j] * np.random.uniform(0.3, 0.95) reservoirs_mu = np.random.uniform(2, 20, size=num_reservoirs) #RHS_corralated = reservoirs_mu+ np.exp(np.random.multivariate_normal(mean_t, cov_mat, size= num_outcomes)) RHS_corralated = np.random.multivariate_normal(mean_t, cov_mat, size=num_outcomes) if t < season: RHS_noise[:, :, t] = RHS_corralated.transpose() else: RHS_noise[:, :, t] = RHS_noise[:, :, t - season] print(t, ': ', 
mean_t[0:10], ' ', sig_t[0:10]) #======================================================================= # mu_s = np.random.uniform(mean_t , mean_t, num_reservoirs) # sig_s = np.random.uniform(sig_t*0.5 , sig_t, num_reservoirs) # #loc_s = np.exp(mu_s+0.5*sig_s**2) # for i in range(num_reservoirs): # if t<season: # #RHS_noise[i,:,t] = np.around(np.random.lognormal(mu_s[i],sig_s[i],num_outcomes),3) #nr 10 and nr 100 # mu_i = np.exp(mu_s[i]+0.5*sig_s[i]**2) # var_i = mu_i*sig_s[i] # RHS_noise[i,:,t] = (np.random.normal(mu_i, var_i,num_outcomes)) #nr 10 and nr 100 # else: # RHS_noise[i,:,t] = RHS_noise[i,:,t-season] #======================================================================= if simulate: simulate_AR_model(R_matrices, inflow_t0, RHS_noise, T, num_reservoirs, lag) instance = HydroRndInstance(R_matrices, inflow_t0, RHS_noise) return instance
identifier_body
ReservoirChainGen.py
''' Created on Jan 2, 2018 @author: dduque Generates an instance of the hydro scheduling problem for a chain of reservoirs. Outputs: Autoregressive matrix for each time period ''' if __name__ == '__main__': from os import path import sys sys.path.append(path.abspath('/Users/dduque/Dropbox/WORKSPACE/SDDP')) sys.path.append(path.abspath('/Users/dduque/Dropbox/WORKSPACE/SDDP/HydroExamples')) import numpy as np import scipy.sparse as sp import matplotlib.pyplot as plt import pickle from HydroModel import hydro_path def gen_instance(num_reservoirs=1000, up_stream_dep=1, T=12, lag=1, num_outcomes=30, simulate=False): ''' Generate a random instance consisting of: - Autoregresive matrices (stored as dictionaries) - Initial inflow vector (matrix for lag > 0) - Innovations of the autoregressive process ''' np.random.seed(0) season = 12 R_matrices = {t: {l: {i: {} for i in range(num_reservoirs)} for l in range(1, lag + 1)} for t in range(0, T)} for t in range(T): for l in range(1, lag + 1): for i in range(num_reservoirs): for j in range(up_stream_dep + 1): if i - j >= 0: if (t < season): #var = 0.2 if i>num_reservoirs/2 else 0.6 #R_matrices[t][l][i][i-j]=np.random.normal(0, var) #for nr=10 experiments R_matrices[t][l][i][i - j] = np.round(np.random.normal(1.0, 0.3), 3) #for nr=30 experiments R_matrices[t][l][i][i - j] = 0.5 + 0.5 * np.sin(t) #for nr=30 experiments #R_matrices[t][l][i][i-j]=np.random.normal(0.1, (1.0/(lag*up_stream_dep+1))) #R_matrices[t][l][i][i-j]=np.random.uniform(0,0.3) #R_matrices[t][l][i][i-j]=np.random.uniform(-1/(up_stream_dep+lag),1/(up_stream_dep+lag)) #for nr=100 #=================================================== # if t>0: # R_matrices[t][l][i][i-j]=np.abs(np.random.normal(0.01, (1.0/(lag*up_stream_dep+1)))) # #R_matrices[t][l][i][i-j]=np.random.uniform(0.0/(up_stream_dep+1)**lag,(1/(up_stream_dep+1))/lag) # #R_matrices[t][l][i][i-j]=(np.random.normal(0.0, (1.0/(up_stream_dep+1))/lag) + R_matrices[t-1][l][i][i-j])/2.0 # else: # 
R_matrices[t][l][i][i-j]=np.random.normal(0.01, (1.0/(lag*up_stream_dep+1))) #=================================================== else: R_matrices[t][l][i][i - j] = R_matrices[t - season][l][i][i - j] print(R_matrices[2][1]) np.random.seed(1234) inflow_t0 = [[np.around(np.random.uniform(0, 30), 3) for i in range(num_reservoirs)] for l in range(lag + 1)] print(np.array(inflow_t0)[:, 0:5]) #=========================================================================== # RHS_noise = np.zeros(shape=(num_reservoirs,num_outcomes)) # #mu_s = np.random.uniform(5,10,num_reservoirs) # mu_s = np.random.uniform(0.5,1.5,num_reservoirs) # sig_s = np.random.uniform(0.2,1.2,num_reservoirs) # for i in range(num_reservoirs): # #RHS_noise[i] = np.sort(np.random.normal(mu_s[i],mu_s[i]/3,num_outcomes)) # #RHS_noise[i] = np.random.uniform(-5,10, num_outcomes) # #RHS_noise[i] = np.random.normal(8,np.log(num_reservoirs-i)+1, num_outcomes) # RHS_noise[i] = np.random.lognormal(mu_s[i],sig_s[i],num_outcomes) #nr 10 and nr 100 # #RHS_noise[i] = np.sort(np.random.lognormal(mu_s[i],0.5,num_outcomes)) # print(np.max(RHS_noise, 1)) #=========================================================================== RHS_noise = np.zeros(shape=(num_reservoirs, num_outcomes, T)) #reservoirs_mean = np.random.uniform(.50,1.1,size=num_reservoirs) reservoirs_mean = np.random.uniform(5, 20, size=num_reservoirs) reservoirs_mean_shift = reservoirs_mean * 0.5 r_CV = np.random.uniform(0.2, 0.5, size=num_reservoirs) print(reservoirs_mean) print(r_CV) for t in range(T): #mean_t = np.minimum(np.array([1.5 - round(0.1 * np.sin(0.5 * (t - 6)), 2) for i in range(num_reservoirs)]),1.5) #sig_t = np.array([1 + round(0.3 * np.sin(0.5 * (t - 5)), 2) for i in range(num_reservoirs)]) r_CV = np.random.uniform(0.7, 2, size=num_reservoirs) mean_t = np.minimum( np.array([ reservoirs_mean[i] - round(reservoirs_mean_shift[i] * np.sin(0.5 * (t - 6)), 2) for i in range(num_reservoirs) ]), 100) sig_t = r_CV * mean_t #sig_t = np.array([10 + 
round(5 * np.sin(0.5 * (t - 2)), 2) for i in range(num_reservoirs)]) cov_mat = np.zeros((nr, nr)) for i in range(nr): for j in range(nr): if i == j: cov_mat[i, j] = sig_t[i]**2 else: cov_mat[i, j] = sig_t[i] * sig_t[j] * np.random.uniform(0.3, 0.95) reservoirs_mu = np.random.uniform(2, 20, size=num_reservoirs) #RHS_corralated = reservoirs_mu+ np.exp(np.random.multivariate_normal(mean_t, cov_mat, size= num_outcomes)) RHS_corralated = np.random.multivariate_normal(mean_t, cov_mat, size=num_outcomes) if t < season: RHS_noise[:, :, t] = RHS_corralated.transpose() else: RHS_noise[:, :, t] = RHS_noise[:, :, t - season] print(t, ': ', mean_t[0:10], ' ', sig_t[0:10]) #======================================================================= # mu_s = np.random.uniform(mean_t , mean_t, num_reservoirs) # sig_s = np.random.uniform(sig_t*0.5 , sig_t, num_reservoirs) # #loc_s = np.exp(mu_s+0.5*sig_s**2) # for i in range(num_reservoirs): # if t<season: # #RHS_noise[i,:,t] = np.around(np.random.lognormal(mu_s[i],sig_s[i],num_outcomes),3) #nr 10 and nr 100 # mu_i = np.exp(mu_s[i]+0.5*sig_s[i]**2) # var_i = mu_i*sig_s[i] # RHS_noise[i,:,t] = (np.random.normal(mu_i, var_i,num_outcomes)) #nr 10 and nr 100 # else: # RHS_noise[i,:,t] = RHS_noise[i,:,t-season] #======================================================================= if simulate: simulate_AR_model(R_matrices, inflow_t0, RHS_noise, T, num_reservoirs, lag) instance = HydroRndInstance(R_matrices, inflow_t0, RHS_noise) return instance def simulate_AR_model(R_matrices, inflow_t0, RHS_noise, T, nr, lag): prices = [18 + round(5 * np.sin(0.5 * (x - 2)), 2) for x in range(0, T)] num_reservoirs = nr plt.figure(1) num_reps = 500 res_ref = [0, 1, 2] np.random.seed(res_ref) mean_res_ref = {rr: np.zeros((T)) for rr in res_ref} for replica in range(num_reps): plot_data = {rr: [inflow_t0[-1][rr]] for rr in res_ref} inflows = list(inflow_t0) for t in range(1, T): #innovation = np.random.triangular(-1, mu_ref, 4) outcome = 
np.random.randint(len(RHS_noise[0, :, t])) new_inflows = [0] * num_reservoirs for l in range(1, lag + 1): for i in range(num_reservoirs): new_inflows[i] += RHS_noise[i, outcome, t] for j in range(num_reservoirs): if (j in R_matrices[t][l][i]): new_inflows[i] += R_matrices[t][l][i][j] * inflows[-l][j] inflows.append(new_inflows) inflows.pop(0) for rr in res_ref: plot_data[rr].append(inflows[-1][rr]) for (i, rr) in enumerate(res_ref): mean_res_ref[rr] = mean_res_ref[rr] + np.array(plot_data[rr]) plotpos = int('%i1%i' % (len(res_ref), i + 1)) plt.subplot(plotpos) plt.plot(plot_data[rr], alpha=0.5) data_replica = np.array([plot_data[r1] for r1 in res_ref]) for (i, rr) in enumerate(res_ref): mean_res_ref[rr] = mean_res_ref[rr] / num_reps plotpos = int('%i1%i' % (len(res_ref), i + 1)) plt.subplot(plotpos) plt.plot(mean_res_ref[rr], linewidth=2, color='black', linestyle='--') plt.plot(prices, linewidth=2, color='red', linestyle='--') plt.grid() plt.show() class HydroRndInstance(): def __init__(self, ar_matrices, initial_inflows, RHS_noise): self.ar_matrices = ar_matrices self.inital_inflows = initial_inflows self.RHS_noise = RHS_noise def
(file_name='hydro_rnd_instance_R200_UD1_T120_LAG1_OUT10K_AR.pkl', lag=None): ''' Read instance from file and returns a HydroRndInstance object. ''' file_name_path = hydro_path + '/data/' + file_name with open(file_name_path, 'rb') as input: instance = pickle.load(input) return instance if __name__ == '__main__': nr = 30 ud = 0 T = 48 file_name_pat = None # for lag in [1]:#range(1,2): # file_name_path = hydro_path+'/data/hydro_rnd_instance_R%i_UD%i_T%i_LAG%i_OUT10K_AR1.pkl' %(nr,ud,T,lag) # print(file_name_path) # with open(file_name_path, 'wb') as output: # instance = gen_instance(num_reservoirs=nr, up_stream_dep=ud, T=T, lag = lag, num_outcomes=10000, simulate=True) # pickle.dump(instance, output, pickle.HIGHEST_PROTOCOL) #instance = gen_instance(num_reservoirs=nr, up_stream_dep=ud, T=T, lag = 1, num_outcomes= 10000, simulate= True) hydro_instance = read_instance('hydro_rnd_instance_R%i_UD%i_T%i_LAG%i_OUT10K_AR1.pkl' % (nr, ud, T, 1), lag=1) matrix = hydro_instance.ar_matrices RHSnoise_density = hydro_instance.RHS_noise inflow_t0 = hydro_instance.inital_inflows simulate_AR_model(matrix, inflow_t0, RHSnoise_density, 24, 10, 1)
read_instance
identifier_name
main.rs
extern crate sdl2; extern crate midir; mod keyboard; use sdl2::rect::Rect; use sdl2::render::TextureQuery; use std::{thread}; use std::time::{Instant,Duration}; use sdl2::pixels::Color; use sdl2::ttf::Font; use sdl2::event::Event; use sdl2::event::WindowEvent; use sdl2::keyboard::Keycode; use sdl2::gfx::primitives::DrawRenderer; use sdl2::render::Renderer; use std::f32; use std::f32::consts::PI; use std::collections::HashMap; use keyboard::Keyboard; use keyboard::HexAddr; use keyboard::HexKey; use keyboard::HarmonicKeyboard; use keyboard::JammerKeyboard; use midir::{MidiOutput, MidiOutputConnection}; use std::error::Error; use sdl2::rwops::RWops; const INCREMENT_ANGLE:f32 = 2.0*PI/6.0; // 60 degrees in radians const MOUSE_OID:i64 = -1; const NOTE_ON_MSG: u8 = 0x90; const NOTE_OFF_MSG: u8 = 0x80; /* TODO * Octave Shifing * Keyboard Rotation * Multi Key Highlighting * Readme for github * Factor out midi * Better error handling/remove .unwraps * Add Guitar Layout * Consider changing draw_keyboard so that it draws only the key changes and not the whole board * every time. * Correct velocity controls? */ fn get_hexagon(x:i16, y:i16, radius:i16) -> ([i16;6], [i16;6]) { // TODO this function needs to be broken up into a calculate and translate section, we don't // need to redo the sin math every time. let r:f32 = radius as f32; let mut angle:f32 = INCREMENT_ANGLE/2.0; let mut xs: [i16;6] = [0; 6]; let mut ys: [i16;6] = [0; 6]; for i in 0..6 { let xo = angle.sin()*r; let yo = angle.cos()*r; angle += INCREMENT_ANGLE; xs[i] = x + xo.round() as i16; ys[i] = y + yo.round() as i16; } return (xs, ys) } fn translate_hexagon(xlist:[i16;6], ylist:[i16;6], x:i16, y:i16) -> ([i16;6], [i16;6]) { let mut xs: [i16;6] = [0; 6]; let mut ys: [i16;6] = [0; 6]; for i in 0..6 { xs[i] = xlist[i] + x; ys[i] = ylist[i] + y; } return (xs, ys) } /// Given the x and y locations of a click, return the address of the hexagon /// The logic I'm doing in here is a little crazy. 
/// By rotating the cordinate grid three times I can figure out the "index" in terms of number of /// hexagons from a the starting point /// This effectivly tesalates the hexagons into 6 triangles, this algorithm gets the location of /// the triangle clicked, then figures out which hexagon that triangle belongs in. fn get_hex_address(xo:f32, yo:f32, hexagon:&HexagonDescription) -> HexAddr { let hex_height = hexagon.half_height as f32; let plane1 = yo / hex_height; let incangle = INCREMENT_ANGLE * -2.0; // -120 degrees //let x = xo * incangle.cos() + yo * incangle.sin(); let y = xo * incangle.sin() + yo * incangle.cos(); let plane2 = -y / hex_height; // TODO why did I need to multiply this by two?? let incangle = INCREMENT_ANGLE * -4.0; // -120 degrees //let x = xo * incangle.cos() + yo * incangle.sin(); let y = xo * incangle.sin() + yo * incangle.cos(); let plane3 = y / hex_height ; let cord1 = plane1.floor() as i16; let mut cord2 = plane2.floor() as i16; let mut cord3 = plane3.floor() as i16; // left justify the coordinate system for my own sanity while doing this modulo math cord2 -= cord1/2; cord3 += cord1/2 + 1; let mut y = cord1; let mut x = cord2/3; //println!("a: {} b:{} c:{}", cord1, cord2, cord3); if cord1 % 2 == 0 { // white down if cord2 % 3 == 0 { //println!("white"); y+=1; } else if cord3 % 3 == 1 && cord2 % 3 == 1{ //println!("white"); y +=1; } else { //println!("purple"); x+=1; } } else { // white up if cord2 % 3 == 1 { //println!("white"); } else if cord3 % 3 == 0 && cord2 % 3 == 0 { //println!("white"); } else { //println!("purple"); y +=1; if cord2 %3 != 0 { x +=1; } } } //println!("x:{}, y:{}", x, y); HexAddr{x:x, y:y} } fn note_to_color(note: u8, config: &Config) -> Color { // 0 for root, 1 for in key, 2 for sharp or flat let colors = &config.colors; let key = config.root_note; //C C# D D# E F F# G G# A A# B let major_color_mask = [ colors.root, // c colors.out_of_key, // c# colors.in_key_and_penta, // d colors.out_of_key, // d# 
colors.in_key_and_penta, // e colors.in_key, // f colors.out_of_key, // f# colors.in_key_and_penta, // g colors.out_of_key, // g# colors.in_key_and_penta, // a colors.out_of_key, // a# colors.in_key // b ]; // computed c relative let minor_color_mask = [ colors.in_key_and_penta, // c colors.out_of_key, // c# colors.in_key_and_penta, // d colors.out_of_key, // d# colors.in_key_and_penta, // e colors.in_key, // f colors.out_of_key, // f# colors.in_key_and_penta, // g colors.out_of_key, // g# colors.root, // a colors.out_of_key, // a# colors.in_key // b ]; let index = (note + key ) % 12; if config.is_major { major_color_mask[index as usize] } else { minor_color_mask[index as usize] } } struct Config { colors: ColorProfile, root_note: u8, // 0 for c is_major: bool, hexagon: HexagonDescription, width: u32, height: u32, rows: i16, cols: i16, velocity: u8, } struct ColorProfile { line_color: Color, root: Color, out_of_key: Color, in_key_and_penta: Color, in_key: Color, white: Color, } #[derive(Debug)] struct HexagonDescription { width:i16, height:i16, half_height:i16, radius:i16, x_vec:[i16;6], y_vec:[i16;6], } fn draw_keyboard( renderer:&mut Renderer, font: &Font, config: &Config, keyboard: &Keyboard, pressed_keys: Vec<HexAddr>) -> Result<(),String> { let rows = config.rows; let cols = config.cols; for row in 0..rows { for col in 0..cols { let addr = HexAddr{x:col, y:row}; let is_even = row % 2 == 0; let (mut x_offset, y_offset) = match is_even { true => ((config.hexagon.width + config.hexagon.radius) * col, row * config.hexagon.half_height), false => ((config.hexagon.width + config.hexagon.radius) * col + config.hexagon.radius + config.hexagon.radius/2, row * config.hexagon.half_height), }; x_offset -= config.hexagon.width/2; let (xs, ys) = translate_hexagon(config.hexagon.x_vec, config.hexagon.y_vec, x_offset, y_offset); let key_info = keyboard.get_key_info(addr); let (color, label) = if let Some(key_info) = key_info { (note_to_color(key_info.note, config), 
key_info.label) } else { (config.colors.line_color, " ".to_string()) }; let polygon_color = match pressed_keys.contains(&addr) { true => config.colors.white, false => color, }; try!(renderer.filled_polygon(&xs, &ys, polygon_color)); try!(renderer.polygon(&xs, &ys, config.colors.line_color)); // TODO cache textures for the hex labels // if we don't have a keyboard then just print the row and column numbers let surface = font.render(label.as_str()).blended(config.colors.line_color).unwrap(); let mut texture = renderer.create_texture_from_surface(&surface).unwrap(); let TextureQuery { width, height, .. } = texture.query(); let label_x = (x_offset as i32 - width as i32/2) as i32; let label_y = (y_offset as i32 - height as i32/2) as i32; let target = Rect::new(label_x, label_y, width, height); try!(renderer.copy(&mut texture, None, Some(target))); } } Ok(()) } struct KeyboardState<'a> { active_presses_map : HashMap<i64, HexAddr>, config: &'a Config, connection_out: &'a mut MidiOutputConnection, } impl<'a> KeyboardState<'a> { fn start_note(&mut self, addr: HexAddr, keyboard: &mut Keyboard) { let key = keyboard.get_key_info(addr); if let Some(x) = key { let res = self.connection_out.send(&[NOTE_ON_MSG, x.note, self.config.velocity]); if let Err(err) = res { println!("Error Sending Midi Note {}", err); }; }; } fn end_note(&mut self, addr: HexAddr, keyboard: &mut Keyboard) { let key = keyboard.get_key_info(addr); if let Some(x) = key { let res = self.connection_out.send(&[NOTE_OFF_MSG, x.note, self.config.velocity]); if let Err(err) = res { println!("Error Sending Midi Note {}", err); }; }; } fn on_press(&mut self, oid: i64, x:f32, y:f32, keyboard: &mut Keyboard) { let addr = get_hex_address(x, y, &self.config.hexagon); self.active_presses_map.insert(oid, addr); self.start_note(addr, keyboard); } fn on_release(&mut self, oid: i64, keyboard: &mut Keyboard) { match self.active_presses_map.remove(&oid) { Some(addr) => self.end_note(addr, keyboard), None => (), } } fn 
on_move(&mut self, oid: i64, x:f32, y:f32, keyboard: &mut Keyboard) { let addr = get_hex_address(x, y, &self.config.hexagon); match self.active_presses_map.get(&oid) { None => self.start_note(addr, keyboard), Some(&old_addr) => { if addr != old_addr { self.start_note(addr, keyboard); self.end_note(old_addr, keyboard); } } }; self.active_presses_map.insert(oid, addr); } fn get_pressed(&self) -> Vec<HexAddr> { // TODO this iteration is SLOW and this function is called once per hexagon // TODO make this function FAST! let mut vec = Vec::new(); for (_, &value) in &self.active_presses_map { vec.push(value); } vec } } fn get_midi_connection() -> Result<MidiOutputConnection,Box<Error>> { // TODO improve midi selection criteria, maybe pick off of command line. let midi_out = try!(MidiOutput::new("Isomidi")); let out_port: u32 = match midi_out.port_count() { 0 => return Err("no output port found".into()), _ => { println!("Choosing the last available output port: {}", midi_out.port_name(0).unwrap()); midi_out.port_count() -1 } }; println!("\nOpening connection"); Ok(try!(midi_out.connect(out_port, "isomidi").map_err(|e| e.kind()))) } fn main() { ///////////////////////// ///// CONSTANTS ///////////////////////// // https://coolors.co/f4d06f-ff8811-9dd9d2-fff8f0-392f5a let radius = 75; let screen_height = 1200; let screen_width = 1800; let ttf_font_bytes = include_bytes!("FantasqueSansMono-Regular.ttf"); let mut connection_out = get_midi_connection().unwrap(); ///////////////////////// //// SDL Setup ///////////////////////// let sdl_context = sdl2::init().unwrap(); let video_subsystem = sdl_context.video().unwrap(); video_subsystem.gl_attr().set_multisample_samples(8); let ttf_context = sdl2::ttf::init().unwrap();
.build() .unwrap(); let mut renderer = window.renderer().build().unwrap(); let font_rwop = RWops::from_bytes(ttf_font_bytes).unwrap(); let keyboard_font = ttf_context.load_font_from_rwops(font_rwop, 20).unwrap(); // be bold // keyboard_font.set_style(sdl2::ttf::STYLE_BOLD); // Draw a black screen renderer.set_draw_color(Color::RGB(0, 0, 0)); renderer.clear(); renderer.present(); let mut event_pump = sdl_context.event_pump().unwrap(); ///////////////////////// //// Load the keyboard ///////////////////////// //let mut keyboard = JammerKeyboard {}; 'config: loop { ///////////////////////// ///// Derived Constants ///////////////////////// let colors = ColorProfile { line_color: Color::RGB(0, 0, 0), root : Color::RGB(0xf4,0xD0,0x6F), // root out_of_key : Color::RGB(0xff,0x88,0x11), // sharp/flat in_key_and_penta : Color::RGB(0x9D,0x9D,0xD2), // in key & pentatonic in_key : Color::RGB(0xba, 0xba, 0xdf), // in key white : Color::RGB(0x39,0x2F,0x5A), }; let (hexagon_x, hexagon_y) = get_hexagon(0,0,radius); let half_height = ((INCREMENT_ANGLE).sin() * radius as f32).round() as i16; let hexagon = HexagonDescription { width : (radius * 2 ) as i16, half_height: half_height, height : half_height * 2, radius: radius, x_vec: hexagon_x, y_vec: hexagon_y, }; println!("hexagon: {:?}", hexagon); let size = { let mut window = renderer.window_mut().unwrap(); window.size() }; let rows = (size.1 as i16/hexagon.height as i16) * 2 + 3; let cols = (size.0 as i16/(hexagon.width + hexagon.radius)) as i16 + 2; let mut config = Config { colors: colors, root_note: 0, // C is_major: true, hexagon: hexagon, width: size.0, height: size.1, rows: rows, cols: cols, velocity: 70, }; let mut keyboard = HarmonicKeyboard {}; let mut keyboard_state = KeyboardState { config: &config, active_presses_map: HashMap::new(), connection_out: &mut connection_out, }; ///////////////////////// //// Main loop ///////////////////////// let mut frame_count = 0; let mut last_time = Instant::now(); let mut first_run = 
true; 'render: loop { // TODO sleep till next event? let sleep_time = Duration::from_millis(10); thread::sleep(sleep_time); // TODO: How are we going to do multi finger tracking and mouse tracking? // list of active fingerids / mouse id plus the current hex addr. // on hex addr change fire on_key_press let mut trigger_draw = false; if first_run { trigger_draw = true; first_run = false } for event in event_pump.poll_iter() { match event { Event::Quit {..} | Event::KeyDown { keycode: Some(Keycode::Escape), .. } => { println!("Exiting"); break 'config }, Event::MouseButtonDown {x, y, ..} => { keyboard_state.on_press(MOUSE_OID, x as f32, y as f32, &mut keyboard); trigger_draw = true; }, Event::MouseButtonUp {..} => { keyboard_state.on_release(MOUSE_OID, &mut keyboard); trigger_draw = true; }, Event::MouseMotion {x, y, mousestate, ..} => { // track only if left mouse button is down if mousestate.left() { keyboard_state.on_move(MOUSE_OID, x as f32, y as f32, &mut keyboard); trigger_draw = true; } }, Event::FingerDown {x, y, finger_id, ..} => { keyboard_state.on_press(finger_id, x as f32, y as f32, &mut keyboard); trigger_draw = true; }, Event::FingerMotion {x, y, finger_id, ..} => { keyboard_state.on_move(finger_id, x as f32, y as f32, &mut keyboard); trigger_draw = true; }, Event::FingerUp {finger_id, ..} => { keyboard_state.on_release(finger_id, &mut keyboard); trigger_draw = true; }, Event::Window {win_event, ..} => { match win_event { WindowEvent::SizeChanged (width, height) => { // breaks out of the render loop and reconfigures the application break 'render } _ => {} } }, _ => {} } } if trigger_draw { renderer.set_draw_color(config.colors.line_color); renderer.clear(); draw_keyboard(&mut renderer, &keyboard_font, &config, &keyboard, keyboard_state.get_pressed()).unwrap(); } renderer.present(); // TODO render a stats section in app frame_count += 1; if last_time.elapsed() > Duration::from_secs(1) { println!("fps {}", frame_count); frame_count = 0; last_time = 
Instant::now(); } } } }
let window = video_subsystem.window("Isomidi", screen_width, screen_height) .position_centered() .opengl() .resizable()
random_line_split
main.rs
extern crate sdl2; extern crate midir; mod keyboard; use sdl2::rect::Rect; use sdl2::render::TextureQuery; use std::{thread}; use std::time::{Instant,Duration}; use sdl2::pixels::Color; use sdl2::ttf::Font; use sdl2::event::Event; use sdl2::event::WindowEvent; use sdl2::keyboard::Keycode; use sdl2::gfx::primitives::DrawRenderer; use sdl2::render::Renderer; use std::f32; use std::f32::consts::PI; use std::collections::HashMap; use keyboard::Keyboard; use keyboard::HexAddr; use keyboard::HexKey; use keyboard::HarmonicKeyboard; use keyboard::JammerKeyboard; use midir::{MidiOutput, MidiOutputConnection}; use std::error::Error; use sdl2::rwops::RWops; const INCREMENT_ANGLE:f32 = 2.0*PI/6.0; // 60 degrees in radians const MOUSE_OID:i64 = -1; const NOTE_ON_MSG: u8 = 0x90; const NOTE_OFF_MSG: u8 = 0x80; /* TODO * Octave Shifing * Keyboard Rotation * Multi Key Highlighting * Readme for github * Factor out midi * Better error handling/remove .unwraps * Add Guitar Layout * Consider changing draw_keyboard so that it draws only the key changes and not the whole board * every time. * Correct velocity controls? */ fn get_hexagon(x:i16, y:i16, radius:i16) -> ([i16;6], [i16;6]) { // TODO this function needs to be broken up into a calculate and translate section, we don't // need to redo the sin math every time. let r:f32 = radius as f32; let mut angle:f32 = INCREMENT_ANGLE/2.0; let mut xs: [i16;6] = [0; 6]; let mut ys: [i16;6] = [0; 6]; for i in 0..6 { let xo = angle.sin()*r; let yo = angle.cos()*r; angle += INCREMENT_ANGLE; xs[i] = x + xo.round() as i16; ys[i] = y + yo.round() as i16; } return (xs, ys) } fn translate_hexagon(xlist:[i16;6], ylist:[i16;6], x:i16, y:i16) -> ([i16;6], [i16;6]) { let mut xs: [i16;6] = [0; 6]; let mut ys: [i16;6] = [0; 6]; for i in 0..6 { xs[i] = xlist[i] + x; ys[i] = ylist[i] + y; } return (xs, ys) } /// Given the x and y locations of a click, return the address of the hexagon /// The logic I'm doing in here is a little crazy. 
/// By rotating the cordinate grid three times I can figure out the "index" in terms of number of /// hexagons from a the starting point /// This effectivly tesalates the hexagons into 6 triangles, this algorithm gets the location of /// the triangle clicked, then figures out which hexagon that triangle belongs in. fn get_hex_address(xo:f32, yo:f32, hexagon:&HexagonDescription) -> HexAddr { let hex_height = hexagon.half_height as f32; let plane1 = yo / hex_height; let incangle = INCREMENT_ANGLE * -2.0; // -120 degrees //let x = xo * incangle.cos() + yo * incangle.sin(); let y = xo * incangle.sin() + yo * incangle.cos(); let plane2 = -y / hex_height; // TODO why did I need to multiply this by two?? let incangle = INCREMENT_ANGLE * -4.0; // -120 degrees //let x = xo * incangle.cos() + yo * incangle.sin(); let y = xo * incangle.sin() + yo * incangle.cos(); let plane3 = y / hex_height ; let cord1 = plane1.floor() as i16; let mut cord2 = plane2.floor() as i16; let mut cord3 = plane3.floor() as i16; // left justify the coordinate system for my own sanity while doing this modulo math cord2 -= cord1/2; cord3 += cord1/2 + 1; let mut y = cord1; let mut x = cord2/3; //println!("a: {} b:{} c:{}", cord1, cord2, cord3); if cord1 % 2 == 0 { // white down if cord2 % 3 == 0 { //println!("white"); y+=1; } else if cord3 % 3 == 1 && cord2 % 3 == 1{ //println!("white"); y +=1; } else { //println!("purple"); x+=1; } } else { // white up if cord2 % 3 == 1 { //println!("white"); } else if cord3 % 3 == 0 && cord2 % 3 == 0 { //println!("white"); } else { //println!("purple"); y +=1; if cord2 %3 != 0 { x +=1; } } } //println!("x:{}, y:{}", x, y); HexAddr{x:x, y:y} } fn note_to_color(note: u8, config: &Config) -> Color { // 0 for root, 1 for in key, 2 for sharp or flat let colors = &config.colors; let key = config.root_note; //C C# D D# E F F# G G# A A# B let major_color_mask = [ colors.root, // c colors.out_of_key, // c# colors.in_key_and_penta, // d colors.out_of_key, // d# 
colors.in_key_and_penta, // e colors.in_key, // f colors.out_of_key, // f# colors.in_key_and_penta, // g colors.out_of_key, // g# colors.in_key_and_penta, // a colors.out_of_key, // a# colors.in_key // b ]; // computed c relative let minor_color_mask = [ colors.in_key_and_penta, // c colors.out_of_key, // c# colors.in_key_and_penta, // d colors.out_of_key, // d# colors.in_key_and_penta, // e colors.in_key, // f colors.out_of_key, // f# colors.in_key_and_penta, // g colors.out_of_key, // g# colors.root, // a colors.out_of_key, // a# colors.in_key // b ]; let index = (note + key ) % 12; if config.is_major { major_color_mask[index as usize] } else { minor_color_mask[index as usize] } } struct Config { colors: ColorProfile, root_note: u8, // 0 for c is_major: bool, hexagon: HexagonDescription, width: u32, height: u32, rows: i16, cols: i16, velocity: u8, } struct ColorProfile { line_color: Color, root: Color, out_of_key: Color, in_key_and_penta: Color, in_key: Color, white: Color, } #[derive(Debug)] struct HexagonDescription { width:i16, height:i16, half_height:i16, radius:i16, x_vec:[i16;6], y_vec:[i16;6], } fn draw_keyboard( renderer:&mut Renderer, font: &Font, config: &Config, keyboard: &Keyboard, pressed_keys: Vec<HexAddr>) -> Result<(),String> { let rows = config.rows; let cols = config.cols; for row in 0..rows { for col in 0..cols { let addr = HexAddr{x:col, y:row}; let is_even = row % 2 == 0; let (mut x_offset, y_offset) = match is_even { true => ((config.hexagon.width + config.hexagon.radius) * col, row * config.hexagon.half_height), false => ((config.hexagon.width + config.hexagon.radius) * col + config.hexagon.radius + config.hexagon.radius/2, row * config.hexagon.half_height), }; x_offset -= config.hexagon.width/2; let (xs, ys) = translate_hexagon(config.hexagon.x_vec, config.hexagon.y_vec, x_offset, y_offset); let key_info = keyboard.get_key_info(addr); let (color, label) = if let Some(key_info) = key_info { (note_to_color(key_info.note, config), 
key_info.label) } else { (config.colors.line_color, " ".to_string()) }; let polygon_color = match pressed_keys.contains(&addr) { true => config.colors.white, false => color, }; try!(renderer.filled_polygon(&xs, &ys, polygon_color)); try!(renderer.polygon(&xs, &ys, config.colors.line_color)); // TODO cache textures for the hex labels // if we don't have a keyboard then just print the row and column numbers let surface = font.render(label.as_str()).blended(config.colors.line_color).unwrap(); let mut texture = renderer.create_texture_from_surface(&surface).unwrap(); let TextureQuery { width, height, .. } = texture.query(); let label_x = (x_offset as i32 - width as i32/2) as i32; let label_y = (y_offset as i32 - height as i32/2) as i32; let target = Rect::new(label_x, label_y, width, height); try!(renderer.copy(&mut texture, None, Some(target))); } } Ok(()) } struct KeyboardState<'a> { active_presses_map : HashMap<i64, HexAddr>, config: &'a Config, connection_out: &'a mut MidiOutputConnection, } impl<'a> KeyboardState<'a> { fn start_note(&mut self, addr: HexAddr, keyboard: &mut Keyboard) { let key = keyboard.get_key_info(addr); if let Some(x) = key { let res = self.connection_out.send(&[NOTE_ON_MSG, x.note, self.config.velocity]); if let Err(err) = res { println!("Error Sending Midi Note {}", err); }; }; } fn end_note(&mut self, addr: HexAddr, keyboard: &mut Keyboard) { let key = keyboard.get_key_info(addr); if let Some(x) = key { let res = self.connection_out.send(&[NOTE_OFF_MSG, x.note, self.config.velocity]); if let Err(err) = res { println!("Error Sending Midi Note {}", err); }; }; } fn on_press(&mut self, oid: i64, x:f32, y:f32, keyboard: &mut Keyboard) { let addr = get_hex_address(x, y, &self.config.hexagon); self.active_presses_map.insert(oid, addr); self.start_note(addr, keyboard); } fn on_release(&mut self, oid: i64, keyboard: &mut Keyboard) { match self.active_presses_map.remove(&oid) { Some(addr) => self.end_note(addr, keyboard), None => (), } } fn 
on_move(&mut self, oid: i64, x:f32, y:f32, keyboard: &mut Keyboard) { let addr = get_hex_address(x, y, &self.config.hexagon); match self.active_presses_map.get(&oid) { None => self.start_note(addr, keyboard), Some(&old_addr) =>
}; self.active_presses_map.insert(oid, addr); } fn get_pressed(&self) -> Vec<HexAddr> { // TODO this iteration is SLOW and this function is called once per hexagon // TODO make this function FAST! let mut vec = Vec::new(); for (_, &value) in &self.active_presses_map { vec.push(value); } vec } } fn get_midi_connection() -> Result<MidiOutputConnection,Box<Error>> { // TODO improve midi selection criteria, maybe pick off of command line. let midi_out = try!(MidiOutput::new("Isomidi")); let out_port: u32 = match midi_out.port_count() { 0 => return Err("no output port found".into()), _ => { println!("Choosing the last available output port: {}", midi_out.port_name(0).unwrap()); midi_out.port_count() -1 } }; println!("\nOpening connection"); Ok(try!(midi_out.connect(out_port, "isomidi").map_err(|e| e.kind()))) } fn main() { ///////////////////////// ///// CONSTANTS ///////////////////////// // https://coolors.co/f4d06f-ff8811-9dd9d2-fff8f0-392f5a let radius = 75; let screen_height = 1200; let screen_width = 1800; let ttf_font_bytes = include_bytes!("FantasqueSansMono-Regular.ttf"); let mut connection_out = get_midi_connection().unwrap(); ///////////////////////// //// SDL Setup ///////////////////////// let sdl_context = sdl2::init().unwrap(); let video_subsystem = sdl_context.video().unwrap(); video_subsystem.gl_attr().set_multisample_samples(8); let ttf_context = sdl2::ttf::init().unwrap(); let window = video_subsystem.window("Isomidi", screen_width, screen_height) .position_centered() .opengl() .resizable() .build() .unwrap(); let mut renderer = window.renderer().build().unwrap(); let font_rwop = RWops::from_bytes(ttf_font_bytes).unwrap(); let keyboard_font = ttf_context.load_font_from_rwops(font_rwop, 20).unwrap(); // be bold // keyboard_font.set_style(sdl2::ttf::STYLE_BOLD); // Draw a black screen renderer.set_draw_color(Color::RGB(0, 0, 0)); renderer.clear(); renderer.present(); let mut event_pump = sdl_context.event_pump().unwrap(); ///////////////////////// //// 
Load the keyboard ///////////////////////// //let mut keyboard = JammerKeyboard {}; 'config: loop { ///////////////////////// ///// Derived Constants ///////////////////////// let colors = ColorProfile { line_color: Color::RGB(0, 0, 0), root : Color::RGB(0xf4,0xD0,0x6F), // root out_of_key : Color::RGB(0xff,0x88,0x11), // sharp/flat in_key_and_penta : Color::RGB(0x9D,0x9D,0xD2), // in key & pentatonic in_key : Color::RGB(0xba, 0xba, 0xdf), // in key white : Color::RGB(0x39,0x2F,0x5A), }; let (hexagon_x, hexagon_y) = get_hexagon(0,0,radius); let half_height = ((INCREMENT_ANGLE).sin() * radius as f32).round() as i16; let hexagon = HexagonDescription { width : (radius * 2 ) as i16, half_height: half_height, height : half_height * 2, radius: radius, x_vec: hexagon_x, y_vec: hexagon_y, }; println!("hexagon: {:?}", hexagon); let size = { let mut window = renderer.window_mut().unwrap(); window.size() }; let rows = (size.1 as i16/hexagon.height as i16) * 2 + 3; let cols = (size.0 as i16/(hexagon.width + hexagon.radius)) as i16 + 2; let mut config = Config { colors: colors, root_note: 0, // C is_major: true, hexagon: hexagon, width: size.0, height: size.1, rows: rows, cols: cols, velocity: 70, }; let mut keyboard = HarmonicKeyboard {}; let mut keyboard_state = KeyboardState { config: &config, active_presses_map: HashMap::new(), connection_out: &mut connection_out, }; ///////////////////////// //// Main loop ///////////////////////// let mut frame_count = 0; let mut last_time = Instant::now(); let mut first_run = true; 'render: loop { // TODO sleep till next event? let sleep_time = Duration::from_millis(10); thread::sleep(sleep_time); // TODO: How are we going to do multi finger tracking and mouse tracking? // list of active fingerids / mouse id plus the current hex addr. 
// on hex addr change fire on_key_press let mut trigger_draw = false; if first_run { trigger_draw = true; first_run = false } for event in event_pump.poll_iter() { match event { Event::Quit {..} | Event::KeyDown { keycode: Some(Keycode::Escape), .. } => { println!("Exiting"); break 'config }, Event::MouseButtonDown {x, y, ..} => { keyboard_state.on_press(MOUSE_OID, x as f32, y as f32, &mut keyboard); trigger_draw = true; }, Event::MouseButtonUp {..} => { keyboard_state.on_release(MOUSE_OID, &mut keyboard); trigger_draw = true; }, Event::MouseMotion {x, y, mousestate, ..} => { // track only if left mouse button is down if mousestate.left() { keyboard_state.on_move(MOUSE_OID, x as f32, y as f32, &mut keyboard); trigger_draw = true; } }, Event::FingerDown {x, y, finger_id, ..} => { keyboard_state.on_press(finger_id, x as f32, y as f32, &mut keyboard); trigger_draw = true; }, Event::FingerMotion {x, y, finger_id, ..} => { keyboard_state.on_move(finger_id, x as f32, y as f32, &mut keyboard); trigger_draw = true; }, Event::FingerUp {finger_id, ..} => { keyboard_state.on_release(finger_id, &mut keyboard); trigger_draw = true; }, Event::Window {win_event, ..} => { match win_event { WindowEvent::SizeChanged (width, height) => { // breaks out of the render loop and reconfigures the application break 'render } _ => {} } }, _ => {} } } if trigger_draw { renderer.set_draw_color(config.colors.line_color); renderer.clear(); draw_keyboard(&mut renderer, &keyboard_font, &config, &keyboard, keyboard_state.get_pressed()).unwrap(); } renderer.present(); // TODO render a stats section in app frame_count += 1; if last_time.elapsed() > Duration::from_secs(1) { println!("fps {}", frame_count); frame_count = 0; last_time = Instant::now(); } } } }
{ if addr != old_addr { self.start_note(addr, keyboard); self.end_note(old_addr, keyboard); } }
conditional_block
main.rs
extern crate sdl2; extern crate midir; mod keyboard; use sdl2::rect::Rect; use sdl2::render::TextureQuery; use std::{thread}; use std::time::{Instant,Duration}; use sdl2::pixels::Color; use sdl2::ttf::Font; use sdl2::event::Event; use sdl2::event::WindowEvent; use sdl2::keyboard::Keycode; use sdl2::gfx::primitives::DrawRenderer; use sdl2::render::Renderer; use std::f32; use std::f32::consts::PI; use std::collections::HashMap; use keyboard::Keyboard; use keyboard::HexAddr; use keyboard::HexKey; use keyboard::HarmonicKeyboard; use keyboard::JammerKeyboard; use midir::{MidiOutput, MidiOutputConnection}; use std::error::Error; use sdl2::rwops::RWops; const INCREMENT_ANGLE:f32 = 2.0*PI/6.0; // 60 degrees in radians const MOUSE_OID:i64 = -1; const NOTE_ON_MSG: u8 = 0x90; const NOTE_OFF_MSG: u8 = 0x80; /* TODO * Octave Shifing * Keyboard Rotation * Multi Key Highlighting * Readme for github * Factor out midi * Better error handling/remove .unwraps * Add Guitar Layout * Consider changing draw_keyboard so that it draws only the key changes and not the whole board * every time. * Correct velocity controls? */ fn get_hexagon(x:i16, y:i16, radius:i16) -> ([i16;6], [i16;6]) { // TODO this function needs to be broken up into a calculate and translate section, we don't // need to redo the sin math every time. let r:f32 = radius as f32; let mut angle:f32 = INCREMENT_ANGLE/2.0; let mut xs: [i16;6] = [0; 6]; let mut ys: [i16;6] = [0; 6]; for i in 0..6 { let xo = angle.sin()*r; let yo = angle.cos()*r; angle += INCREMENT_ANGLE; xs[i] = x + xo.round() as i16; ys[i] = y + yo.round() as i16; } return (xs, ys) } fn translate_hexagon(xlist:[i16;6], ylist:[i16;6], x:i16, y:i16) -> ([i16;6], [i16;6]) { let mut xs: [i16;6] = [0; 6]; let mut ys: [i16;6] = [0; 6]; for i in 0..6 { xs[i] = xlist[i] + x; ys[i] = ylist[i] + y; } return (xs, ys) } /// Given the x and y locations of a click, return the address of the hexagon /// The logic I'm doing in here is a little crazy. 
/// By rotating the cordinate grid three times I can figure out the "index" in terms of number of /// hexagons from a the starting point /// This effectivly tesalates the hexagons into 6 triangles, this algorithm gets the location of /// the triangle clicked, then figures out which hexagon that triangle belongs in. fn
(xo:f32, yo:f32, hexagon:&HexagonDescription) -> HexAddr { let hex_height = hexagon.half_height as f32; let plane1 = yo / hex_height; let incangle = INCREMENT_ANGLE * -2.0; // -120 degrees //let x = xo * incangle.cos() + yo * incangle.sin(); let y = xo * incangle.sin() + yo * incangle.cos(); let plane2 = -y / hex_height; // TODO why did I need to multiply this by two?? let incangle = INCREMENT_ANGLE * -4.0; // -120 degrees //let x = xo * incangle.cos() + yo * incangle.sin(); let y = xo * incangle.sin() + yo * incangle.cos(); let plane3 = y / hex_height ; let cord1 = plane1.floor() as i16; let mut cord2 = plane2.floor() as i16; let mut cord3 = plane3.floor() as i16; // left justify the coordinate system for my own sanity while doing this modulo math cord2 -= cord1/2; cord3 += cord1/2 + 1; let mut y = cord1; let mut x = cord2/3; //println!("a: {} b:{} c:{}", cord1, cord2, cord3); if cord1 % 2 == 0 { // white down if cord2 % 3 == 0 { //println!("white"); y+=1; } else if cord3 % 3 == 1 && cord2 % 3 == 1{ //println!("white"); y +=1; } else { //println!("purple"); x+=1; } } else { // white up if cord2 % 3 == 1 { //println!("white"); } else if cord3 % 3 == 0 && cord2 % 3 == 0 { //println!("white"); } else { //println!("purple"); y +=1; if cord2 %3 != 0 { x +=1; } } } //println!("x:{}, y:{}", x, y); HexAddr{x:x, y:y} } fn note_to_color(note: u8, config: &Config) -> Color { // 0 for root, 1 for in key, 2 for sharp or flat let colors = &config.colors; let key = config.root_note; //C C# D D# E F F# G G# A A# B let major_color_mask = [ colors.root, // c colors.out_of_key, // c# colors.in_key_and_penta, // d colors.out_of_key, // d# colors.in_key_and_penta, // e colors.in_key, // f colors.out_of_key, // f# colors.in_key_and_penta, // g colors.out_of_key, // g# colors.in_key_and_penta, // a colors.out_of_key, // a# colors.in_key // b ]; // computed c relative let minor_color_mask = [ colors.in_key_and_penta, // c colors.out_of_key, // c# colors.in_key_and_penta, // d 
colors.out_of_key, // d# colors.in_key_and_penta, // e colors.in_key, // f colors.out_of_key, // f# colors.in_key_and_penta, // g colors.out_of_key, // g# colors.root, // a colors.out_of_key, // a# colors.in_key // b ]; let index = (note + key ) % 12; if config.is_major { major_color_mask[index as usize] } else { minor_color_mask[index as usize] } } struct Config { colors: ColorProfile, root_note: u8, // 0 for c is_major: bool, hexagon: HexagonDescription, width: u32, height: u32, rows: i16, cols: i16, velocity: u8, } struct ColorProfile { line_color: Color, root: Color, out_of_key: Color, in_key_and_penta: Color, in_key: Color, white: Color, } #[derive(Debug)] struct HexagonDescription { width:i16, height:i16, half_height:i16, radius:i16, x_vec:[i16;6], y_vec:[i16;6], } fn draw_keyboard( renderer:&mut Renderer, font: &Font, config: &Config, keyboard: &Keyboard, pressed_keys: Vec<HexAddr>) -> Result<(),String> { let rows = config.rows; let cols = config.cols; for row in 0..rows { for col in 0..cols { let addr = HexAddr{x:col, y:row}; let is_even = row % 2 == 0; let (mut x_offset, y_offset) = match is_even { true => ((config.hexagon.width + config.hexagon.radius) * col, row * config.hexagon.half_height), false => ((config.hexagon.width + config.hexagon.radius) * col + config.hexagon.radius + config.hexagon.radius/2, row * config.hexagon.half_height), }; x_offset -= config.hexagon.width/2; let (xs, ys) = translate_hexagon(config.hexagon.x_vec, config.hexagon.y_vec, x_offset, y_offset); let key_info = keyboard.get_key_info(addr); let (color, label) = if let Some(key_info) = key_info { (note_to_color(key_info.note, config), key_info.label) } else { (config.colors.line_color, " ".to_string()) }; let polygon_color = match pressed_keys.contains(&addr) { true => config.colors.white, false => color, }; try!(renderer.filled_polygon(&xs, &ys, polygon_color)); try!(renderer.polygon(&xs, &ys, config.colors.line_color)); // TODO cache textures for the hex labels // if we don't 
have a keyboard then just print the row and column numbers let surface = font.render(label.as_str()).blended(config.colors.line_color).unwrap(); let mut texture = renderer.create_texture_from_surface(&surface).unwrap(); let TextureQuery { width, height, .. } = texture.query(); let label_x = (x_offset as i32 - width as i32/2) as i32; let label_y = (y_offset as i32 - height as i32/2) as i32; let target = Rect::new(label_x, label_y, width, height); try!(renderer.copy(&mut texture, None, Some(target))); } } Ok(()) } struct KeyboardState<'a> { active_presses_map : HashMap<i64, HexAddr>, config: &'a Config, connection_out: &'a mut MidiOutputConnection, } impl<'a> KeyboardState<'a> { fn start_note(&mut self, addr: HexAddr, keyboard: &mut Keyboard) { let key = keyboard.get_key_info(addr); if let Some(x) = key { let res = self.connection_out.send(&[NOTE_ON_MSG, x.note, self.config.velocity]); if let Err(err) = res { println!("Error Sending Midi Note {}", err); }; }; } fn end_note(&mut self, addr: HexAddr, keyboard: &mut Keyboard) { let key = keyboard.get_key_info(addr); if let Some(x) = key { let res = self.connection_out.send(&[NOTE_OFF_MSG, x.note, self.config.velocity]); if let Err(err) = res { println!("Error Sending Midi Note {}", err); }; }; } fn on_press(&mut self, oid: i64, x:f32, y:f32, keyboard: &mut Keyboard) { let addr = get_hex_address(x, y, &self.config.hexagon); self.active_presses_map.insert(oid, addr); self.start_note(addr, keyboard); } fn on_release(&mut self, oid: i64, keyboard: &mut Keyboard) { match self.active_presses_map.remove(&oid) { Some(addr) => self.end_note(addr, keyboard), None => (), } } fn on_move(&mut self, oid: i64, x:f32, y:f32, keyboard: &mut Keyboard) { let addr = get_hex_address(x, y, &self.config.hexagon); match self.active_presses_map.get(&oid) { None => self.start_note(addr, keyboard), Some(&old_addr) => { if addr != old_addr { self.start_note(addr, keyboard); self.end_note(old_addr, keyboard); } } }; 
self.active_presses_map.insert(oid, addr); } fn get_pressed(&self) -> Vec<HexAddr> { // TODO this iteration is SLOW and this function is called once per hexagon // TODO make this function FAST! let mut vec = Vec::new(); for (_, &value) in &self.active_presses_map { vec.push(value); } vec } } fn get_midi_connection() -> Result<MidiOutputConnection,Box<Error>> { // TODO improve midi selection criteria, maybe pick off of command line. let midi_out = try!(MidiOutput::new("Isomidi")); let out_port: u32 = match midi_out.port_count() { 0 => return Err("no output port found".into()), _ => { println!("Choosing the last available output port: {}", midi_out.port_name(0).unwrap()); midi_out.port_count() -1 } }; println!("\nOpening connection"); Ok(try!(midi_out.connect(out_port, "isomidi").map_err(|e| e.kind()))) } fn main() { ///////////////////////// ///// CONSTANTS ///////////////////////// // https://coolors.co/f4d06f-ff8811-9dd9d2-fff8f0-392f5a let radius = 75; let screen_height = 1200; let screen_width = 1800; let ttf_font_bytes = include_bytes!("FantasqueSansMono-Regular.ttf"); let mut connection_out = get_midi_connection().unwrap(); ///////////////////////// //// SDL Setup ///////////////////////// let sdl_context = sdl2::init().unwrap(); let video_subsystem = sdl_context.video().unwrap(); video_subsystem.gl_attr().set_multisample_samples(8); let ttf_context = sdl2::ttf::init().unwrap(); let window = video_subsystem.window("Isomidi", screen_width, screen_height) .position_centered() .opengl() .resizable() .build() .unwrap(); let mut renderer = window.renderer().build().unwrap(); let font_rwop = RWops::from_bytes(ttf_font_bytes).unwrap(); let keyboard_font = ttf_context.load_font_from_rwops(font_rwop, 20).unwrap(); // be bold // keyboard_font.set_style(sdl2::ttf::STYLE_BOLD); // Draw a black screen renderer.set_draw_color(Color::RGB(0, 0, 0)); renderer.clear(); renderer.present(); let mut event_pump = sdl_context.event_pump().unwrap(); ///////////////////////// //// 
Load the keyboard ///////////////////////// //let mut keyboard = JammerKeyboard {}; 'config: loop { ///////////////////////// ///// Derived Constants ///////////////////////// let colors = ColorProfile { line_color: Color::RGB(0, 0, 0), root : Color::RGB(0xf4,0xD0,0x6F), // root out_of_key : Color::RGB(0xff,0x88,0x11), // sharp/flat in_key_and_penta : Color::RGB(0x9D,0x9D,0xD2), // in key & pentatonic in_key : Color::RGB(0xba, 0xba, 0xdf), // in key white : Color::RGB(0x39,0x2F,0x5A), }; let (hexagon_x, hexagon_y) = get_hexagon(0,0,radius); let half_height = ((INCREMENT_ANGLE).sin() * radius as f32).round() as i16; let hexagon = HexagonDescription { width : (radius * 2 ) as i16, half_height: half_height, height : half_height * 2, radius: radius, x_vec: hexagon_x, y_vec: hexagon_y, }; println!("hexagon: {:?}", hexagon); let size = { let mut window = renderer.window_mut().unwrap(); window.size() }; let rows = (size.1 as i16/hexagon.height as i16) * 2 + 3; let cols = (size.0 as i16/(hexagon.width + hexagon.radius)) as i16 + 2; let mut config = Config { colors: colors, root_note: 0, // C is_major: true, hexagon: hexagon, width: size.0, height: size.1, rows: rows, cols: cols, velocity: 70, }; let mut keyboard = HarmonicKeyboard {}; let mut keyboard_state = KeyboardState { config: &config, active_presses_map: HashMap::new(), connection_out: &mut connection_out, }; ///////////////////////// //// Main loop ///////////////////////// let mut frame_count = 0; let mut last_time = Instant::now(); let mut first_run = true; 'render: loop { // TODO sleep till next event? let sleep_time = Duration::from_millis(10); thread::sleep(sleep_time); // TODO: How are we going to do multi finger tracking and mouse tracking? // list of active fingerids / mouse id plus the current hex addr. 
// on hex addr change fire on_key_press let mut trigger_draw = false; if first_run { trigger_draw = true; first_run = false } for event in event_pump.poll_iter() { match event { Event::Quit {..} | Event::KeyDown { keycode: Some(Keycode::Escape), .. } => { println!("Exiting"); break 'config }, Event::MouseButtonDown {x, y, ..} => { keyboard_state.on_press(MOUSE_OID, x as f32, y as f32, &mut keyboard); trigger_draw = true; }, Event::MouseButtonUp {..} => { keyboard_state.on_release(MOUSE_OID, &mut keyboard); trigger_draw = true; }, Event::MouseMotion {x, y, mousestate, ..} => { // track only if left mouse button is down if mousestate.left() { keyboard_state.on_move(MOUSE_OID, x as f32, y as f32, &mut keyboard); trigger_draw = true; } }, Event::FingerDown {x, y, finger_id, ..} => { keyboard_state.on_press(finger_id, x as f32, y as f32, &mut keyboard); trigger_draw = true; }, Event::FingerMotion {x, y, finger_id, ..} => { keyboard_state.on_move(finger_id, x as f32, y as f32, &mut keyboard); trigger_draw = true; }, Event::FingerUp {finger_id, ..} => { keyboard_state.on_release(finger_id, &mut keyboard); trigger_draw = true; }, Event::Window {win_event, ..} => { match win_event { WindowEvent::SizeChanged (width, height) => { // breaks out of the render loop and reconfigures the application break 'render } _ => {} } }, _ => {} } } if trigger_draw { renderer.set_draw_color(config.colors.line_color); renderer.clear(); draw_keyboard(&mut renderer, &keyboard_font, &config, &keyboard, keyboard_state.get_pressed()).unwrap(); } renderer.present(); // TODO render a stats section in app frame_count += 1; if last_time.elapsed() > Duration::from_secs(1) { println!("fps {}", frame_count); frame_count = 0; last_time = Instant::now(); } } } }
get_hex_address
identifier_name
main.rs
extern crate sdl2; extern crate midir; mod keyboard; use sdl2::rect::Rect; use sdl2::render::TextureQuery; use std::{thread}; use std::time::{Instant,Duration}; use sdl2::pixels::Color; use sdl2::ttf::Font; use sdl2::event::Event; use sdl2::event::WindowEvent; use sdl2::keyboard::Keycode; use sdl2::gfx::primitives::DrawRenderer; use sdl2::render::Renderer; use std::f32; use std::f32::consts::PI; use std::collections::HashMap; use keyboard::Keyboard; use keyboard::HexAddr; use keyboard::HexKey; use keyboard::HarmonicKeyboard; use keyboard::JammerKeyboard; use midir::{MidiOutput, MidiOutputConnection}; use std::error::Error; use sdl2::rwops::RWops; const INCREMENT_ANGLE:f32 = 2.0*PI/6.0; // 60 degrees in radians const MOUSE_OID:i64 = -1; const NOTE_ON_MSG: u8 = 0x90; const NOTE_OFF_MSG: u8 = 0x80; /* TODO * Octave Shifing * Keyboard Rotation * Multi Key Highlighting * Readme for github * Factor out midi * Better error handling/remove .unwraps * Add Guitar Layout * Consider changing draw_keyboard so that it draws only the key changes and not the whole board * every time. * Correct velocity controls? */ fn get_hexagon(x:i16, y:i16, radius:i16) -> ([i16;6], [i16;6])
fn translate_hexagon(xlist:[i16;6], ylist:[i16;6], x:i16, y:i16) -> ([i16;6], [i16;6]) { let mut xs: [i16;6] = [0; 6]; let mut ys: [i16;6] = [0; 6]; for i in 0..6 { xs[i] = xlist[i] + x; ys[i] = ylist[i] + y; } return (xs, ys) } /// Given the x and y locations of a click, return the address of the hexagon /// The logic I'm doing in here is a little crazy. /// By rotating the cordinate grid three times I can figure out the "index" in terms of number of /// hexagons from a the starting point /// This effectivly tesalates the hexagons into 6 triangles, this algorithm gets the location of /// the triangle clicked, then figures out which hexagon that triangle belongs in. fn get_hex_address(xo:f32, yo:f32, hexagon:&HexagonDescription) -> HexAddr { let hex_height = hexagon.half_height as f32; let plane1 = yo / hex_height; let incangle = INCREMENT_ANGLE * -2.0; // -120 degrees //let x = xo * incangle.cos() + yo * incangle.sin(); let y = xo * incangle.sin() + yo * incangle.cos(); let plane2 = -y / hex_height; // TODO why did I need to multiply this by two?? 
let incangle = INCREMENT_ANGLE * -4.0; // -120 degrees //let x = xo * incangle.cos() + yo * incangle.sin(); let y = xo * incangle.sin() + yo * incangle.cos(); let plane3 = y / hex_height ; let cord1 = plane1.floor() as i16; let mut cord2 = plane2.floor() as i16; let mut cord3 = plane3.floor() as i16; // left justify the coordinate system for my own sanity while doing this modulo math cord2 -= cord1/2; cord3 += cord1/2 + 1; let mut y = cord1; let mut x = cord2/3; //println!("a: {} b:{} c:{}", cord1, cord2, cord3); if cord1 % 2 == 0 { // white down if cord2 % 3 == 0 { //println!("white"); y+=1; } else if cord3 % 3 == 1 && cord2 % 3 == 1{ //println!("white"); y +=1; } else { //println!("purple"); x+=1; } } else { // white up if cord2 % 3 == 1 { //println!("white"); } else if cord3 % 3 == 0 && cord2 % 3 == 0 { //println!("white"); } else { //println!("purple"); y +=1; if cord2 %3 != 0 { x +=1; } } } //println!("x:{}, y:{}", x, y); HexAddr{x:x, y:y} } fn note_to_color(note: u8, config: &Config) -> Color { // 0 for root, 1 for in key, 2 for sharp or flat let colors = &config.colors; let key = config.root_note; //C C# D D# E F F# G G# A A# B let major_color_mask = [ colors.root, // c colors.out_of_key, // c# colors.in_key_and_penta, // d colors.out_of_key, // d# colors.in_key_and_penta, // e colors.in_key, // f colors.out_of_key, // f# colors.in_key_and_penta, // g colors.out_of_key, // g# colors.in_key_and_penta, // a colors.out_of_key, // a# colors.in_key // b ]; // computed c relative let minor_color_mask = [ colors.in_key_and_penta, // c colors.out_of_key, // c# colors.in_key_and_penta, // d colors.out_of_key, // d# colors.in_key_and_penta, // e colors.in_key, // f colors.out_of_key, // f# colors.in_key_and_penta, // g colors.out_of_key, // g# colors.root, // a colors.out_of_key, // a# colors.in_key // b ]; let index = (note + key ) % 12; if config.is_major { major_color_mask[index as usize] } else { minor_color_mask[index as usize] } } struct Config { colors: 
ColorProfile, root_note: u8, // 0 for c is_major: bool, hexagon: HexagonDescription, width: u32, height: u32, rows: i16, cols: i16, velocity: u8, } struct ColorProfile { line_color: Color, root: Color, out_of_key: Color, in_key_and_penta: Color, in_key: Color, white: Color, } #[derive(Debug)] struct HexagonDescription { width:i16, height:i16, half_height:i16, radius:i16, x_vec:[i16;6], y_vec:[i16;6], } fn draw_keyboard( renderer:&mut Renderer, font: &Font, config: &Config, keyboard: &Keyboard, pressed_keys: Vec<HexAddr>) -> Result<(),String> { let rows = config.rows; let cols = config.cols; for row in 0..rows { for col in 0..cols { let addr = HexAddr{x:col, y:row}; let is_even = row % 2 == 0; let (mut x_offset, y_offset) = match is_even { true => ((config.hexagon.width + config.hexagon.radius) * col, row * config.hexagon.half_height), false => ((config.hexagon.width + config.hexagon.radius) * col + config.hexagon.radius + config.hexagon.radius/2, row * config.hexagon.half_height), }; x_offset -= config.hexagon.width/2; let (xs, ys) = translate_hexagon(config.hexagon.x_vec, config.hexagon.y_vec, x_offset, y_offset); let key_info = keyboard.get_key_info(addr); let (color, label) = if let Some(key_info) = key_info { (note_to_color(key_info.note, config), key_info.label) } else { (config.colors.line_color, " ".to_string()) }; let polygon_color = match pressed_keys.contains(&addr) { true => config.colors.white, false => color, }; try!(renderer.filled_polygon(&xs, &ys, polygon_color)); try!(renderer.polygon(&xs, &ys, config.colors.line_color)); // TODO cache textures for the hex labels // if we don't have a keyboard then just print the row and column numbers let surface = font.render(label.as_str()).blended(config.colors.line_color).unwrap(); let mut texture = renderer.create_texture_from_surface(&surface).unwrap(); let TextureQuery { width, height, .. 
} = texture.query(); let label_x = (x_offset as i32 - width as i32/2) as i32; let label_y = (y_offset as i32 - height as i32/2) as i32; let target = Rect::new(label_x, label_y, width, height); try!(renderer.copy(&mut texture, None, Some(target))); } } Ok(()) } struct KeyboardState<'a> { active_presses_map : HashMap<i64, HexAddr>, config: &'a Config, connection_out: &'a mut MidiOutputConnection, } impl<'a> KeyboardState<'a> { fn start_note(&mut self, addr: HexAddr, keyboard: &mut Keyboard) { let key = keyboard.get_key_info(addr); if let Some(x) = key { let res = self.connection_out.send(&[NOTE_ON_MSG, x.note, self.config.velocity]); if let Err(err) = res { println!("Error Sending Midi Note {}", err); }; }; } fn end_note(&mut self, addr: HexAddr, keyboard: &mut Keyboard) { let key = keyboard.get_key_info(addr); if let Some(x) = key { let res = self.connection_out.send(&[NOTE_OFF_MSG, x.note, self.config.velocity]); if let Err(err) = res { println!("Error Sending Midi Note {}", err); }; }; } fn on_press(&mut self, oid: i64, x:f32, y:f32, keyboard: &mut Keyboard) { let addr = get_hex_address(x, y, &self.config.hexagon); self.active_presses_map.insert(oid, addr); self.start_note(addr, keyboard); } fn on_release(&mut self, oid: i64, keyboard: &mut Keyboard) { match self.active_presses_map.remove(&oid) { Some(addr) => self.end_note(addr, keyboard), None => (), } } fn on_move(&mut self, oid: i64, x:f32, y:f32, keyboard: &mut Keyboard) { let addr = get_hex_address(x, y, &self.config.hexagon); match self.active_presses_map.get(&oid) { None => self.start_note(addr, keyboard), Some(&old_addr) => { if addr != old_addr { self.start_note(addr, keyboard); self.end_note(old_addr, keyboard); } } }; self.active_presses_map.insert(oid, addr); } fn get_pressed(&self) -> Vec<HexAddr> { // TODO this iteration is SLOW and this function is called once per hexagon // TODO make this function FAST! 
let mut vec = Vec::new(); for (_, &value) in &self.active_presses_map { vec.push(value); } vec } } fn get_midi_connection() -> Result<MidiOutputConnection,Box<Error>> { // TODO improve midi selection criteria, maybe pick off of command line. let midi_out = try!(MidiOutput::new("Isomidi")); let out_port: u32 = match midi_out.port_count() { 0 => return Err("no output port found".into()), _ => { println!("Choosing the last available output port: {}", midi_out.port_name(0).unwrap()); midi_out.port_count() -1 } }; println!("\nOpening connection"); Ok(try!(midi_out.connect(out_port, "isomidi").map_err(|e| e.kind()))) } fn main() { ///////////////////////// ///// CONSTANTS ///////////////////////// // https://coolors.co/f4d06f-ff8811-9dd9d2-fff8f0-392f5a let radius = 75; let screen_height = 1200; let screen_width = 1800; let ttf_font_bytes = include_bytes!("FantasqueSansMono-Regular.ttf"); let mut connection_out = get_midi_connection().unwrap(); ///////////////////////// //// SDL Setup ///////////////////////// let sdl_context = sdl2::init().unwrap(); let video_subsystem = sdl_context.video().unwrap(); video_subsystem.gl_attr().set_multisample_samples(8); let ttf_context = sdl2::ttf::init().unwrap(); let window = video_subsystem.window("Isomidi", screen_width, screen_height) .position_centered() .opengl() .resizable() .build() .unwrap(); let mut renderer = window.renderer().build().unwrap(); let font_rwop = RWops::from_bytes(ttf_font_bytes).unwrap(); let keyboard_font = ttf_context.load_font_from_rwops(font_rwop, 20).unwrap(); // be bold // keyboard_font.set_style(sdl2::ttf::STYLE_BOLD); // Draw a black screen renderer.set_draw_color(Color::RGB(0, 0, 0)); renderer.clear(); renderer.present(); let mut event_pump = sdl_context.event_pump().unwrap(); ///////////////////////// //// Load the keyboard ///////////////////////// //let mut keyboard = JammerKeyboard {}; 'config: loop { ///////////////////////// ///// Derived Constants ///////////////////////// let colors = 
ColorProfile { line_color: Color::RGB(0, 0, 0), root : Color::RGB(0xf4,0xD0,0x6F), // root out_of_key : Color::RGB(0xff,0x88,0x11), // sharp/flat in_key_and_penta : Color::RGB(0x9D,0x9D,0xD2), // in key & pentatonic in_key : Color::RGB(0xba, 0xba, 0xdf), // in key white : Color::RGB(0x39,0x2F,0x5A), }; let (hexagon_x, hexagon_y) = get_hexagon(0,0,radius); let half_height = ((INCREMENT_ANGLE).sin() * radius as f32).round() as i16; let hexagon = HexagonDescription { width : (radius * 2 ) as i16, half_height: half_height, height : half_height * 2, radius: radius, x_vec: hexagon_x, y_vec: hexagon_y, }; println!("hexagon: {:?}", hexagon); let size = { let mut window = renderer.window_mut().unwrap(); window.size() }; let rows = (size.1 as i16/hexagon.height as i16) * 2 + 3; let cols = (size.0 as i16/(hexagon.width + hexagon.radius)) as i16 + 2; let mut config = Config { colors: colors, root_note: 0, // C is_major: true, hexagon: hexagon, width: size.0, height: size.1, rows: rows, cols: cols, velocity: 70, }; let mut keyboard = HarmonicKeyboard {}; let mut keyboard_state = KeyboardState { config: &config, active_presses_map: HashMap::new(), connection_out: &mut connection_out, }; ///////////////////////// //// Main loop ///////////////////////// let mut frame_count = 0; let mut last_time = Instant::now(); let mut first_run = true; 'render: loop { // TODO sleep till next event? let sleep_time = Duration::from_millis(10); thread::sleep(sleep_time); // TODO: How are we going to do multi finger tracking and mouse tracking? // list of active fingerids / mouse id plus the current hex addr. // on hex addr change fire on_key_press let mut trigger_draw = false; if first_run { trigger_draw = true; first_run = false } for event in event_pump.poll_iter() { match event { Event::Quit {..} | Event::KeyDown { keycode: Some(Keycode::Escape), .. 
} => { println!("Exiting"); break 'config }, Event::MouseButtonDown {x, y, ..} => { keyboard_state.on_press(MOUSE_OID, x as f32, y as f32, &mut keyboard); trigger_draw = true; }, Event::MouseButtonUp {..} => { keyboard_state.on_release(MOUSE_OID, &mut keyboard); trigger_draw = true; }, Event::MouseMotion {x, y, mousestate, ..} => { // track only if left mouse button is down if mousestate.left() { keyboard_state.on_move(MOUSE_OID, x as f32, y as f32, &mut keyboard); trigger_draw = true; } }, Event::FingerDown {x, y, finger_id, ..} => { keyboard_state.on_press(finger_id, x as f32, y as f32, &mut keyboard); trigger_draw = true; }, Event::FingerMotion {x, y, finger_id, ..} => { keyboard_state.on_move(finger_id, x as f32, y as f32, &mut keyboard); trigger_draw = true; }, Event::FingerUp {finger_id, ..} => { keyboard_state.on_release(finger_id, &mut keyboard); trigger_draw = true; }, Event::Window {win_event, ..} => { match win_event { WindowEvent::SizeChanged (width, height) => { // breaks out of the render loop and reconfigures the application break 'render } _ => {} } }, _ => {} } } if trigger_draw { renderer.set_draw_color(config.colors.line_color); renderer.clear(); draw_keyboard(&mut renderer, &keyboard_font, &config, &keyboard, keyboard_state.get_pressed()).unwrap(); } renderer.present(); // TODO render a stats section in app frame_count += 1; if last_time.elapsed() > Duration::from_secs(1) { println!("fps {}", frame_count); frame_count = 0; last_time = Instant::now(); } } } }
{ // TODO this function needs to be broken up into a calculate and translate section, we don't // need to redo the sin math every time. let r:f32 = radius as f32; let mut angle:f32 = INCREMENT_ANGLE/2.0; let mut xs: [i16;6] = [0; 6]; let mut ys: [i16;6] = [0; 6]; for i in 0..6 { let xo = angle.sin()*r; let yo = angle.cos()*r; angle += INCREMENT_ANGLE; xs[i] = x + xo.round() as i16; ys[i] = y + yo.round() as i16; } return (xs, ys) }
identifier_body
render.rs
use crate::{ html::{Attribute, Children, Element, EventListener, EventToMessage, Html}, program::Program, }; use itertools::{EitherOrBoth, Itertools}; use std::fmt::Debug; use std::rc::Rc; use wasm_bindgen::prelude::*; use wasm_bindgen::JsCast; pub struct Renderer<Model, Msg> { program: Rc<Program<Model, Msg>>, to_remove: Vec<(web_sys::Node, web_sys::Node)>, } fn eiter_or_both_to_option_tuple<T>(pair: EitherOrBoth<T, T>) -> (Option<T>, Option<T>) { use itertools::EitherOrBoth::{Both, Left, Right}; match pair { Both(a, b) => (Some(a), Some(b)), Left(a) => (Some(a), None), Right(b) => (None, Some(b)), } } impl<Model, Msg> Renderer<Model, Msg> where Msg: PartialEq + Debug + Clone + 'static, Model: Debug + Clone + 'static, { pub fn render( root: &web_sys::Node, program: &Rc<Program<Model, Msg>>, new_tree: &Html<Msg>, old_tree: &Option<Html<Msg>>, ) -> Result<(), JsValue> { let mut renderer = Renderer { program: program.clone(), to_remove: vec![], }; // TODO: We should probably not assume that the number here is 0 renderer.update_element(root, Some(new_tree), old_tree.as_ref(), 0)?; for (parent, child) in &renderer.to_remove { parent.remove_child(&child)?; } Ok(()) } fn update_element( &mut self, parent: &web_sys::Node, new: Option<&Html<Msg>>, old: Option<&Html<Msg>>, index: u32, ) -> Result<(), JsValue> { match (old, new) { (None, Some(new_html)) => { // Node is added parent.append_child(&self.create_node(new_html)?)?; } (Some(_removed), None) => { // Node is removed if let Some(child) = parent.child_nodes().item(index) { // Don't remove childs until after every iteration is finished. 
If not, the // indexes will not point to the correct nodes anymore self.to_remove.push((parent.clone(), child)); } else { // console_log!( // "Could not find node with index {} when removing {}", // index, // removed.to_html_text(0) // ); } } (Some(old), Some(new)) => match (old, new) { (Html::Element(old_tag), Html::Element(new_tag)) if old_tag.name == new_tag.name && old_tag.key() == new_tag.key() => { let current_node: web_sys::Element = match parent.child_nodes().item(index) { Some(n) => n.dyn_into()?, None => { return Err(JsValue::from_str(&format!( "ERROR: Could not find node at index {}", index ))); } }; // We have a node (current_node) that has changed from old_tag to new_tag, though // the tag is still the same. This means we need to diff children and attributes // First we diff attributes // We start by removing the ones that are no longer active for old_attr in &old_tag.attrs { let new_attr = new_tag.attrs.iter().find(|e| e == &old_attr); if new_attr.is_none() { remove_attribute(&current_node, old_attr)?; } else if let Attribute::Event(old_listener) = old_attr { if let Some(Attribute::Event(new_listener)) = new_attr { if let Some(js_closure) = old_listener.js_closure.0.borrow_mut().take() { new_listener.js_closure.0.replace(Some(js_closure)); } } } } // Then we add the ones that are added for attr in &new_tag.attrs { if !old_tag.attrs.contains(attr) { self.add_attribute(&current_node, attr)?; } } if let (Children::Nodes(old_children), Children::Nodes(new_children)) = (&old_tag.children, &new_tag.children) { for (child_index, pair) in old_children .iter() .zip_longest(new_children.iter()) .enumerate() { let (old_child, new_child) = eiter_or_both_to_option_tuple(pair); self.update_element( &current_node, new_child, old_child, child_index as u32, )?; } } } (Html::Text(s1), Html::Text(s2)) => { if s1 != s2 { if let Some(child) = parent.child_nodes().item(index) { child.set_text_content(Some(&s2)); } else { return Err(JsValue::from_str(&format!( "ERROR: Could 
not find node at index {}", index, ))); } } } _ => { if let Some(child) = parent.child_nodes().item(index) { parent.replace_child(&self.create_node(new)?, &child)?; } else { return Err(JsValue::from_str(&format!( "ERROR: Could not find node at index {}", index, ))); } } }, (None, None) => { // Should never happen, but if it happens we can just do nothing and it will be okay } } Ok(()) } fn create_node(&self, input: &Html<Msg>) -> Result<web_sys::Node, JsValue> { match input { Html::Element(Element { name, attrs, children, .. }) => { let el = self.program.browser.document.create_element(&name)?; for attr in attrs { self.add_attribute(&el, attr)?; } let node: web_sys::Node = el.into(); if let Children::Nodes(children) = children { for child in children { let child_node = self.create_node(&child)?; node.append_child(&child_node)?; } } Ok(node) } Html::Text(text) => { let node = self.program.browser.document.create_text_node(&text); Ok(node.into()) } } } fn add_attribute( &self, node: &web_sys::Element, attribute: &Attribute<Msg>, ) -> Result<(), JsValue> { match attribute { Attribute::Key(_) => Ok(()), Attribute::Text(key, value) => node.set_attribute(&key, &value), Attribute::Bool(key) => node.set_attribute(&key, "true"), Attribute::Event(EventListener { type_, to_message, stop_propagation, prevent_default, js_closure, }) => { let to_message = to_message.clone(); let program = self.program.clone(); let stop_propagation = *stop_propagation; let prevent_default = *prevent_default; let closure = Closure::wrap(Box::new(move |event: web_sys::Event| { if prevent_default { event.prevent_default(); } if stop_propagation { event.stop_propagation(); } let result = match &to_message { EventToMessage::StaticMsg(msg) => Program::dispatch(&program, msg), }; if let Err(error) = result { log::error!("{:#?}", error); } }) as Box<Fn(_)>);
node_et .add_event_listener_with_callback(&type_, closure.as_ref().unchecked_ref())?; let ret = js_closure.0.replace(Some(closure)); if ret.is_some() { log::warn!("to_message did already have a closure???"); } Ok(()) } } } } fn remove_attribute<Msg>( node: &web_sys::Element, attribute: &Attribute<Msg>, ) -> Result<(), JsValue> { match attribute { Attribute::Key(_) => {} // TODO: I think I know why elm normalizes before adding and removing attributes. We should probably do the same Attribute::Text(key, _) => { node.remove_attribute(key)?; } Attribute::Bool(key) => { node.remove_attribute(key)?; } Attribute::Event(EventListener { type_, js_closure, .. }) => { if let Some(closure) = js_closure.0.replace(None) { let node_et: &web_sys::EventTarget = &node; node_et.remove_event_listener_with_callback( &type_, closure.as_ref().unchecked_ref(), )?; } else { log::warn!("Could not get a function to remove listener"); } } } Ok(()) }
let node_et: &web_sys::EventTarget = &node;
random_line_split
render.rs
use crate::{ html::{Attribute, Children, Element, EventListener, EventToMessage, Html}, program::Program, }; use itertools::{EitherOrBoth, Itertools}; use std::fmt::Debug; use std::rc::Rc; use wasm_bindgen::prelude::*; use wasm_bindgen::JsCast; pub struct Renderer<Model, Msg> { program: Rc<Program<Model, Msg>>, to_remove: Vec<(web_sys::Node, web_sys::Node)>, } fn eiter_or_both_to_option_tuple<T>(pair: EitherOrBoth<T, T>) -> (Option<T>, Option<T>) { use itertools::EitherOrBoth::{Both, Left, Right}; match pair { Both(a, b) => (Some(a), Some(b)), Left(a) => (Some(a), None), Right(b) => (None, Some(b)), } } impl<Model, Msg> Renderer<Model, Msg> where Msg: PartialEq + Debug + Clone + 'static, Model: Debug + Clone + 'static, { pub fn render( root: &web_sys::Node, program: &Rc<Program<Model, Msg>>, new_tree: &Html<Msg>, old_tree: &Option<Html<Msg>>, ) -> Result<(), JsValue> { let mut renderer = Renderer { program: program.clone(), to_remove: vec![], }; // TODO: We should probably not assume that the number here is 0 renderer.update_element(root, Some(new_tree), old_tree.as_ref(), 0)?; for (parent, child) in &renderer.to_remove { parent.remove_child(&child)?; } Ok(()) } fn
( &mut self, parent: &web_sys::Node, new: Option<&Html<Msg>>, old: Option<&Html<Msg>>, index: u32, ) -> Result<(), JsValue> { match (old, new) { (None, Some(new_html)) => { // Node is added parent.append_child(&self.create_node(new_html)?)?; } (Some(_removed), None) => { // Node is removed if let Some(child) = parent.child_nodes().item(index) { // Don't remove childs until after every iteration is finished. If not, the // indexes will not point to the correct nodes anymore self.to_remove.push((parent.clone(), child)); } else { // console_log!( // "Could not find node with index {} when removing {}", // index, // removed.to_html_text(0) // ); } } (Some(old), Some(new)) => match (old, new) { (Html::Element(old_tag), Html::Element(new_tag)) if old_tag.name == new_tag.name && old_tag.key() == new_tag.key() => { let current_node: web_sys::Element = match parent.child_nodes().item(index) { Some(n) => n.dyn_into()?, None => { return Err(JsValue::from_str(&format!( "ERROR: Could not find node at index {}", index ))); } }; // We have a node (current_node) that has changed from old_tag to new_tag, though // the tag is still the same. 
This means we need to diff children and attributes // First we diff attributes // We start by removing the ones that are no longer active for old_attr in &old_tag.attrs { let new_attr = new_tag.attrs.iter().find(|e| e == &old_attr); if new_attr.is_none() { remove_attribute(&current_node, old_attr)?; } else if let Attribute::Event(old_listener) = old_attr { if let Some(Attribute::Event(new_listener)) = new_attr { if let Some(js_closure) = old_listener.js_closure.0.borrow_mut().take() { new_listener.js_closure.0.replace(Some(js_closure)); } } } } // Then we add the ones that are added for attr in &new_tag.attrs { if !old_tag.attrs.contains(attr) { self.add_attribute(&current_node, attr)?; } } if let (Children::Nodes(old_children), Children::Nodes(new_children)) = (&old_tag.children, &new_tag.children) { for (child_index, pair) in old_children .iter() .zip_longest(new_children.iter()) .enumerate() { let (old_child, new_child) = eiter_or_both_to_option_tuple(pair); self.update_element( &current_node, new_child, old_child, child_index as u32, )?; } } } (Html::Text(s1), Html::Text(s2)) => { if s1 != s2 { if let Some(child) = parent.child_nodes().item(index) { child.set_text_content(Some(&s2)); } else { return Err(JsValue::from_str(&format!( "ERROR: Could not find node at index {}", index, ))); } } } _ => { if let Some(child) = parent.child_nodes().item(index) { parent.replace_child(&self.create_node(new)?, &child)?; } else { return Err(JsValue::from_str(&format!( "ERROR: Could not find node at index {}", index, ))); } } }, (None, None) => { // Should never happen, but if it happens we can just do nothing and it will be okay } } Ok(()) } fn create_node(&self, input: &Html<Msg>) -> Result<web_sys::Node, JsValue> { match input { Html::Element(Element { name, attrs, children, .. 
}) => { let el = self.program.browser.document.create_element(&name)?; for attr in attrs { self.add_attribute(&el, attr)?; } let node: web_sys::Node = el.into(); if let Children::Nodes(children) = children { for child in children { let child_node = self.create_node(&child)?; node.append_child(&child_node)?; } } Ok(node) } Html::Text(text) => { let node = self.program.browser.document.create_text_node(&text); Ok(node.into()) } } } fn add_attribute( &self, node: &web_sys::Element, attribute: &Attribute<Msg>, ) -> Result<(), JsValue> { match attribute { Attribute::Key(_) => Ok(()), Attribute::Text(key, value) => node.set_attribute(&key, &value), Attribute::Bool(key) => node.set_attribute(&key, "true"), Attribute::Event(EventListener { type_, to_message, stop_propagation, prevent_default, js_closure, }) => { let to_message = to_message.clone(); let program = self.program.clone(); let stop_propagation = *stop_propagation; let prevent_default = *prevent_default; let closure = Closure::wrap(Box::new(move |event: web_sys::Event| { if prevent_default { event.prevent_default(); } if stop_propagation { event.stop_propagation(); } let result = match &to_message { EventToMessage::StaticMsg(msg) => Program::dispatch(&program, msg), }; if let Err(error) = result { log::error!("{:#?}", error); } }) as Box<Fn(_)>); let node_et: &web_sys::EventTarget = &node; node_et .add_event_listener_with_callback(&type_, closure.as_ref().unchecked_ref())?; let ret = js_closure.0.replace(Some(closure)); if ret.is_some() { log::warn!("to_message did already have a closure???"); } Ok(()) } } } } fn remove_attribute<Msg>( node: &web_sys::Element, attribute: &Attribute<Msg>, ) -> Result<(), JsValue> { match attribute { Attribute::Key(_) => {} // TODO: I think I know why elm normalizes before adding and removing attributes. 
We should probably do the same Attribute::Text(key, _) => { node.remove_attribute(key)?; } Attribute::Bool(key) => { node.remove_attribute(key)?; } Attribute::Event(EventListener { type_, js_closure, .. }) => { if let Some(closure) = js_closure.0.replace(None) { let node_et: &web_sys::EventTarget = &node; node_et.remove_event_listener_with_callback( &type_, closure.as_ref().unchecked_ref(), )?; } else { log::warn!("Could not get a function to remove listener"); } } } Ok(()) }
update_element
identifier_name
render.rs
use crate::{ html::{Attribute, Children, Element, EventListener, EventToMessage, Html}, program::Program, }; use itertools::{EitherOrBoth, Itertools}; use std::fmt::Debug; use std::rc::Rc; use wasm_bindgen::prelude::*; use wasm_bindgen::JsCast; pub struct Renderer<Model, Msg> { program: Rc<Program<Model, Msg>>, to_remove: Vec<(web_sys::Node, web_sys::Node)>, } fn eiter_or_both_to_option_tuple<T>(pair: EitherOrBoth<T, T>) -> (Option<T>, Option<T>) { use itertools::EitherOrBoth::{Both, Left, Right}; match pair { Both(a, b) => (Some(a), Some(b)), Left(a) => (Some(a), None), Right(b) => (None, Some(b)), } } impl<Model, Msg> Renderer<Model, Msg> where Msg: PartialEq + Debug + Clone + 'static, Model: Debug + Clone + 'static, { pub fn render( root: &web_sys::Node, program: &Rc<Program<Model, Msg>>, new_tree: &Html<Msg>, old_tree: &Option<Html<Msg>>, ) -> Result<(), JsValue> { let mut renderer = Renderer { program: program.clone(), to_remove: vec![], }; // TODO: We should probably not assume that the number here is 0 renderer.update_element(root, Some(new_tree), old_tree.as_ref(), 0)?; for (parent, child) in &renderer.to_remove { parent.remove_child(&child)?; } Ok(()) } fn update_element( &mut self, parent: &web_sys::Node, new: Option<&Html<Msg>>, old: Option<&Html<Msg>>, index: u32, ) -> Result<(), JsValue> { match (old, new) { (None, Some(new_html)) => { // Node is added parent.append_child(&self.create_node(new_html)?)?; } (Some(_removed), None) => { // Node is removed if let Some(child) = parent.child_nodes().item(index) { // Don't remove childs until after every iteration is finished. 
If not, the // indexes will not point to the correct nodes anymore self.to_remove.push((parent.clone(), child)); } else { // console_log!( // "Could not find node with index {} when removing {}", // index, // removed.to_html_text(0) // ); } } (Some(old), Some(new)) => match (old, new) { (Html::Element(old_tag), Html::Element(new_tag)) if old_tag.name == new_tag.name && old_tag.key() == new_tag.key() => { let current_node: web_sys::Element = match parent.child_nodes().item(index) { Some(n) => n.dyn_into()?, None => { return Err(JsValue::from_str(&format!( "ERROR: Could not find node at index {}", index ))); } }; // We have a node (current_node) that has changed from old_tag to new_tag, though // the tag is still the same. This means we need to diff children and attributes // First we diff attributes // We start by removing the ones that are no longer active for old_attr in &old_tag.attrs { let new_attr = new_tag.attrs.iter().find(|e| e == &old_attr); if new_attr.is_none() { remove_attribute(&current_node, old_attr)?; } else if let Attribute::Event(old_listener) = old_attr { if let Some(Attribute::Event(new_listener)) = new_attr { if let Some(js_closure) = old_listener.js_closure.0.borrow_mut().take() { new_listener.js_closure.0.replace(Some(js_closure)); } } } } // Then we add the ones that are added for attr in &new_tag.attrs { if !old_tag.attrs.contains(attr) { self.add_attribute(&current_node, attr)?; } } if let (Children::Nodes(old_children), Children::Nodes(new_children)) = (&old_tag.children, &new_tag.children) { for (child_index, pair) in old_children .iter() .zip_longest(new_children.iter()) .enumerate() { let (old_child, new_child) = eiter_or_both_to_option_tuple(pair); self.update_element( &current_node, new_child, old_child, child_index as u32, )?; } } } (Html::Text(s1), Html::Text(s2)) => { if s1 != s2 { if let Some(child) = parent.child_nodes().item(index) { child.set_text_content(Some(&s2)); } else { return Err(JsValue::from_str(&format!( "ERROR: Could 
not find node at index {}", index, ))); } } } _ => { if let Some(child) = parent.child_nodes().item(index) { parent.replace_child(&self.create_node(new)?, &child)?; } else { return Err(JsValue::from_str(&format!( "ERROR: Could not find node at index {}", index, ))); } } }, (None, None) => { // Should never happen, but if it happens we can just do nothing and it will be okay } } Ok(()) } fn create_node(&self, input: &Html<Msg>) -> Result<web_sys::Node, JsValue> { match input { Html::Element(Element { name, attrs, children, .. }) => { let el = self.program.browser.document.create_element(&name)?; for attr in attrs { self.add_attribute(&el, attr)?; } let node: web_sys::Node = el.into(); if let Children::Nodes(children) = children { for child in children { let child_node = self.create_node(&child)?; node.append_child(&child_node)?; } } Ok(node) } Html::Text(text) => { let node = self.program.browser.document.create_text_node(&text); Ok(node.into()) } } } fn add_attribute( &self, node: &web_sys::Element, attribute: &Attribute<Msg>, ) -> Result<(), JsValue> { match attribute { Attribute::Key(_) => Ok(()), Attribute::Text(key, value) => node.set_attribute(&key, &value), Attribute::Bool(key) => node.set_attribute(&key, "true"), Attribute::Event(EventListener { type_, to_message, stop_propagation, prevent_default, js_closure, }) => { let to_message = to_message.clone(); let program = self.program.clone(); let stop_propagation = *stop_propagation; let prevent_default = *prevent_default; let closure = Closure::wrap(Box::new(move |event: web_sys::Event| { if prevent_default { event.prevent_default(); } if stop_propagation { event.stop_propagation(); } let result = match &to_message { EventToMessage::StaticMsg(msg) => Program::dispatch(&program, msg), }; if let Err(error) = result { log::error!("{:#?}", error); } }) as Box<Fn(_)>); let node_et: &web_sys::EventTarget = &node; node_et .add_event_listener_with_callback(&type_, closure.as_ref().unchecked_ref())?; let ret = 
js_closure.0.replace(Some(closure)); if ret.is_some() { log::warn!("to_message did already have a closure???"); } Ok(()) } } } } fn remove_attribute<Msg>( node: &web_sys::Element, attribute: &Attribute<Msg>, ) -> Result<(), JsValue> { match attribute { Attribute::Key(_) => {} // TODO: I think I know why elm normalizes before adding and removing attributes. We should probably do the same Attribute::Text(key, _) => { node.remove_attribute(key)?; } Attribute::Bool(key) => { node.remove_attribute(key)?; } Attribute::Event(EventListener { type_, js_closure, .. }) =>
} Ok(()) }
{ if let Some(closure) = js_closure.0.replace(None) { let node_et: &web_sys::EventTarget = &node; node_et.remove_event_listener_with_callback( &type_, closure.as_ref().unchecked_ref(), )?; } else { log::warn!("Could not get a function to remove listener"); } }
conditional_block
main.rs
#![cfg_attr(target_os = "none", no_std)] #![cfg_attr(target_os = "none", no_main)] mod api; mod time; // why is this here? because it's the only place it'll fit. :-/ use api::*; use net::NetIpAddr; use num_traits::*; use xous::msg_scalar_unpack; use std::collections::HashMap; use std::convert::TryInto; use std::io::ErrorKind; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket}; use std::time::Duration; use std::thread; use xous_ipc::{Buffer, String}; // KISS DNS // The DNS implementation here is based on https://github.com/vinc/moros/blob/43ac7cdc8ccc860dc1b6f0f060b5dbcd01424c03/src/usr/host.rs // MOROS is MIT licensed. // See RFC 1035 for implementation details #[repr(u16)] enum QueryType { A = 1, // NS = 2, // MD = 3, // MF = 4, // CNAME = 5, // SOA = 6, // MX = 15, // TXT = 16, } #[repr(u16)] enum QueryClass { IN = 1, } struct Message { pub datagram: Vec<u8>, } const FLAG_RD: u16 = 0x0100; // Recursion desired impl Message { pub fn from(datagram: &[u8]) -> Self { Self { datagram: Vec::from(datagram), } } pub fn query(qname: &str, qtype: QueryType, qclass: QueryClass, id: u16) -> Self { let mut datagram = Vec::new(); for b in id.to_be_bytes().iter() { datagram.push(*b); // Transaction ID } for b in FLAG_RD.to_be_bytes().iter() { datagram.push(*b); // Flags } for b in (1 as u16).to_be_bytes().iter() { datagram.push(*b); // Questions } for _ in 0..6 { datagram.push(0); // Answer + Authority + Additional } for label in qname.split('.') { datagram.push(label.len() as u8); // QNAME label length for b in label.bytes() { datagram.push(b); // QNAME label bytes } } datagram.push(0); // Root null label for b in (qtype as u16).to_be_bytes().iter() { datagram.push(*b); // QTYPE } for b in (qclass as u16).to_be_bytes().iter() { datagram.push(*b); // QCLASS } Self { datagram } } pub fn id(&self) -> u16 { u16::from_be_bytes(self.datagram[0..2].try_into().unwrap()) } pub fn header(&self) -> u16 { u16::from_be_bytes(self.datagram[2..4].try_into().unwrap()) } pub fn 
is_response(&self) -> bool { if (self.header() & (1 << 15)) == 0 { false } else { true } } fn fast_foward_name(&self, start: usize) -> Result<usize, DnsResponseCode> { use DnsResponseCode::FormatError; let mut index = start; loop { log::trace!("cname index: {}", index); if *(self.datagram.get(index).ok_or(FormatError)?) == 0 { index += 1; break; } else { index += *(self.datagram.get(index).ok_or(FormatError)?) as usize; index += 1; } } Ok(index) } pub fn parse_response(&self) -> Result<HashMap<IpAddr, u32>, DnsResponseCode> { use DnsResponseCode::FormatError; log::trace!("parsing packet: {:?}", self.datagram); let mut map = HashMap::<IpAddr, u32>::new(); // ASSUME: the query ID and response bit fields have already been checked // and that the rcode is valid let qdcount = u16::from_be_bytes(self.datagram[4..6].try_into().unwrap()); let ancount = u16::from_be_bytes(self.datagram[6..8].try_into().unwrap()); let mut index = 12; // fast forward past the qname for queries in 0..qdcount { log::trace!("parsing query{}, index {}", queries, index); index = self.fast_foward_name(index)?; log::trace!("fast forward through qname to {}", index); // index is now at qtype let qtype = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); // A = 1, AAAA = 28 if qtype != 1 && qtype != 28 { log::error!("Problem parsing qname, qtype is not 1 or 28: {}", qtype); return Err(FormatError); } index += 2; let qclass = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); if qclass != 1 { log::error!("Problem parsing qname, qclass is not 1: {}", qclass); return Err(FormatError); } index += 2; } // index is now at the aname section for aname in 0..ancount { log::trace!("parsing aname{}, index {}", aname, index); // first check to see if we're dealing with a pointer or a name if self.datagram[index] >= 0xc0 { // pointer index += 1; if self.datagram[index] != 0xc { log::error!( "Found aname pointer, but value does not conform to length of aname header" ); 
return Err(FormatError); } index += 1; } else { // name, fast forward past the name index = self.fast_foward_name(index)?; log::trace!("fast forward aname to {}", index); } // index is now at type let atype = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); // A = 1, AAAA = 28 if atype != 1 && atype != 28 { log::error!("Problem parsing aname, type is not 1 or 28: {}", atype); return Err(FormatError); } index += 2; let aclass = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); if aclass != 1 { log::error!("Problem parsing aname, aclass is not 1: {}", aclass); return Err(FormatError); } index += 2; // this is our TTL let ttl = u32::from_be_bytes(self.datagram[index..index + 4].try_into().unwrap()); log::trace!("got ttl: {}", ttl); index += 4; // this is the payload length let addr_len = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); index += 2; match addr_len { // ipv4 4 => { if atype != 1 { log::error!("Got a 4-byte address, but ATYPE != A (1)"); return Err(FormatError); } // this copy happens because I can't figure out how to get Ipv4Addr::from() to realize it's casting from a [u8;4] let mut rdata: [u8; 4] = [0; 4]; for (&src, dst) in self.datagram[index..index + 4].iter().zip(rdata.iter_mut()) { *dst = src; } let addr = IpAddr::V4(Ipv4Addr::from(rdata)); index += 4; map.insert(addr, ttl); } // ipv6 16 => { if atype != 28 { log::error!("Got a 16-byte address, but ATYPE != AAAA (28)"); return Err(FormatError); } // this copy happens because I can't figure out how to get Ipv6Addr::from() to realize it's casting from a [u8;4] let mut rdata: [u8; 16] = [0; 16]; for (&src, dst) in self.datagram[index..index + 16] .iter() .zip(rdata.iter_mut()) { *dst = src; } let addr = IpAddr::V6(Ipv6Addr::from(rdata)); index += 16; map.insert(addr, ttl); } _ => { log::error!("Length field does not match a known record type"); return Err(FormatError); } } } Ok(map) } /* example response for: 
betrusted.io->185.199.111.153 Header: 61, ca, id 81, 80, header 0, 1, qdcount 0, 4, ancount 0, 0, nscount 0, 0, arcount qname: 9, length 9 62, 65, 74, 72, 75, 73, 74, 65, 64, "betrusted" 2, length 2 69, 6f, "io" 0, end of name qtype: 0, 1, type A qclass: 0, 1, type IN aname0: c0, name is a pointer (any value > 192 is a pointer) c, offset of 12 from start of aname0 0, 1, type A 0, 1, class IN 0, 0, e, 10, 0xe10 = 3600 seconds TTL 0, 4, 4 bytes address b9, c7, 6c, 99, address aname1: c0, name is a pointer c, 0, 1, type A 0, 1, class IN 0, 0, e, 10, TTL 0, 4, 4 byte address b9, c7, 6d, 99, address aname2: c0, c, 0, 1, 0, 1, 0, 0, e, 10, 0, 4, b9, c7, 6e, 99, aname3: c0, c, 0, 1, 0, 1, 0, 0, e, 10, 0, 4, b9, c7, 6f, 99 */ /* pub fn is_query(&self) -> bool { !self.is_response() } */ pub fn rcode(&self) -> DnsResponseCode { match (self.header() >> 11) & 0xF { 0 => DnsResponseCode::NoError, 1 => DnsResponseCode::FormatError, 2 => DnsResponseCode::ServerFailure, 3 => DnsResponseCode::NameError, 4 => DnsResponseCode::NotImplemented, 5 => DnsResponseCode::Refused, _ => DnsResponseCode::UnknownError, } } } pub struct Resolver { /// DnsServerManager is a service of the Net crate that automatically updates the DNS server list mgr: net::protocols::DnsServerManager, socket: UdpSocket, buf: [u8; DNS_PKT_MAX_LEN], trng: trng::Trng, freeze: bool, } impl Resolver { pub fn new(xns: &xous_names::XousNames) -> Resolver { let trng = trng::Trng::new(&xns).unwrap(); let local_port = (49152 + trng.get_u32().unwrap() % 16384) as u16; let socket = UdpSocket::bind( format!("0.0.0.0:{}", local_port), ) .expect("couldn't create socket for DNS resolver"); let timeout = Duration::from_millis(10_000); // 10 seconds for DNS to resolve by default socket.set_read_timeout(Some(timeout)).unwrap(); socket.set_nonblocking(false).unwrap(); // we want this to block. // we /could/ do a non-blocking DNS resolver, but...what would you do in the meantime?? 
// blocking is probably what we actually want this time. Resolver { mgr: net::protocols::DnsServerManager::register(&xns) .expect("Couldn't register the DNS server list auto-manager"), socket, buf: [0; DNS_PKT_MAX_LEN], trng, freeze: false, } } pub fn add_server(&mut self, addr: IpAddr) { self.mgr.add_server(addr); } pub fn remove_server(&mut self, addr: IpAddr) { self.mgr.remove_server(addr); } pub fn clear_all_servers(&mut self) { self.mgr.clear(); } pub fn set_freeze_config(&mut self, freeze: bool) { self.freeze = freeze; self.mgr.set_freeze(freeze); } pub fn get_freeze(&self) -> bool { self.freeze } /// this allows us to re-use the TRNG object pub fn trng_u32(&self) -> u32 { self.trng.get_u32().unwrap() } pub fn resolve(&mut self, name: &str) -> Result<HashMap<IpAddr, u32>, DnsResponseCode> { if let Some(dns_address) = self.mgr.get_random() { let dns_port = 53; let server = SocketAddr::new(dns_address, dns_port); let qname = name; let qtype = QueryType::A; let qclass = QueryClass::IN; let query = Message::query(qname, qtype, qclass, self.trng.get_u32().unwrap() as u16); self.socket .send_to(&query.datagram, &server) .map_err(|_| DnsResponseCode::NetworkError)?; match self.socket.recv(&mut self.buf) { Ok(len) => { let message = Message::from(&self.buf[..len]); if message.id() == query.id() && message.is_response() { return match message.rcode() { DnsResponseCode::NoError => message.parse_response(), rcode => Err(rcode), }; } else { Err(DnsResponseCode::NetworkError) } } Err(e) => match e.kind() { ErrorKind::WouldBlock => Err(DnsResponseCode::NetworkError), _ => Err(DnsResponseCode::UnknownError), }, } } else { Err(DnsResponseCode::NoServerSpecified) } } } #[derive(PartialEq, Debug)] #[repr(C)] enum NameConversionError { /// The length of the memory buffer was invalid InvalidMemoryBuffer = 1, /// The specified nameserver string was not UTF-8 InvalidString = 3, /// The message was not a mutable memory message InvalidMessageType = 4, } fn name_from_msg(env: 
&xous::MessageEnvelope) -> Result<&str, NameConversionError> { let msg = env .body .memory_message() .ok_or(NameConversionError::InvalidMessageType)?; let valid_bytes = msg.valid.map(|v| v.get()).unwrap_or_else(|| msg.buf.len()); if valid_bytes > DNS_NAME_LENGTH_LIMIT || valid_bytes < 1 { log::error!("valid bytes exceeded DNS name limit"); return Err(NameConversionError::InvalidMemoryBuffer); } // Safe because we've already validated that it's a valid range let str_slice = unsafe { core::slice::from_raw_parts(msg.buf.as_ptr(), valid_bytes) }; let name_string = core::str::from_utf8(str_slice).map_err(|_| NameConversionError::InvalidString)?; Ok(name_string) } fn fill_response(mut env: xous::MessageEnvelope, entries: &HashMap<IpAddr, u32>) -> Option<()> { let mem = env.body.memory_message_mut()?; let s: &mut [u8] = mem.buf.as_slice_mut(); let mut i = s.iter_mut(); // First tag = 1 for "Error" -- we'll fill this in at the end when it's successful *i.next()? = 1; // Limit the number of entries to 128, which is a nice number. Given that an IPv6 // address is 17 bytes, that means that ~240 IPv6 addresses will fit in a 4 kB page.
let mut entry_count = entries.len(); if entry_count > 128 { entry_count = 128; } *i.next()? = entry_count.try_into().ok()?; // Start filling in the addreses for addr in entries.keys() { match addr { &IpAddr::V4(a) => { // IPv4 *i.next()? = 4; for entry in a.octets() { *i.next()? = entry; } } &IpAddr::V6(a) => { // IPv6 for entry in a.octets() { *i.next()? = entry; } *i.next()? = 6; } } } // Convert the entry to a "Success" message drop(i); s[0] = 0; None } fn fill_error(mut env: xous::MessageEnvelope, code: DnsResponseCode) -> Option<()> { let mem = env.body.memory_message_mut()?; let s: &mut [u8] = mem.buf.as_slice_mut(); let mut i = s.iter_mut(); *i.next()? = 1; *i.next()? = code as u8; None } fn main() -> ! { log_server::init_wait().unwrap(); log::set_max_level(log::LevelFilter::Info); log::info!("my PID is {}", xous::process::id()); // Time is stuck in the DNS crate because the status crate is out of resources, and the DNS // crate is fairly under-utilized and ideal for sticking a service like time in it. // // this kicks off the thread that services the `libstd` calls for time-related things. // we want this started really early, because it sanity checks the RTC and a bunch of other stuff. 
time::start_time_server(); time::start_time_ux(); let xns = xous_names::XousNames::new().unwrap(); let dns_sid = xns .register_name(api::SERVER_NAME_DNS, None) .expect("can't register server"); log::trace!("registered with NS -- {:?}", dns_sid); // this will magically populate a list of DNS servers when they become available let mut resolver = Resolver::new(&xns); // if you wanted to force a server into the initial config, you can do it here, for example: // resolver.add_server(IpAddr::V4(Ipv4Addr::new(1,1,1,1))); // the `u32` value is the TTL of the IpAddr let mut dns_cache = HashMap::<std::string::String, HashMap<IpAddr, u32>>::new(); // build a thread that pings the UpdateTtl function once every few minutes to expire the DNS cache thread::spawn({ let local_cid = xous::connect(dns_sid).unwrap(); move || { const TTL_INTERVAL_SECS: usize = 300; // every 5 minutes update the map let tt = ticktimer_server::Ticktimer::new().unwrap(); loop { tt.sleep_ms(TTL_INTERVAL_SECS * 1000).unwrap(); xous::send_message( local_cid, xous::Message::new_scalar( Opcode::UpdateTtl.to_usize().unwrap(), TTL_INTERVAL_SECS, 0, 0, 0, ), ) .expect("couldn't increment DNS cache"); } } }); log::trace!("ready to accept requests"); loop { let mut msg = xous::receive_message(dns_sid).unwrap(); match FromPrimitive::from_usize(msg.body.id()) { Some(Opcode::RawLookup) => { match name_from_msg(&msg).map(|s| s.to_owned()) { Ok(owned_name) => { // handle the special case of "localhost" as a string if owned_name == "localhost" { let mut local = HashMap::<IpAddr, u32>::new(); local.insert(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 86400); fill_response(msg, &local); continue; } log::trace!("performing a lookup of {}", owned_name); // Try to get the result out of the DNS cache if let Some(entries) = dns_cache.get(&owned_name) { fill_response(msg, entries); continue; } // This entry is not in the cache, so perform a lookup match resolver.resolve(&owned_name) { Ok(cache_entry) => { fill_response(msg, 
&cache_entry); dns_cache.insert(owned_name, cache_entry); continue; } Err(e) => { fill_error(msg, e); continue; } } } Err(e) => { log::error!("unable to do name lookup: {:?}", e); fill_error(msg, DnsResponseCode::NameError); continue; } }; } Some(Opcode::Lookup) => { let mut buf = unsafe { Buffer::from_memory_message_mut(msg.body.memory_message_mut().unwrap()) }; let name = buf .to_original::<String<DNS_NAME_LENGTH_LIMIT>, _>() .unwrap(); let name_std = std::string::String::from(name.as_str().unwrap()); if let Some(cache_entry) = dns_cache.get(&name_std) { // pick a random entry let rand = resolver.trng_u32() as usize % cache_entry.len(); for (index, (ip_addr, _)) in cache_entry.iter().enumerate() { if rand == index { log::debug!("DNS cached: {}->{:?}", name, ip_addr); let response = DnsResponse { addr: Some(NetIpAddr::from(*ip_addr)), code: DnsResponseCode::NoError, }; buf.replace(response).unwrap(); break; } } } else { match resolver.resolve(name.as_str().unwrap()) { Ok(cache_entry) => { if cache_entry.len() > 0 { dns_cache.insert(name_std, cache_entry); // now pick the entry back out again, as it was consumed... 
let name_std = std::string::String::from(name.as_str().unwrap()); let cache_entry = dns_cache.get(&name_std).unwrap(); // pick a random entry from the query response let rand = resolver.trng_u32() as usize % cache_entry.len(); for (index, (ip_addr, _)) in cache_entry.iter().enumerate() { if rand == index { let response = DnsResponse { addr: Some(NetIpAddr::from(*ip_addr)), code: DnsResponseCode::NoError, }; buf.replace(response).unwrap(); break; } } } else { // no names found let response = DnsResponse { addr: None, code: DnsResponseCode::NameError, }; buf.replace(response).unwrap(); } } Err(e) => { log::debug!("DNS query failed: {}->{:?}", name, e); let response = DnsResponse { addr: None, code: e, }; buf.replace(response).unwrap(); } } } } Some(Opcode::UpdateTtl) => msg_scalar_unpack!(msg, incr_secs, _, _, _, { let increment = if incr_secs < u32::MAX as usize { incr_secs as u32 } else { u32::MAX }; if !resolver.get_freeze() { let mut expired_names = Vec::<std::string::String>::new(); for (name, cache_map) in dns_cache.iter_mut() { // each entry can have multiple names with a different TTL // decrement the TTL, and note which go to zero let mut expired_entries = Vec::<IpAddr>::new(); for (entry, ttl) in cache_map.iter_mut() { log::debug!("entry: {:?}, ttl: {}, incr: {}", entry, ttl, increment); if *ttl < increment { *ttl = 0; expired_entries.push(*entry); } else { *ttl = *ttl - increment as u32; } } // remove the entries that are 0 for entry in expired_entries { log::debug!("DNS cache expiring {:?}", entry); cache_map.remove(&entry); } // if all the entries are removed, mark for removal from the cache entirely if cache_map.len() == 0 { // have to copy the name to a new object to track it let name = std::string::String::from(name.as_str()); expired_names.push(name); } } for name in expired_names { log::debug!("DNS cache removing {}", &name); dns_cache.remove(&name); } } }), Some(Opcode::Flush) => { dns_cache.clear(); } Some(Opcode::FreezeConfig) => { 
resolver.set_freeze_config(true); } Some(Opcode::ThawConfig) => { resolver.set_freeze_config(false); } Some(Opcode::Quit) => { log::warn!("got quit!"); break; } None => { log::error!("couldn't convert opcode: {:?}", msg); } } } // clean up our program log::trace!("main loop exit, destroying servers"); xns.unregister_server(dns_sid).unwrap(); xous::destroy_server(dns_sid).unwrap(); log::trace!("quitting"); xous::terminate_process(0) }
// 128 is just a conservative value rounded down.
random_line_split
main.rs
#![cfg_attr(target_os = "none", no_std)] #![cfg_attr(target_os = "none", no_main)] mod api; mod time; // why is this here? because it's the only place it'll fit. :-/ use api::*; use net::NetIpAddr; use num_traits::*; use xous::msg_scalar_unpack; use std::collections::HashMap; use std::convert::TryInto; use std::io::ErrorKind; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket}; use std::time::Duration; use std::thread; use xous_ipc::{Buffer, String}; // KISS DNS // The DNS implementation here is based on https://github.com/vinc/moros/blob/43ac7cdc8ccc860dc1b6f0f060b5dbcd01424c03/src/usr/host.rs // MOROS is MIT licensed. // See RFC 1035 for implementation details #[repr(u16)] enum QueryType { A = 1, // NS = 2, // MD = 3, // MF = 4, // CNAME = 5, // SOA = 6, // MX = 15, // TXT = 16, } #[repr(u16)] enum QueryClass { IN = 1, } struct Message { pub datagram: Vec<u8>, } const FLAG_RD: u16 = 0x0100; // Recursion desired impl Message { pub fn from(datagram: &[u8]) -> Self { Self { datagram: Vec::from(datagram), } } pub fn query(qname: &str, qtype: QueryType, qclass: QueryClass, id: u16) -> Self { let mut datagram = Vec::new(); for b in id.to_be_bytes().iter() { datagram.push(*b); // Transaction ID } for b in FLAG_RD.to_be_bytes().iter() { datagram.push(*b); // Flags } for b in (1 as u16).to_be_bytes().iter() { datagram.push(*b); // Questions } for _ in 0..6 { datagram.push(0); // Answer + Authority + Additional } for label in qname.split('.') { datagram.push(label.len() as u8); // QNAME label length for b in label.bytes() { datagram.push(b); // QNAME label bytes } } datagram.push(0); // Root null label for b in (qtype as u16).to_be_bytes().iter() { datagram.push(*b); // QTYPE } for b in (qclass as u16).to_be_bytes().iter() { datagram.push(*b); // QCLASS } Self { datagram } } pub fn id(&self) -> u16 { u16::from_be_bytes(self.datagram[0..2].try_into().unwrap()) } pub fn header(&self) -> u16 { u16::from_be_bytes(self.datagram[2..4].try_into().unwrap()) } pub fn 
is_response(&self) -> bool { if (self.header() & (1 << 15)) == 0 { false } else { true } } fn fast_foward_name(&self, start: usize) -> Result<usize, DnsResponseCode> { use DnsResponseCode::FormatError; let mut index = start; loop { log::trace!("cname index: {}", index); if *(self.datagram.get(index).ok_or(FormatError)?) == 0 { index += 1; break; } else { index += *(self.datagram.get(index).ok_or(FormatError)?) as usize; index += 1; } } Ok(index) } pub fn parse_response(&self) -> Result<HashMap<IpAddr, u32>, DnsResponseCode> { use DnsResponseCode::FormatError; log::trace!("parsing packet: {:?}", self.datagram); let mut map = HashMap::<IpAddr, u32>::new(); // ASSUME: the query ID and response bit fields have already been checked // and that the rcode is valid let qdcount = u16::from_be_bytes(self.datagram[4..6].try_into().unwrap()); let ancount = u16::from_be_bytes(self.datagram[6..8].try_into().unwrap()); let mut index = 12; // fast forward past the qname for queries in 0..qdcount { log::trace!("parsing query{}, index {}", queries, index); index = self.fast_foward_name(index)?; log::trace!("fast forward through qname to {}", index); // index is now at qtype let qtype = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); // A = 1, AAAA = 28 if qtype != 1 && qtype != 28 { log::error!("Problem parsing qname, qtype is not 1 or 28: {}", qtype); return Err(FormatError); } index += 2; let qclass = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); if qclass != 1 { log::error!("Problem parsing qname, qclass is not 1: {}", qclass); return Err(FormatError); } index += 2; } // index is now at the aname section for aname in 0..ancount { log::trace!("parsing aname{}, index {}", aname, index); // first check to see if we're dealing with a pointer or a name if self.datagram[index] >= 0xc0 { // pointer index += 1; if self.datagram[index] != 0xc { log::error!( "Found aname pointer, but value does not conform to length of aname header" ); 
return Err(FormatError); } index += 1; } else { // name, fast forward past the name index = self.fast_foward_name(index)?; log::trace!("fast forward aname to {}", index); } // index is now at type let atype = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); // A = 1, AAAA = 28 if atype != 1 && atype != 28 { log::error!("Problem parsing aname, type is not 1 or 28: {}", atype); return Err(FormatError); } index += 2; let aclass = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); if aclass != 1 { log::error!("Problem parsing aname, aclass is not 1: {}", aclass); return Err(FormatError); } index += 2; // this is our TTL let ttl = u32::from_be_bytes(self.datagram[index..index + 4].try_into().unwrap()); log::trace!("got ttl: {}", ttl); index += 4; // this is the payload length let addr_len = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); index += 2; match addr_len { // ipv4 4 => { if atype != 1 { log::error!("Got a 4-byte address, but ATYPE != A (1)"); return Err(FormatError); } // this copy happens because I can't figure out how to get Ipv4Addr::from() to realize it's casting from a [u8;4] let mut rdata: [u8; 4] = [0; 4]; for (&src, dst) in self.datagram[index..index + 4].iter().zip(rdata.iter_mut()) { *dst = src; } let addr = IpAddr::V4(Ipv4Addr::from(rdata)); index += 4; map.insert(addr, ttl); } // ipv6 16 => { if atype != 28 { log::error!("Got a 16-byte address, but ATYPE != AAAA (28)"); return Err(FormatError); } // this copy happens because I can't figure out how to get Ipv6Addr::from() to realize it's casting from a [u8;4] let mut rdata: [u8; 16] = [0; 16]; for (&src, dst) in self.datagram[index..index + 16] .iter() .zip(rdata.iter_mut()) { *dst = src; } let addr = IpAddr::V6(Ipv6Addr::from(rdata)); index += 16; map.insert(addr, ttl); } _ => { log::error!("Length field does not match a known record type"); return Err(FormatError); } } } Ok(map) } /* example response for: 
betrusted.io->185.199.111.153 Header: 61, ca, id 81, 80, header 0, 1, qdcount 0, 4, ancount 0, 0, nscount 0, 0, arcount qname: 9, length 9 62, 65, 74, 72, 75, 73, 74, 65, 64, "betrusted" 2, length 2 69, 6f, "io" 0, end of name qtype: 0, 1, type A qclass: 0, 1, type IN aname0: c0, name is a pointer (any value > 192 is a pointer) c, offset of 12 from start of aname0 0, 1, type A 0, 1, class IN 0, 0, e, 10, 0xe10 = 3600 seconds TTL 0, 4, 4 bytes address b9, c7, 6c, 99, address aname1: c0, name is a pointer c, 0, 1, type A 0, 1, class IN 0, 0, e, 10, TTL 0, 4, 4 byte address b9, c7, 6d, 99, address aname2: c0, c, 0, 1, 0, 1, 0, 0, e, 10, 0, 4, b9, c7, 6e, 99, aname3: c0, c, 0, 1, 0, 1, 0, 0, e, 10, 0, 4, b9, c7, 6f, 99 */ /* pub fn is_query(&self) -> bool { !self.is_response() } */ pub fn rcode(&self) -> DnsResponseCode
} pub struct Resolver { /// DnsServerManager is a service of the Net crate that automatically updates the DNS server list mgr: net::protocols::DnsServerManager, socket: UdpSocket, buf: [u8; DNS_PKT_MAX_LEN], trng: trng::Trng, freeze: bool, } impl Resolver { pub fn new(xns: &xous_names::XousNames) -> Resolver { let trng = trng::Trng::new(&xns).unwrap(); let local_port = (49152 + trng.get_u32().unwrap() % 16384) as u16; let socket = UdpSocket::bind( format!("0.0.0.0:{}", local_port), ) .expect("couldn't create socket for DNS resolver"); let timeout = Duration::from_millis(10_000); // 10 seconds for DNS to resolve by default socket.set_read_timeout(Some(timeout)).unwrap(); socket.set_nonblocking(false).unwrap(); // we want this to block. // we /could/ do a non-blocking DNS resolver, but...what would you do in the meantime?? // blocking is probably what we actually want this time. Resolver { mgr: net::protocols::DnsServerManager::register(&xns) .expect("Couldn't register the DNS server list auto-manager"), socket, buf: [0; DNS_PKT_MAX_LEN], trng, freeze: false, } } pub fn add_server(&mut self, addr: IpAddr) { self.mgr.add_server(addr); } pub fn remove_server(&mut self, addr: IpAddr) { self.mgr.remove_server(addr); } pub fn clear_all_servers(&mut self) { self.mgr.clear(); } pub fn set_freeze_config(&mut self, freeze: bool) { self.freeze = freeze; self.mgr.set_freeze(freeze); } pub fn get_freeze(&self) -> bool { self.freeze } /// this allows us to re-use the TRNG object pub fn trng_u32(&self) -> u32 { self.trng.get_u32().unwrap() } pub fn resolve(&mut self, name: &str) -> Result<HashMap<IpAddr, u32>, DnsResponseCode> { if let Some(dns_address) = self.mgr.get_random() { let dns_port = 53; let server = SocketAddr::new(dns_address, dns_port); let qname = name; let qtype = QueryType::A; let qclass = QueryClass::IN; let query = Message::query(qname, qtype, qclass, self.trng.get_u32().unwrap() as u16); self.socket .send_to(&query.datagram, &server) .map_err(|_| 
DnsResponseCode::NetworkError)?; match self.socket.recv(&mut self.buf) { Ok(len) => { let message = Message::from(&self.buf[..len]); if message.id() == query.id() && message.is_response() { return match message.rcode() { DnsResponseCode::NoError => message.parse_response(), rcode => Err(rcode), }; } else { Err(DnsResponseCode::NetworkError) } } Err(e) => match e.kind() { ErrorKind::WouldBlock => Err(DnsResponseCode::NetworkError), _ => Err(DnsResponseCode::UnknownError), }, } } else { Err(DnsResponseCode::NoServerSpecified) } } } #[derive(PartialEq, Debug)] #[repr(C)] enum NameConversionError { /// The length of the memory buffer was invalid InvalidMemoryBuffer = 1, /// The specified nameserver string was not UTF-8 InvalidString = 3, /// The message was not a mutable memory message InvalidMessageType = 4, } fn name_from_msg(env: &xous::MessageEnvelope) -> Result<&str, NameConversionError> { let msg = env .body .memory_message() .ok_or(NameConversionError::InvalidMessageType)?; let valid_bytes = msg.valid.map(|v| v.get()).unwrap_or_else(|| msg.buf.len()); if valid_bytes > DNS_NAME_LENGTH_LIMIT || valid_bytes < 1 { log::error!("valid bytes exceeded DNS name limit"); return Err(NameConversionError::InvalidMemoryBuffer); } // Safe because we've already validated that it's a valid range let str_slice = unsafe { core::slice::from_raw_parts(msg.buf.as_ptr(), valid_bytes) }; let name_string = core::str::from_utf8(str_slice).map_err(|_| NameConversionError::InvalidString)?; Ok(name_string) } fn fill_response(mut env: xous::MessageEnvelope, entries: &HashMap<IpAddr, u32>) -> Option<()> { let mem = env.body.memory_message_mut()?; let s: &mut [u8] = mem.buf.as_slice_mut(); let mut i = s.iter_mut(); // First tag = 1 for "Error" -- we'll fill this in at the end when it's successful *i.next()? = 1; // Limit the number of entries to 128, which is a nice number. Given that an IPv6 // address is 17 bytes, that means that ~240 IPv6 addresses will fit in a 4 kB page. 
// 128 is just a conservative value rounded down. let mut entry_count = entries.len(); if entry_count > 128 { entry_count = 128; } *i.next()? = entry_count.try_into().ok()?; // Start filling in the addreses for addr in entries.keys() { match addr { &IpAddr::V4(a) => { // IPv4 *i.next()? = 4; for entry in a.octets() { *i.next()? = entry; } } &IpAddr::V6(a) => { // IPv6 for entry in a.octets() { *i.next()? = entry; } *i.next()? = 6; } } } // Convert the entry to a "Success" message drop(i); s[0] = 0; None } fn fill_error(mut env: xous::MessageEnvelope, code: DnsResponseCode) -> Option<()> { let mem = env.body.memory_message_mut()?; let s: &mut [u8] = mem.buf.as_slice_mut(); let mut i = s.iter_mut(); *i.next()? = 1; *i.next()? = code as u8; None } fn main() -> ! { log_server::init_wait().unwrap(); log::set_max_level(log::LevelFilter::Info); log::info!("my PID is {}", xous::process::id()); // Time is stuck in the DNS crate because the status crate is out of resources, and the DNS // crate is fairly under-utilized and ideal for sticking a service like time in it. // // this kicks off the thread that services the `libstd` calls for time-related things. // we want this started really early, because it sanity checks the RTC and a bunch of other stuff. 
time::start_time_server(); time::start_time_ux(); let xns = xous_names::XousNames::new().unwrap(); let dns_sid = xns .register_name(api::SERVER_NAME_DNS, None) .expect("can't register server"); log::trace!("registered with NS -- {:?}", dns_sid); // this will magically populate a list of DNS servers when they become available let mut resolver = Resolver::new(&xns); // if you wanted to force a server into the initial config, you can do it here, for example: // resolver.add_server(IpAddr::V4(Ipv4Addr::new(1,1,1,1))); // the `u32` value is the TTL of the IpAddr let mut dns_cache = HashMap::<std::string::String, HashMap<IpAddr, u32>>::new(); // build a thread that pings the UpdateTtl function once every few minutes to expire the DNS cache thread::spawn({ let local_cid = xous::connect(dns_sid).unwrap(); move || { const TTL_INTERVAL_SECS: usize = 300; // every 5 minutes update the map let tt = ticktimer_server::Ticktimer::new().unwrap(); loop { tt.sleep_ms(TTL_INTERVAL_SECS * 1000).unwrap(); xous::send_message( local_cid, xous::Message::new_scalar( Opcode::UpdateTtl.to_usize().unwrap(), TTL_INTERVAL_SECS, 0, 0, 0, ), ) .expect("couldn't increment DNS cache"); } } }); log::trace!("ready to accept requests"); loop { let mut msg = xous::receive_message(dns_sid).unwrap(); match FromPrimitive::from_usize(msg.body.id()) { Some(Opcode::RawLookup) => { match name_from_msg(&msg).map(|s| s.to_owned()) { Ok(owned_name) => { // handle the special case of "localhost" as a string if owned_name == "localhost" { let mut local = HashMap::<IpAddr, u32>::new(); local.insert(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 86400); fill_response(msg, &local); continue; } log::trace!("performing a lookup of {}", owned_name); // Try to get the result out of the DNS cache if let Some(entries) = dns_cache.get(&owned_name) { fill_response(msg, entries); continue; } // This entry is not in the cache, so perform a lookup match resolver.resolve(&owned_name) { Ok(cache_entry) => { fill_response(msg, 
&cache_entry); dns_cache.insert(owned_name, cache_entry); continue; } Err(e) => { fill_error(msg, e); continue; } } } Err(e) => { log::error!("unable to do name lookup: {:?}", e); fill_error(msg, DnsResponseCode::NameError); continue; } }; } Some(Opcode::Lookup) => { let mut buf = unsafe { Buffer::from_memory_message_mut(msg.body.memory_message_mut().unwrap()) }; let name = buf .to_original::<String<DNS_NAME_LENGTH_LIMIT>, _>() .unwrap(); let name_std = std::string::String::from(name.as_str().unwrap()); if let Some(cache_entry) = dns_cache.get(&name_std) { // pick a random entry let rand = resolver.trng_u32() as usize % cache_entry.len(); for (index, (ip_addr, _)) in cache_entry.iter().enumerate() { if rand == index { log::debug!("DNS cached: {}->{:?}", name, ip_addr); let response = DnsResponse { addr: Some(NetIpAddr::from(*ip_addr)), code: DnsResponseCode::NoError, }; buf.replace(response).unwrap(); break; } } } else { match resolver.resolve(name.as_str().unwrap()) { Ok(cache_entry) => { if cache_entry.len() > 0 { dns_cache.insert(name_std, cache_entry); // now pick the entry back out again, as it was consumed... 
let name_std = std::string::String::from(name.as_str().unwrap()); let cache_entry = dns_cache.get(&name_std).unwrap(); // pick a random entry from the query response let rand = resolver.trng_u32() as usize % cache_entry.len(); for (index, (ip_addr, _)) in cache_entry.iter().enumerate() { if rand == index { let response = DnsResponse { addr: Some(NetIpAddr::from(*ip_addr)), code: DnsResponseCode::NoError, }; buf.replace(response).unwrap(); break; } } } else { // no names found let response = DnsResponse { addr: None, code: DnsResponseCode::NameError, }; buf.replace(response).unwrap(); } } Err(e) => { log::debug!("DNS query failed: {}->{:?}", name, e); let response = DnsResponse { addr: None, code: e, }; buf.replace(response).unwrap(); } } } } Some(Opcode::UpdateTtl) => msg_scalar_unpack!(msg, incr_secs, _, _, _, { let increment = if incr_secs < u32::MAX as usize { incr_secs as u32 } else { u32::MAX }; if !resolver.get_freeze() { let mut expired_names = Vec::<std::string::String>::new(); for (name, cache_map) in dns_cache.iter_mut() { // each entry can have multiple names with a different TTL // decrement the TTL, and note which go to zero let mut expired_entries = Vec::<IpAddr>::new(); for (entry, ttl) in cache_map.iter_mut() { log::debug!("entry: {:?}, ttl: {}, incr: {}", entry, ttl, increment); if *ttl < increment { *ttl = 0; expired_entries.push(*entry); } else { *ttl = *ttl - increment as u32; } } // remove the entries that are 0 for entry in expired_entries { log::debug!("DNS cache expiring {:?}", entry); cache_map.remove(&entry); } // if all the entries are removed, mark for removal from the cache entirely if cache_map.len() == 0 { // have to copy the name to a new object to track it let name = std::string::String::from(name.as_str()); expired_names.push(name); } } for name in expired_names { log::debug!("DNS cache removing {}", &name); dns_cache.remove(&name); } } }), Some(Opcode::Flush) => { dns_cache.clear(); } Some(Opcode::FreezeConfig) => { 
resolver.set_freeze_config(true); } Some(Opcode::ThawConfig) => { resolver.set_freeze_config(false); } Some(Opcode::Quit) => { log::warn!("got quit!"); break; } None => { log::error!("couldn't convert opcode: {:?}", msg); } } } // clean up our program log::trace!("main loop exit, destroying servers"); xns.unregister_server(dns_sid).unwrap(); xous::destroy_server(dns_sid).unwrap(); log::trace!("quitting"); xous::terminate_process(0) }
{ match (self.header() >> 11) & 0xF { 0 => DnsResponseCode::NoError, 1 => DnsResponseCode::FormatError, 2 => DnsResponseCode::ServerFailure, 3 => DnsResponseCode::NameError, 4 => DnsResponseCode::NotImplemented, 5 => DnsResponseCode::Refused, _ => DnsResponseCode::UnknownError, } }
identifier_body
main.rs
#![cfg_attr(target_os = "none", no_std)] #![cfg_attr(target_os = "none", no_main)] mod api; mod time; // why is this here? because it's the only place it'll fit. :-/ use api::*; use net::NetIpAddr; use num_traits::*; use xous::msg_scalar_unpack; use std::collections::HashMap; use std::convert::TryInto; use std::io::ErrorKind; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket}; use std::time::Duration; use std::thread; use xous_ipc::{Buffer, String}; // KISS DNS // The DNS implementation here is based on https://github.com/vinc/moros/blob/43ac7cdc8ccc860dc1b6f0f060b5dbcd01424c03/src/usr/host.rs // MOROS is MIT licensed. // See RFC 1035 for implementation details #[repr(u16)] enum QueryType { A = 1, // NS = 2, // MD = 3, // MF = 4, // CNAME = 5, // SOA = 6, // MX = 15, // TXT = 16, } #[repr(u16)] enum QueryClass { IN = 1, } struct Message { pub datagram: Vec<u8>, } const FLAG_RD: u16 = 0x0100; // Recursion desired impl Message { pub fn from(datagram: &[u8]) -> Self { Self { datagram: Vec::from(datagram), } } pub fn query(qname: &str, qtype: QueryType, qclass: QueryClass, id: u16) -> Self { let mut datagram = Vec::new(); for b in id.to_be_bytes().iter() { datagram.push(*b); // Transaction ID } for b in FLAG_RD.to_be_bytes().iter() { datagram.push(*b); // Flags } for b in (1 as u16).to_be_bytes().iter() { datagram.push(*b); // Questions } for _ in 0..6 { datagram.push(0); // Answer + Authority + Additional } for label in qname.split('.') { datagram.push(label.len() as u8); // QNAME label length for b in label.bytes() { datagram.push(b); // QNAME label bytes } } datagram.push(0); // Root null label for b in (qtype as u16).to_be_bytes().iter() { datagram.push(*b); // QTYPE } for b in (qclass as u16).to_be_bytes().iter() { datagram.push(*b); // QCLASS } Self { datagram } } pub fn id(&self) -> u16 { u16::from_be_bytes(self.datagram[0..2].try_into().unwrap()) } pub fn header(&self) -> u16 { u16::from_be_bytes(self.datagram[2..4].try_into().unwrap()) } pub fn 
is_response(&self) -> bool { if (self.header() & (1 << 15)) == 0 { false } else { true } } fn fast_foward_name(&self, start: usize) -> Result<usize, DnsResponseCode> { use DnsResponseCode::FormatError; let mut index = start; loop { log::trace!("cname index: {}", index); if *(self.datagram.get(index).ok_or(FormatError)?) == 0 { index += 1; break; } else { index += *(self.datagram.get(index).ok_or(FormatError)?) as usize; index += 1; } } Ok(index) } pub fn parse_response(&self) -> Result<HashMap<IpAddr, u32>, DnsResponseCode> { use DnsResponseCode::FormatError; log::trace!("parsing packet: {:?}", self.datagram); let mut map = HashMap::<IpAddr, u32>::new(); // ASSUME: the query ID and response bit fields have already been checked // and that the rcode is valid let qdcount = u16::from_be_bytes(self.datagram[4..6].try_into().unwrap()); let ancount = u16::from_be_bytes(self.datagram[6..8].try_into().unwrap()); let mut index = 12; // fast forward past the qname for queries in 0..qdcount { log::trace!("parsing query{}, index {}", queries, index); index = self.fast_foward_name(index)?; log::trace!("fast forward through qname to {}", index); // index is now at qtype let qtype = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); // A = 1, AAAA = 28 if qtype != 1 && qtype != 28 { log::error!("Problem parsing qname, qtype is not 1 or 28: {}", qtype); return Err(FormatError); } index += 2; let qclass = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); if qclass != 1 { log::error!("Problem parsing qname, qclass is not 1: {}", qclass); return Err(FormatError); } index += 2; } // index is now at the aname section for aname in 0..ancount { log::trace!("parsing aname{}, index {}", aname, index); // first check to see if we're dealing with a pointer or a name if self.datagram[index] >= 0xc0 { // pointer index += 1; if self.datagram[index] != 0xc { log::error!( "Found aname pointer, but value does not conform to length of aname header" ); 
return Err(FormatError); } index += 1; } else { // name, fast forward past the name index = self.fast_foward_name(index)?; log::trace!("fast forward aname to {}", index); } // index is now at type let atype = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); // A = 1, AAAA = 28 if atype != 1 && atype != 28 { log::error!("Problem parsing aname, type is not 1 or 28: {}", atype); return Err(FormatError); } index += 2; let aclass = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); if aclass != 1 { log::error!("Problem parsing aname, aclass is not 1: {}", aclass); return Err(FormatError); } index += 2; // this is our TTL let ttl = u32::from_be_bytes(self.datagram[index..index + 4].try_into().unwrap()); log::trace!("got ttl: {}", ttl); index += 4; // this is the payload length let addr_len = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); index += 2; match addr_len { // ipv4 4 => { if atype != 1 { log::error!("Got a 4-byte address, but ATYPE != A (1)"); return Err(FormatError); } // this copy happens because I can't figure out how to get Ipv4Addr::from() to realize it's casting from a [u8;4] let mut rdata: [u8; 4] = [0; 4]; for (&src, dst) in self.datagram[index..index + 4].iter().zip(rdata.iter_mut()) { *dst = src; } let addr = IpAddr::V4(Ipv4Addr::from(rdata)); index += 4; map.insert(addr, ttl); } // ipv6 16 => { if atype != 28 { log::error!("Got a 16-byte address, but ATYPE != AAAA (28)"); return Err(FormatError); } // this copy happens because I can't figure out how to get Ipv6Addr::from() to realize it's casting from a [u8;4] let mut rdata: [u8; 16] = [0; 16]; for (&src, dst) in self.datagram[index..index + 16] .iter() .zip(rdata.iter_mut()) { *dst = src; } let addr = IpAddr::V6(Ipv6Addr::from(rdata)); index += 16; map.insert(addr, ttl); } _ => { log::error!("Length field does not match a known record type"); return Err(FormatError); } } } Ok(map) } /* example response for: 
betrusted.io->185.199.111.153 Header: 61, ca, id 81, 80, header 0, 1, qdcount 0, 4, ancount 0, 0, nscount 0, 0, arcount qname: 9, length 9 62, 65, 74, 72, 75, 73, 74, 65, 64, "betrusted" 2, length 2 69, 6f, "io" 0, end of name qtype: 0, 1, type A qclass: 0, 1, type IN aname0: c0, name is a pointer (any value > 192 is a pointer) c, offset of 12 from start of aname0 0, 1, type A 0, 1, class IN 0, 0, e, 10, 0xe10 = 3600 seconds TTL 0, 4, 4 bytes address b9, c7, 6c, 99, address aname1: c0, name is a pointer c, 0, 1, type A 0, 1, class IN 0, 0, e, 10, TTL 0, 4, 4 byte address b9, c7, 6d, 99, address aname2: c0, c, 0, 1, 0, 1, 0, 0, e, 10, 0, 4, b9, c7, 6e, 99, aname3: c0, c, 0, 1, 0, 1, 0, 0, e, 10, 0, 4, b9, c7, 6f, 99 */ /* pub fn is_query(&self) -> bool { !self.is_response() } */ pub fn rcode(&self) -> DnsResponseCode { match (self.header() >> 11) & 0xF { 0 => DnsResponseCode::NoError, 1 => DnsResponseCode::FormatError, 2 => DnsResponseCode::ServerFailure, 3 => DnsResponseCode::NameError, 4 => DnsResponseCode::NotImplemented, 5 => DnsResponseCode::Refused, _ => DnsResponseCode::UnknownError, } } } pub struct Resolver { /// DnsServerManager is a service of the Net crate that automatically updates the DNS server list mgr: net::protocols::DnsServerManager, socket: UdpSocket, buf: [u8; DNS_PKT_MAX_LEN], trng: trng::Trng, freeze: bool, } impl Resolver { pub fn new(xns: &xous_names::XousNames) -> Resolver { let trng = trng::Trng::new(&xns).unwrap(); let local_port = (49152 + trng.get_u32().unwrap() % 16384) as u16; let socket = UdpSocket::bind( format!("0.0.0.0:{}", local_port), ) .expect("couldn't create socket for DNS resolver"); let timeout = Duration::from_millis(10_000); // 10 seconds for DNS to resolve by default socket.set_read_timeout(Some(timeout)).unwrap(); socket.set_nonblocking(false).unwrap(); // we want this to block. // we /could/ do a non-blocking DNS resolver, but...what would you do in the meantime?? 
// blocking is probably what we actually want this time. Resolver { mgr: net::protocols::DnsServerManager::register(&xns) .expect("Couldn't register the DNS server list auto-manager"), socket, buf: [0; DNS_PKT_MAX_LEN], trng, freeze: false, } } pub fn add_server(&mut self, addr: IpAddr) { self.mgr.add_server(addr); } pub fn remove_server(&mut self, addr: IpAddr) { self.mgr.remove_server(addr); } pub fn clear_all_servers(&mut self) { self.mgr.clear(); } pub fn
(&mut self, freeze: bool) { self.freeze = freeze; self.mgr.set_freeze(freeze); } pub fn get_freeze(&self) -> bool { self.freeze } /// this allows us to re-use the TRNG object pub fn trng_u32(&self) -> u32 { self.trng.get_u32().unwrap() } pub fn resolve(&mut self, name: &str) -> Result<HashMap<IpAddr, u32>, DnsResponseCode> { if let Some(dns_address) = self.mgr.get_random() { let dns_port = 53; let server = SocketAddr::new(dns_address, dns_port); let qname = name; let qtype = QueryType::A; let qclass = QueryClass::IN; let query = Message::query(qname, qtype, qclass, self.trng.get_u32().unwrap() as u16); self.socket .send_to(&query.datagram, &server) .map_err(|_| DnsResponseCode::NetworkError)?; match self.socket.recv(&mut self.buf) { Ok(len) => { let message = Message::from(&self.buf[..len]); if message.id() == query.id() && message.is_response() { return match message.rcode() { DnsResponseCode::NoError => message.parse_response(), rcode => Err(rcode), }; } else { Err(DnsResponseCode::NetworkError) } } Err(e) => match e.kind() { ErrorKind::WouldBlock => Err(DnsResponseCode::NetworkError), _ => Err(DnsResponseCode::UnknownError), }, } } else { Err(DnsResponseCode::NoServerSpecified) } } } #[derive(PartialEq, Debug)] #[repr(C)] enum NameConversionError { /// The length of the memory buffer was invalid InvalidMemoryBuffer = 1, /// The specified nameserver string was not UTF-8 InvalidString = 3, /// The message was not a mutable memory message InvalidMessageType = 4, } fn name_from_msg(env: &xous::MessageEnvelope) -> Result<&str, NameConversionError> { let msg = env .body .memory_message() .ok_or(NameConversionError::InvalidMessageType)?; let valid_bytes = msg.valid.map(|v| v.get()).unwrap_or_else(|| msg.buf.len()); if valid_bytes > DNS_NAME_LENGTH_LIMIT || valid_bytes < 1 { log::error!("valid bytes exceeded DNS name limit"); return Err(NameConversionError::InvalidMemoryBuffer); } // Safe because we've already validated that it's a valid range let str_slice = unsafe { 
core::slice::from_raw_parts(msg.buf.as_ptr(), valid_bytes) }; let name_string = core::str::from_utf8(str_slice).map_err(|_| NameConversionError::InvalidString)?; Ok(name_string) } fn fill_response(mut env: xous::MessageEnvelope, entries: &HashMap<IpAddr, u32>) -> Option<()> { let mem = env.body.memory_message_mut()?; let s: &mut [u8] = mem.buf.as_slice_mut(); let mut i = s.iter_mut(); // First tag = 1 for "Error" -- we'll fill this in at the end when it's successful *i.next()? = 1; // Limit the number of entries to 128, which is a nice number. Given that an IPv6 // address is 17 bytes, that means that ~240 IPv6 addresses will fit in a 4 kB page. // 128 is just a conservative value rounded down. let mut entry_count = entries.len(); if entry_count > 128 { entry_count = 128; } *i.next()? = entry_count.try_into().ok()?; // Start filling in the addreses for addr in entries.keys() { match addr { &IpAddr::V4(a) => { // IPv4 *i.next()? = 4; for entry in a.octets() { *i.next()? = entry; } } &IpAddr::V6(a) => { // IPv6 for entry in a.octets() { *i.next()? = entry; } *i.next()? = 6; } } } // Convert the entry to a "Success" message drop(i); s[0] = 0; None } fn fill_error(mut env: xous::MessageEnvelope, code: DnsResponseCode) -> Option<()> { let mem = env.body.memory_message_mut()?; let s: &mut [u8] = mem.buf.as_slice_mut(); let mut i = s.iter_mut(); *i.next()? = 1; *i.next()? = code as u8; None } fn main() -> ! { log_server::init_wait().unwrap(); log::set_max_level(log::LevelFilter::Info); log::info!("my PID is {}", xous::process::id()); // Time is stuck in the DNS crate because the status crate is out of resources, and the DNS // crate is fairly under-utilized and ideal for sticking a service like time in it. // // this kicks off the thread that services the `libstd` calls for time-related things. // we want this started really early, because it sanity checks the RTC and a bunch of other stuff. 
time::start_time_server(); time::start_time_ux(); let xns = xous_names::XousNames::new().unwrap(); let dns_sid = xns .register_name(api::SERVER_NAME_DNS, None) .expect("can't register server"); log::trace!("registered with NS -- {:?}", dns_sid); // this will magically populate a list of DNS servers when they become available let mut resolver = Resolver::new(&xns); // if you wanted to force a server into the initial config, you can do it here, for example: // resolver.add_server(IpAddr::V4(Ipv4Addr::new(1,1,1,1))); // the `u32` value is the TTL of the IpAddr let mut dns_cache = HashMap::<std::string::String, HashMap<IpAddr, u32>>::new(); // build a thread that pings the UpdateTtl function once every few minutes to expire the DNS cache thread::spawn({ let local_cid = xous::connect(dns_sid).unwrap(); move || { const TTL_INTERVAL_SECS: usize = 300; // every 5 minutes update the map let tt = ticktimer_server::Ticktimer::new().unwrap(); loop { tt.sleep_ms(TTL_INTERVAL_SECS * 1000).unwrap(); xous::send_message( local_cid, xous::Message::new_scalar( Opcode::UpdateTtl.to_usize().unwrap(), TTL_INTERVAL_SECS, 0, 0, 0, ), ) .expect("couldn't increment DNS cache"); } } }); log::trace!("ready to accept requests"); loop { let mut msg = xous::receive_message(dns_sid).unwrap(); match FromPrimitive::from_usize(msg.body.id()) { Some(Opcode::RawLookup) => { match name_from_msg(&msg).map(|s| s.to_owned()) { Ok(owned_name) => { // handle the special case of "localhost" as a string if owned_name == "localhost" { let mut local = HashMap::<IpAddr, u32>::new(); local.insert(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 86400); fill_response(msg, &local); continue; } log::trace!("performing a lookup of {}", owned_name); // Try to get the result out of the DNS cache if let Some(entries) = dns_cache.get(&owned_name) { fill_response(msg, entries); continue; } // This entry is not in the cache, so perform a lookup match resolver.resolve(&owned_name) { Ok(cache_entry) => { fill_response(msg, 
&cache_entry); dns_cache.insert(owned_name, cache_entry); continue; } Err(e) => { fill_error(msg, e); continue; } } } Err(e) => { log::error!("unable to do name lookup: {:?}", e); fill_error(msg, DnsResponseCode::NameError); continue; } }; } Some(Opcode::Lookup) => { let mut buf = unsafe { Buffer::from_memory_message_mut(msg.body.memory_message_mut().unwrap()) }; let name = buf .to_original::<String<DNS_NAME_LENGTH_LIMIT>, _>() .unwrap(); let name_std = std::string::String::from(name.as_str().unwrap()); if let Some(cache_entry) = dns_cache.get(&name_std) { // pick a random entry let rand = resolver.trng_u32() as usize % cache_entry.len(); for (index, (ip_addr, _)) in cache_entry.iter().enumerate() { if rand == index { log::debug!("DNS cached: {}->{:?}", name, ip_addr); let response = DnsResponse { addr: Some(NetIpAddr::from(*ip_addr)), code: DnsResponseCode::NoError, }; buf.replace(response).unwrap(); break; } } } else { match resolver.resolve(name.as_str().unwrap()) { Ok(cache_entry) => { if cache_entry.len() > 0 { dns_cache.insert(name_std, cache_entry); // now pick the entry back out again, as it was consumed... 
let name_std = std::string::String::from(name.as_str().unwrap()); let cache_entry = dns_cache.get(&name_std).unwrap(); // pick a random entry from the query response let rand = resolver.trng_u32() as usize % cache_entry.len(); for (index, (ip_addr, _)) in cache_entry.iter().enumerate() { if rand == index { let response = DnsResponse { addr: Some(NetIpAddr::from(*ip_addr)), code: DnsResponseCode::NoError, }; buf.replace(response).unwrap(); break; } } } else { // no names found let response = DnsResponse { addr: None, code: DnsResponseCode::NameError, }; buf.replace(response).unwrap(); } } Err(e) => { log::debug!("DNS query failed: {}->{:?}", name, e); let response = DnsResponse { addr: None, code: e, }; buf.replace(response).unwrap(); } } } } Some(Opcode::UpdateTtl) => msg_scalar_unpack!(msg, incr_secs, _, _, _, { let increment = if incr_secs < u32::MAX as usize { incr_secs as u32 } else { u32::MAX }; if !resolver.get_freeze() { let mut expired_names = Vec::<std::string::String>::new(); for (name, cache_map) in dns_cache.iter_mut() { // each entry can have multiple names with a different TTL // decrement the TTL, and note which go to zero let mut expired_entries = Vec::<IpAddr>::new(); for (entry, ttl) in cache_map.iter_mut() { log::debug!("entry: {:?}, ttl: {}, incr: {}", entry, ttl, increment); if *ttl < increment { *ttl = 0; expired_entries.push(*entry); } else { *ttl = *ttl - increment as u32; } } // remove the entries that are 0 for entry in expired_entries { log::debug!("DNS cache expiring {:?}", entry); cache_map.remove(&entry); } // if all the entries are removed, mark for removal from the cache entirely if cache_map.len() == 0 { // have to copy the name to a new object to track it let name = std::string::String::from(name.as_str()); expired_names.push(name); } } for name in expired_names { log::debug!("DNS cache removing {}", &name); dns_cache.remove(&name); } } }), Some(Opcode::Flush) => { dns_cache.clear(); } Some(Opcode::FreezeConfig) => { 
resolver.set_freeze_config(true); } Some(Opcode::ThawConfig) => { resolver.set_freeze_config(false); } Some(Opcode::Quit) => { log::warn!("got quit!"); break; } None => { log::error!("couldn't convert opcode: {:?}", msg); } } } // clean up our program log::trace!("main loop exit, destroying servers"); xns.unregister_server(dns_sid).unwrap(); xous::destroy_server(dns_sid).unwrap(); log::trace!("quitting"); xous::terminate_process(0) }
set_freeze_config
identifier_name
nodes_controller.go
package nodes import ( "context" "os" "time" dynatracev1alpha1 "github.com/Dynatrace/dynatrace-oneagent-operator/api/v1alpha1" "github.com/Dynatrace/dynatrace-oneagent-operator/controllers/utils" "github.com/Dynatrace/dynatrace-oneagent-operator/dtclient" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" ) const ( cacheName = "dynatrace-node-cache" ) var unschedulableTaints = []string{"ToBeDeletedByClusterAutoscaler"} type ReconcileNodes struct { namespace string client client.Client cache cache.Cache scheme *runtime.Scheme logger logr.Logger dtClientFunc utils.DynatraceClientFunc local bool } // Add creates a new Nodes Controller and adds it to the Manager. The Manager will set fields on the Controller // and Start it when the Manager is Started. func Add(mgr manager.Manager, ns string) error { return mgr.Add(&ReconcileNodes{ namespace: ns, client: mgr.GetClient(), cache: mgr.GetCache(), scheme: mgr.GetScheme(), logger: log.Log.WithName("nodes.controller"), dtClientFunc: utils.BuildDynatraceClient, local: os.Getenv("RUN_LOCAL") == "true", }) } // Start starts the Nodes Reconciler, and will block until a stop signal is sent. func (r *ReconcileNodes) Start(stop context.Context) error { r.cache.WaitForCacheSync(stop) chDels, err := r.watchDeletions(stop.Done()) if err != nil { // I've seen watchDeletions() fail because the Cache Informers weren't ready. WaitForCacheSync() // should block until they are, however, but I believe I saw this not being true once. // // Start() failing would exit the Operator process. 
Since this is a minor feature, let's disable // for now until further investigation is done. r.logger.Info("failed to initialize watcher for deleted nodes - disabled", "error", err) chDels = make(chan string) } chUpdates, err := r.watchUpdates() if err != nil { r.logger.Info("failed to initialize watcher for updating nodes - disabled", "error", err) chUpdates = make(chan string) } chAll := watchTicks(stop.Done(), 5*time.Minute) for { select { case <-stop.Done(): r.logger.Info("stopping nodes controller") return nil case node := <-chDels: if err := r.onDeletion(node); err != nil { r.logger.Error(err, "failed to reconcile deletion", "node", node) } case node := <-chUpdates: if err := r.onUpdate(node); err != nil { r.logger.Error(err, "failed to reconcile updates", "node", node) } case <-chAll: if err := r.reconcileAll(); err != nil { r.logger.Error(err, "failed to reconcile nodes") } } } } func (r *ReconcileNodes) onUpdate(node string) error { c, err := r.getCache() if err != nil { return err } if err = r.updateNode(c, node); err != nil { return err } return r.updateCache(c) } func (r *ReconcileNodes) onDeletion(node string) error { logger := r.logger.WithValues("node", node) logger.Info("node deletion notification received") c, err := r.getCache() if err != nil { return err } if err = r.removeNode(c, node, func(oaName string) (*dynatracev1alpha1.OneAgent, error) { var oa dynatracev1alpha1.OneAgent if err := r.client.Get(context.TODO(), client.ObjectKey{Name: oaName, Namespace: r.namespace}, &oa); err != nil { return nil, err } return &oa, nil }); err != nil { return err } return r.updateCache(c) } func (r *ReconcileNodes) reconcileAll() error { r.logger.Info("reconciling nodes") var oaLst dynatracev1alpha1.OneAgentList if err := r.client.List(context.TODO(), &oaLst, client.InNamespace(r.namespace)); err != nil { return err } oas := make(map[string]*dynatracev1alpha1.OneAgent, len(oaLst.Items)) for i := range oaLst.Items { oas[oaLst.Items[i].Name] = &oaLst.Items[i] } 
c, err := r.getCache() if err != nil { return err } var nodeLst corev1.NodeList if err := r.client.List(context.TODO(), &nodeLst); err != nil { return err } nodes := map[string]bool{} for i := range nodeLst.Items { node := nodeLst.Items[i] nodes[node.Name] = true // Sometimes Azure does not cordon off nodes before deleting them since they use taints, // this case is handled in the update event handler if isUnschedulable(&node) { if err = r.reconcileUnschedulableNode(&node, c); err != nil { return err } } } // Add or update all nodes seen on OneAgent instances to the c. for _, oa := range oas { if oa.Status.Instances != nil { for node, info := range oa.Status.Instances { if _, ok := nodes[node]; !ok { continue } info := CacheEntry{ Instance: oa.Name, IPAddress: info.IPAddress, LastSeen: time.Now().UTC(), } if cached, err := c.Get(node); err == nil { info.LastMarkedForTermination = cached.LastMarkedForTermination } if err := c.Set(node, info); err != nil { return err } } } } // Notify and remove all nodes on the c that aren't in the cluster. for _, node := range c.Keys() { if _, ok := nodes[node]; ok { continue } if err := r.removeNode(c, node, func(name string) (*dynatracev1alpha1.OneAgent, error) { if oa, ok := oas[name]; ok { return oa, nil } return nil, errors.NewNotFound(schema.GroupResource{ Group: oaLst.GroupVersionKind().Group, Resource: oaLst.GroupVersionKind().Kind, }, name) }); err != nil { r.logger.Error(err, "failed to remove node", "node", node) } } return r.updateCache(c) } func (r *ReconcileNodes) getCache() (*Cache, error) { var cm corev1.ConfigMap err := r.client.Get(context.TODO(), client.ObjectKey{Name: cacheName, Namespace: r.namespace}, &cm) if err == nil { return &Cache{Obj: &cm}, nil } if errors.IsNotFound(err) { r.logger.Info("no cache found, creating") cm := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: cacheName, Namespace: r.namespace, }, Data: map[string]string{}, } if !r.local { // If running locally, don't set the controller. 
deploy, err := utils.GetDeployment(r.client, r.namespace) if err != nil { return nil, err } if err = controllerutil.SetControllerReference(deploy, cm, r.scheme); err != nil { return nil, err } } return &Cache{Create: true, Obj: cm}, nil } return nil, err } func (r *ReconcileNodes) updateCache(c *Cache) error { if !c.Changed() { return nil } if c.Create { return r.client.Create(context.TODO(), c.Obj) } return r.client.Update(context.TODO(), c.Obj) } func (r *ReconcileNodes) removeNode(c *Cache, node string, oaFunc func(name string) (*dynatracev1alpha1.OneAgent, error)) error { logger := r.logger.WithValues("node", node) nodeInfo, err := c.Get(node) if err == ErrNotFound { logger.Info("ignoring uncached node") return nil } else if err != nil { return err } if time.Now().UTC().Sub(nodeInfo.LastSeen).Hours() > 1 { logger.Info("removing stale node") } else if nodeInfo.IPAddress == "" { logger.Info("removing node with unknown IP") } else { oa, err := oaFunc(nodeInfo.Instance) if errors.IsNotFound(err) { logger.Info("oneagent got already deleted") c.Delete(node) return nil } if err != nil { return err } err = r.markForTermination(c, oa, nodeInfo.IPAddress, node) if err != nil { return err } } c.Delete(node) return nil } func (r *ReconcileNodes) updateNode(c *Cache, nodeName string) error { node := &corev1.Node{} err := r.client.Get(context.TODO(), client.ObjectKey{Name: nodeName}, node) if err != nil { return err } if !isUnschedulable(node) { return nil } return r.reconcileUnschedulableNode(node, c) } func (r *ReconcileNodes) sendMarkedForTermination(oa *dynatracev1alpha1.OneAgent, nodeIP string, lastSeen time.Time) error { dtc, err := r.dtClientFunc(r.client, oa, true, true) if err != nil { return err } entityID, err := dtc.GetEntityIDForIP(nodeIP) if err != nil { return err } ts := uint64(lastSeen.Add(-10*time.Minute).UnixNano()) / uint64(time.Millisecond) return dtc.SendEvent(&dtclient.EventData{ EventType: dtclient.MarkedForTerminationEvent, Source: "OneAgent 
Operator", Description: "Kubernetes node cordoned. Node might be drained or terminated.", StartInMillis: ts, EndInMillis: ts, AttachRules: dtclient.EventDataAttachRules{ EntityIDs: []string{entityID}, }, }) } func (r *ReconcileNodes) reconcileUnschedulableNode(node *corev1.Node, c *Cache) error
func (r *ReconcileNodes) markForTermination(c *Cache, oneAgent *dynatracev1alpha1.OneAgent, ipAddress string, nodeName string) error { cachedNode, err := c.Get(nodeName) if err != nil { return err } if !isMarkableForTermination(&cachedNode) { return nil } r.logger.Info("sending mark for termination event to dynatrace server", "ip", ipAddress, "node", nodeName) if err = updateLastMarkedForTerminationTimestamp(c, &cachedNode, nodeName); err != nil { return err } return r.sendMarkedForTermination(oneAgent, ipAddress, cachedNode.LastSeen) } func isUnschedulable(node *corev1.Node) bool { return node.Spec.Unschedulable || hasUnschedulableTaint(node) } func hasUnschedulableTaint(node *corev1.Node) bool { for _, taint := range node.Spec.Taints { for _, unschedulableTaint := range unschedulableTaints { if taint.Key == unschedulableTaint { return true } } } return false } // isMarkableForTermination checks if the timestamp from last mark is at least one hour old func isMarkableForTermination(nodeInfo *CacheEntry) bool { // If the last mark was an hour ago, mark again // Zero value for time.Time is 0001-01-01, so first mark is also executed lastMarked := nodeInfo.LastMarkedForTermination return lastMarked.UTC().Add(time.Hour).Before(time.Now().UTC()) } func updateLastMarkedForTerminationTimestamp(c *Cache, nodeInfo *CacheEntry, nodeName string) error { nodeInfo.LastMarkedForTermination = time.Now().UTC() return c.Set(nodeName, *nodeInfo) }
{ oneAgent, err := r.determineOneAgentForNode(node.Name) if err != nil { return err } if oneAgent == nil { return nil } // determineOneAgentForNode only returns a oneagent object if a node instance is present instance := oneAgent.Status.Instances[node.Name] if _, err = c.Get(node.Name); err != nil { if err == ErrNotFound { // If node not found in c add it cachedNode := CacheEntry{ Instance: oneAgent.Name, IPAddress: instance.IPAddress, LastSeen: time.Now().UTC(), } err = c.Set(node.Name, cachedNode) if err != nil { return err } } else { return err } } return r.markForTermination(c, oneAgent, instance.IPAddress, node.Name) }
identifier_body
nodes_controller.go
package nodes import ( "context" "os" "time" dynatracev1alpha1 "github.com/Dynatrace/dynatrace-oneagent-operator/api/v1alpha1" "github.com/Dynatrace/dynatrace-oneagent-operator/controllers/utils" "github.com/Dynatrace/dynatrace-oneagent-operator/dtclient" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" ) const ( cacheName = "dynatrace-node-cache" ) var unschedulableTaints = []string{"ToBeDeletedByClusterAutoscaler"} type ReconcileNodes struct { namespace string client client.Client cache cache.Cache scheme *runtime.Scheme logger logr.Logger dtClientFunc utils.DynatraceClientFunc local bool } // Add creates a new Nodes Controller and adds it to the Manager. The Manager will set fields on the Controller // and Start it when the Manager is Started. func Add(mgr manager.Manager, ns string) error { return mgr.Add(&ReconcileNodes{ namespace: ns, client: mgr.GetClient(), cache: mgr.GetCache(), scheme: mgr.GetScheme(), logger: log.Log.WithName("nodes.controller"), dtClientFunc: utils.BuildDynatraceClient, local: os.Getenv("RUN_LOCAL") == "true", }) } // Start starts the Nodes Reconciler, and will block until a stop signal is sent. func (r *ReconcileNodes) Start(stop context.Context) error { r.cache.WaitForCacheSync(stop) chDels, err := r.watchDeletions(stop.Done()) if err != nil { // I've seen watchDeletions() fail because the Cache Informers weren't ready. WaitForCacheSync() // should block until they are, however, but I believe I saw this not being true once. // // Start() failing would exit the Operator process. 
Since this is a minor feature, let's disable // for now until further investigation is done. r.logger.Info("failed to initialize watcher for deleted nodes - disabled", "error", err) chDels = make(chan string) } chUpdates, err := r.watchUpdates() if err != nil { r.logger.Info("failed to initialize watcher for updating nodes - disabled", "error", err) chUpdates = make(chan string) } chAll := watchTicks(stop.Done(), 5*time.Minute) for { select { case <-stop.Done(): r.logger.Info("stopping nodes controller") return nil case node := <-chDels: if err := r.onDeletion(node); err != nil { r.logger.Error(err, "failed to reconcile deletion", "node", node) } case node := <-chUpdates: if err := r.onUpdate(node); err != nil { r.logger.Error(err, "failed to reconcile updates", "node", node) } case <-chAll: if err := r.reconcileAll(); err != nil { r.logger.Error(err, "failed to reconcile nodes") } } } } func (r *ReconcileNodes) onUpdate(node string) error { c, err := r.getCache() if err != nil { return err } if err = r.updateNode(c, node); err != nil { return err } return r.updateCache(c) } func (r *ReconcileNodes) onDeletion(node string) error { logger := r.logger.WithValues("node", node) logger.Info("node deletion notification received") c, err := r.getCache() if err != nil { return err } if err = r.removeNode(c, node, func(oaName string) (*dynatracev1alpha1.OneAgent, error) { var oa dynatracev1alpha1.OneAgent if err := r.client.Get(context.TODO(), client.ObjectKey{Name: oaName, Namespace: r.namespace}, &oa); err != nil { return nil, err } return &oa, nil }); err != nil { return err } return r.updateCache(c) } func (r *ReconcileNodes) reconcileAll() error { r.logger.Info("reconciling nodes") var oaLst dynatracev1alpha1.OneAgentList if err := r.client.List(context.TODO(), &oaLst, client.InNamespace(r.namespace)); err != nil { return err } oas := make(map[string]*dynatracev1alpha1.OneAgent, len(oaLst.Items)) for i := range oaLst.Items { oas[oaLst.Items[i].Name] = &oaLst.Items[i] } 
c, err := r.getCache() if err != nil { return err } var nodeLst corev1.NodeList if err := r.client.List(context.TODO(), &nodeLst); err != nil { return err } nodes := map[string]bool{} for i := range nodeLst.Items { node := nodeLst.Items[i] nodes[node.Name] = true // Sometimes Azure does not cordon off nodes before deleting them since they use taints, // this case is handled in the update event handler if isUnschedulable(&node) { if err = r.reconcileUnschedulableNode(&node, c); err != nil { return err } } } // Add or update all nodes seen on OneAgent instances to the c. for _, oa := range oas { if oa.Status.Instances != nil { for node, info := range oa.Status.Instances { if _, ok := nodes[node]; !ok { continue } info := CacheEntry{ Instance: oa.Name, IPAddress: info.IPAddress, LastSeen: time.Now().UTC(), } if cached, err := c.Get(node); err == nil { info.LastMarkedForTermination = cached.LastMarkedForTermination } if err := c.Set(node, info); err != nil { return err } } } } // Notify and remove all nodes on the c that aren't in the cluster. for _, node := range c.Keys() { if _, ok := nodes[node]; ok { continue } if err := r.removeNode(c, node, func(name string) (*dynatracev1alpha1.OneAgent, error) { if oa, ok := oas[name]; ok { return oa, nil } return nil, errors.NewNotFound(schema.GroupResource{ Group: oaLst.GroupVersionKind().Group, Resource: oaLst.GroupVersionKind().Kind, }, name) }); err != nil { r.logger.Error(err, "failed to remove node", "node", node) } } return r.updateCache(c) } func (r *ReconcileNodes) getCache() (*Cache, error) { var cm corev1.ConfigMap err := r.client.Get(context.TODO(), client.ObjectKey{Name: cacheName, Namespace: r.namespace}, &cm) if err == nil { return &Cache{Obj: &cm}, nil } if errors.IsNotFound(err) { r.logger.Info("no cache found, creating") cm := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: cacheName, Namespace: r.namespace, }, Data: map[string]string{}, } if !r.local { // If running locally, don't set the controller. 
deploy, err := utils.GetDeployment(r.client, r.namespace) if err != nil { return nil, err } if err = controllerutil.SetControllerReference(deploy, cm, r.scheme); err != nil { return nil, err } } return &Cache{Create: true, Obj: cm}, nil } return nil, err } func (r *ReconcileNodes) updateCache(c *Cache) error { if !c.Changed() { return nil } if c.Create { return r.client.Create(context.TODO(), c.Obj) } return r.client.Update(context.TODO(), c.Obj) } func (r *ReconcileNodes) removeNode(c *Cache, node string, oaFunc func(name string) (*dynatracev1alpha1.OneAgent, error)) error { logger := r.logger.WithValues("node", node) nodeInfo, err := c.Get(node) if err == ErrNotFound { logger.Info("ignoring uncached node") return nil } else if err != nil { return err } if time.Now().UTC().Sub(nodeInfo.LastSeen).Hours() > 1 { logger.Info("removing stale node") } else if nodeInfo.IPAddress == "" { logger.Info("removing node with unknown IP") } else { oa, err := oaFunc(nodeInfo.Instance) if errors.IsNotFound(err) { logger.Info("oneagent got already deleted") c.Delete(node) return nil } if err != nil { return err } err = r.markForTermination(c, oa, nodeInfo.IPAddress, node) if err != nil { return err } } c.Delete(node) return nil } func (r *ReconcileNodes) updateNode(c *Cache, nodeName string) error { node := &corev1.Node{} err := r.client.Get(context.TODO(), client.ObjectKey{Name: nodeName}, node) if err != nil { return err } if !isUnschedulable(node) { return nil } return r.reconcileUnschedulableNode(node, c) } func (r *ReconcileNodes) sendMarkedForTermination(oa *dynatracev1alpha1.OneAgent, nodeIP string, lastSeen time.Time) error { dtc, err := r.dtClientFunc(r.client, oa, true, true) if err != nil { return err } entityID, err := dtc.GetEntityIDForIP(nodeIP) if err != nil { return err } ts := uint64(lastSeen.Add(-10*time.Minute).UnixNano()) / uint64(time.Millisecond) return dtc.SendEvent(&dtclient.EventData{ EventType: dtclient.MarkedForTerminationEvent, Source: "OneAgent 
Operator", Description: "Kubernetes node cordoned. Node might be drained or terminated.", StartInMillis: ts, EndInMillis: ts, AttachRules: dtclient.EventDataAttachRules{ EntityIDs: []string{entityID}, }, }) } func (r *ReconcileNodes) reconcileUnschedulableNode(node *corev1.Node, c *Cache) error { oneAgent, err := r.determineOneAgentForNode(node.Name) if err != nil { return err } if oneAgent == nil { return nil } // determineOneAgentForNode only returns a oneagent object if a node instance is present instance := oneAgent.Status.Instances[node.Name] if _, err = c.Get(node.Name); err != nil { if err == ErrNotFound { // If node not found in c add it cachedNode := CacheEntry{ Instance: oneAgent.Name, IPAddress: instance.IPAddress, LastSeen: time.Now().UTC(), } err = c.Set(node.Name, cachedNode) if err != nil { return err } } else { return err } } return r.markForTermination(c, oneAgent, instance.IPAddress, node.Name) } func (r *ReconcileNodes) markForTermination(c *Cache, oneAgent *dynatracev1alpha1.OneAgent, ipAddress string, nodeName string) error { cachedNode, err := c.Get(nodeName) if err != nil { return err } if !isMarkableForTermination(&cachedNode) { return nil } r.logger.Info("sending mark for termination event to dynatrace server", "ip", ipAddress, "node", nodeName) if err = updateLastMarkedForTerminationTimestamp(c, &cachedNode, nodeName); err != nil { return err } return r.sendMarkedForTermination(oneAgent, ipAddress, cachedNode.LastSeen) } func isUnschedulable(node *corev1.Node) bool { return node.Spec.Unschedulable || hasUnschedulableTaint(node) } func hasUnschedulableTaint(node *corev1.Node) bool { for _, taint := range node.Spec.Taints { for _, unschedulableTaint := range unschedulableTaints { if taint.Key == unschedulableTaint { return true } } } return false } // isMarkableForTermination checks if the timestamp from last mark is at least one hour old func isMarkableForTermination(nodeInfo *CacheEntry) bool { // If the last mark was an hour ago, mark 
again // Zero value for time.Time is 0001-01-01, so first mark is also executed lastMarked := nodeInfo.LastMarkedForTermination return lastMarked.UTC().Add(time.Hour).Before(time.Now().UTC()) } func
(c *Cache, nodeInfo *CacheEntry, nodeName string) error { nodeInfo.LastMarkedForTermination = time.Now().UTC() return c.Set(nodeName, *nodeInfo) }
updateLastMarkedForTerminationTimestamp
identifier_name
nodes_controller.go
package nodes import ( "context" "os" "time" dynatracev1alpha1 "github.com/Dynatrace/dynatrace-oneagent-operator/api/v1alpha1" "github.com/Dynatrace/dynatrace-oneagent-operator/controllers/utils" "github.com/Dynatrace/dynatrace-oneagent-operator/dtclient" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" ) const ( cacheName = "dynatrace-node-cache" ) var unschedulableTaints = []string{"ToBeDeletedByClusterAutoscaler"} type ReconcileNodes struct { namespace string client client.Client cache cache.Cache scheme *runtime.Scheme logger logr.Logger dtClientFunc utils.DynatraceClientFunc local bool } // Add creates a new Nodes Controller and adds it to the Manager. The Manager will set fields on the Controller // and Start it when the Manager is Started. func Add(mgr manager.Manager, ns string) error { return mgr.Add(&ReconcileNodes{ namespace: ns, client: mgr.GetClient(), cache: mgr.GetCache(), scheme: mgr.GetScheme(), logger: log.Log.WithName("nodes.controller"), dtClientFunc: utils.BuildDynatraceClient, local: os.Getenv("RUN_LOCAL") == "true", }) } // Start starts the Nodes Reconciler, and will block until a stop signal is sent. func (r *ReconcileNodes) Start(stop context.Context) error { r.cache.WaitForCacheSync(stop) chDels, err := r.watchDeletions(stop.Done()) if err != nil { // I've seen watchDeletions() fail because the Cache Informers weren't ready. WaitForCacheSync() // should block until they are, however, but I believe I saw this not being true once. // // Start() failing would exit the Operator process. 
Since this is a minor feature, let's disable // for now until further investigation is done. r.logger.Info("failed to initialize watcher for deleted nodes - disabled", "error", err) chDels = make(chan string) } chUpdates, err := r.watchUpdates() if err != nil { r.logger.Info("failed to initialize watcher for updating nodes - disabled", "error", err) chUpdates = make(chan string) } chAll := watchTicks(stop.Done(), 5*time.Minute) for { select { case <-stop.Done(): r.logger.Info("stopping nodes controller") return nil case node := <-chDels: if err := r.onDeletion(node); err != nil { r.logger.Error(err, "failed to reconcile deletion", "node", node) } case node := <-chUpdates: if err := r.onUpdate(node); err != nil { r.logger.Error(err, "failed to reconcile updates", "node", node) } case <-chAll: if err := r.reconcileAll(); err != nil { r.logger.Error(err, "failed to reconcile nodes") } } } } func (r *ReconcileNodes) onUpdate(node string) error { c, err := r.getCache() if err != nil { return err } if err = r.updateNode(c, node); err != nil { return err } return r.updateCache(c) } func (r *ReconcileNodes) onDeletion(node string) error { logger := r.logger.WithValues("node", node) logger.Info("node deletion notification received") c, err := r.getCache() if err != nil { return err } if err = r.removeNode(c, node, func(oaName string) (*dynatracev1alpha1.OneAgent, error) { var oa dynatracev1alpha1.OneAgent if err := r.client.Get(context.TODO(), client.ObjectKey{Name: oaName, Namespace: r.namespace}, &oa); err != nil { return nil, err } return &oa, nil }); err != nil { return err } return r.updateCache(c) } func (r *ReconcileNodes) reconcileAll() error { r.logger.Info("reconciling nodes") var oaLst dynatracev1alpha1.OneAgentList if err := r.client.List(context.TODO(), &oaLst, client.InNamespace(r.namespace)); err != nil { return err } oas := make(map[string]*dynatracev1alpha1.OneAgent, len(oaLst.Items)) for i := range oaLst.Items { oas[oaLst.Items[i].Name] = &oaLst.Items[i] } 
c, err := r.getCache() if err != nil { return err } var nodeLst corev1.NodeList if err := r.client.List(context.TODO(), &nodeLst); err != nil { return err } nodes := map[string]bool{} for i := range nodeLst.Items { node := nodeLst.Items[i] nodes[node.Name] = true // Sometimes Azure does not cordon off nodes before deleting them since they use taints, // this case is handled in the update event handler if isUnschedulable(&node) { if err = r.reconcileUnschedulableNode(&node, c); err != nil { return err } } } // Add or update all nodes seen on OneAgent instances to the c. for _, oa := range oas { if oa.Status.Instances != nil { for node, info := range oa.Status.Instances { if _, ok := nodes[node]; !ok { continue } info := CacheEntry{ Instance: oa.Name, IPAddress: info.IPAddress, LastSeen: time.Now().UTC(), } if cached, err := c.Get(node); err == nil { info.LastMarkedForTermination = cached.LastMarkedForTermination } if err := c.Set(node, info); err != nil { return err } } } } // Notify and remove all nodes on the c that aren't in the cluster. for _, node := range c.Keys() { if _, ok := nodes[node]; ok { continue } if err := r.removeNode(c, node, func(name string) (*dynatracev1alpha1.OneAgent, error) { if oa, ok := oas[name]; ok { return oa, nil } return nil, errors.NewNotFound(schema.GroupResource{ Group: oaLst.GroupVersionKind().Group, Resource: oaLst.GroupVersionKind().Kind,
r.logger.Error(err, "failed to remove node", "node", node) } } return r.updateCache(c) } func (r *ReconcileNodes) getCache() (*Cache, error) { var cm corev1.ConfigMap err := r.client.Get(context.TODO(), client.ObjectKey{Name: cacheName, Namespace: r.namespace}, &cm) if err == nil { return &Cache{Obj: &cm}, nil } if errors.IsNotFound(err) { r.logger.Info("no cache found, creating") cm := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: cacheName, Namespace: r.namespace, }, Data: map[string]string{}, } if !r.local { // If running locally, don't set the controller. deploy, err := utils.GetDeployment(r.client, r.namespace) if err != nil { return nil, err } if err = controllerutil.SetControllerReference(deploy, cm, r.scheme); err != nil { return nil, err } } return &Cache{Create: true, Obj: cm}, nil } return nil, err } func (r *ReconcileNodes) updateCache(c *Cache) error { if !c.Changed() { return nil } if c.Create { return r.client.Create(context.TODO(), c.Obj) } return r.client.Update(context.TODO(), c.Obj) } func (r *ReconcileNodes) removeNode(c *Cache, node string, oaFunc func(name string) (*dynatracev1alpha1.OneAgent, error)) error { logger := r.logger.WithValues("node", node) nodeInfo, err := c.Get(node) if err == ErrNotFound { logger.Info("ignoring uncached node") return nil } else if err != nil { return err } if time.Now().UTC().Sub(nodeInfo.LastSeen).Hours() > 1 { logger.Info("removing stale node") } else if nodeInfo.IPAddress == "" { logger.Info("removing node with unknown IP") } else { oa, err := oaFunc(nodeInfo.Instance) if errors.IsNotFound(err) { logger.Info("oneagent got already deleted") c.Delete(node) return nil } if err != nil { return err } err = r.markForTermination(c, oa, nodeInfo.IPAddress, node) if err != nil { return err } } c.Delete(node) return nil } func (r *ReconcileNodes) updateNode(c *Cache, nodeName string) error { node := &corev1.Node{} err := r.client.Get(context.TODO(), client.ObjectKey{Name: nodeName}, node) if err != nil { 
return err } if !isUnschedulable(node) { return nil } return r.reconcileUnschedulableNode(node, c) } func (r *ReconcileNodes) sendMarkedForTermination(oa *dynatracev1alpha1.OneAgent, nodeIP string, lastSeen time.Time) error { dtc, err := r.dtClientFunc(r.client, oa, true, true) if err != nil { return err } entityID, err := dtc.GetEntityIDForIP(nodeIP) if err != nil { return err } ts := uint64(lastSeen.Add(-10*time.Minute).UnixNano()) / uint64(time.Millisecond) return dtc.SendEvent(&dtclient.EventData{ EventType: dtclient.MarkedForTerminationEvent, Source: "OneAgent Operator", Description: "Kubernetes node cordoned. Node might be drained or terminated.", StartInMillis: ts, EndInMillis: ts, AttachRules: dtclient.EventDataAttachRules{ EntityIDs: []string{entityID}, }, }) } func (r *ReconcileNodes) reconcileUnschedulableNode(node *corev1.Node, c *Cache) error { oneAgent, err := r.determineOneAgentForNode(node.Name) if err != nil { return err } if oneAgent == nil { return nil } // determineOneAgentForNode only returns a oneagent object if a node instance is present instance := oneAgent.Status.Instances[node.Name] if _, err = c.Get(node.Name); err != nil { if err == ErrNotFound { // If node not found in c add it cachedNode := CacheEntry{ Instance: oneAgent.Name, IPAddress: instance.IPAddress, LastSeen: time.Now().UTC(), } err = c.Set(node.Name, cachedNode) if err != nil { return err } } else { return err } } return r.markForTermination(c, oneAgent, instance.IPAddress, node.Name) } func (r *ReconcileNodes) markForTermination(c *Cache, oneAgent *dynatracev1alpha1.OneAgent, ipAddress string, nodeName string) error { cachedNode, err := c.Get(nodeName) if err != nil { return err } if !isMarkableForTermination(&cachedNode) { return nil } r.logger.Info("sending mark for termination event to dynatrace server", "ip", ipAddress, "node", nodeName) if err = updateLastMarkedForTerminationTimestamp(c, &cachedNode, nodeName); err != nil { return err } return 
r.sendMarkedForTermination(oneAgent, ipAddress, cachedNode.LastSeen) } func isUnschedulable(node *corev1.Node) bool { return node.Spec.Unschedulable || hasUnschedulableTaint(node) } func hasUnschedulableTaint(node *corev1.Node) bool { for _, taint := range node.Spec.Taints { for _, unschedulableTaint := range unschedulableTaints { if taint.Key == unschedulableTaint { return true } } } return false } // isMarkableForTermination checks if the timestamp from last mark is at least one hour old func isMarkableForTermination(nodeInfo *CacheEntry) bool { // If the last mark was an hour ago, mark again // Zero value for time.Time is 0001-01-01, so first mark is also executed lastMarked := nodeInfo.LastMarkedForTermination return lastMarked.UTC().Add(time.Hour).Before(time.Now().UTC()) } func updateLastMarkedForTerminationTimestamp(c *Cache, nodeInfo *CacheEntry, nodeName string) error { nodeInfo.LastMarkedForTermination = time.Now().UTC() return c.Set(nodeName, *nodeInfo) }
}, name) }); err != nil {
random_line_split
nodes_controller.go
package nodes import ( "context" "os" "time" dynatracev1alpha1 "github.com/Dynatrace/dynatrace-oneagent-operator/api/v1alpha1" "github.com/Dynatrace/dynatrace-oneagent-operator/controllers/utils" "github.com/Dynatrace/dynatrace-oneagent-operator/dtclient" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" ) const ( cacheName = "dynatrace-node-cache" ) var unschedulableTaints = []string{"ToBeDeletedByClusterAutoscaler"} type ReconcileNodes struct { namespace string client client.Client cache cache.Cache scheme *runtime.Scheme logger logr.Logger dtClientFunc utils.DynatraceClientFunc local bool } // Add creates a new Nodes Controller and adds it to the Manager. The Manager will set fields on the Controller // and Start it when the Manager is Started. func Add(mgr manager.Manager, ns string) error { return mgr.Add(&ReconcileNodes{ namespace: ns, client: mgr.GetClient(), cache: mgr.GetCache(), scheme: mgr.GetScheme(), logger: log.Log.WithName("nodes.controller"), dtClientFunc: utils.BuildDynatraceClient, local: os.Getenv("RUN_LOCAL") == "true", }) } // Start starts the Nodes Reconciler, and will block until a stop signal is sent. func (r *ReconcileNodes) Start(stop context.Context) error { r.cache.WaitForCacheSync(stop) chDels, err := r.watchDeletions(stop.Done()) if err != nil { // I've seen watchDeletions() fail because the Cache Informers weren't ready. WaitForCacheSync() // should block until they are, however, but I believe I saw this not being true once. // // Start() failing would exit the Operator process. 
Since this is a minor feature, let's disable // for now until further investigation is done. r.logger.Info("failed to initialize watcher for deleted nodes - disabled", "error", err) chDels = make(chan string) } chUpdates, err := r.watchUpdates() if err != nil { r.logger.Info("failed to initialize watcher for updating nodes - disabled", "error", err) chUpdates = make(chan string) } chAll := watchTicks(stop.Done(), 5*time.Minute) for { select { case <-stop.Done(): r.logger.Info("stopping nodes controller") return nil case node := <-chDels: if err := r.onDeletion(node); err != nil { r.logger.Error(err, "failed to reconcile deletion", "node", node) } case node := <-chUpdates: if err := r.onUpdate(node); err != nil { r.logger.Error(err, "failed to reconcile updates", "node", node) } case <-chAll: if err := r.reconcileAll(); err != nil { r.logger.Error(err, "failed to reconcile nodes") } } } } func (r *ReconcileNodes) onUpdate(node string) error { c, err := r.getCache() if err != nil { return err } if err = r.updateNode(c, node); err != nil { return err } return r.updateCache(c) } func (r *ReconcileNodes) onDeletion(node string) error { logger := r.logger.WithValues("node", node) logger.Info("node deletion notification received") c, err := r.getCache() if err != nil { return err } if err = r.removeNode(c, node, func(oaName string) (*dynatracev1alpha1.OneAgent, error) { var oa dynatracev1alpha1.OneAgent if err := r.client.Get(context.TODO(), client.ObjectKey{Name: oaName, Namespace: r.namespace}, &oa); err != nil { return nil, err } return &oa, nil }); err != nil { return err } return r.updateCache(c) } func (r *ReconcileNodes) reconcileAll() error { r.logger.Info("reconciling nodes") var oaLst dynatracev1alpha1.OneAgentList if err := r.client.List(context.TODO(), &oaLst, client.InNamespace(r.namespace)); err != nil { return err } oas := make(map[string]*dynatracev1alpha1.OneAgent, len(oaLst.Items)) for i := range oaLst.Items { oas[oaLst.Items[i].Name] = &oaLst.Items[i] } 
c, err := r.getCache() if err != nil { return err } var nodeLst corev1.NodeList if err := r.client.List(context.TODO(), &nodeLst); err != nil { return err } nodes := map[string]bool{} for i := range nodeLst.Items { node := nodeLst.Items[i] nodes[node.Name] = true // Sometimes Azure does not cordon off nodes before deleting them since they use taints, // this case is handled in the update event handler if isUnschedulable(&node) { if err = r.reconcileUnschedulableNode(&node, c); err != nil { return err } } } // Add or update all nodes seen on OneAgent instances to the c. for _, oa := range oas { if oa.Status.Instances != nil { for node, info := range oa.Status.Instances { if _, ok := nodes[node]; !ok { continue } info := CacheEntry{ Instance: oa.Name, IPAddress: info.IPAddress, LastSeen: time.Now().UTC(), } if cached, err := c.Get(node); err == nil { info.LastMarkedForTermination = cached.LastMarkedForTermination } if err := c.Set(node, info); err != nil { return err } } } } // Notify and remove all nodes on the c that aren't in the cluster. for _, node := range c.Keys() { if _, ok := nodes[node]; ok { continue } if err := r.removeNode(c, node, func(name string) (*dynatracev1alpha1.OneAgent, error) { if oa, ok := oas[name]; ok { return oa, nil } return nil, errors.NewNotFound(schema.GroupResource{ Group: oaLst.GroupVersionKind().Group, Resource: oaLst.GroupVersionKind().Kind, }, name) }); err != nil { r.logger.Error(err, "failed to remove node", "node", node) } } return r.updateCache(c) } func (r *ReconcileNodes) getCache() (*Cache, error) { var cm corev1.ConfigMap err := r.client.Get(context.TODO(), client.ObjectKey{Name: cacheName, Namespace: r.namespace}, &cm) if err == nil { return &Cache{Obj: &cm}, nil } if errors.IsNotFound(err) { r.logger.Info("no cache found, creating") cm := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: cacheName, Namespace: r.namespace, }, Data: map[string]string{}, } if !r.local { // If running locally, don't set the controller. 
deploy, err := utils.GetDeployment(r.client, r.namespace) if err != nil { return nil, err } if err = controllerutil.SetControllerReference(deploy, cm, r.scheme); err != nil { return nil, err } } return &Cache{Create: true, Obj: cm}, nil } return nil, err } func (r *ReconcileNodes) updateCache(c *Cache) error { if !c.Changed() { return nil } if c.Create { return r.client.Create(context.TODO(), c.Obj) } return r.client.Update(context.TODO(), c.Obj) } func (r *ReconcileNodes) removeNode(c *Cache, node string, oaFunc func(name string) (*dynatracev1alpha1.OneAgent, error)) error { logger := r.logger.WithValues("node", node) nodeInfo, err := c.Get(node) if err == ErrNotFound { logger.Info("ignoring uncached node") return nil } else if err != nil
if time.Now().UTC().Sub(nodeInfo.LastSeen).Hours() > 1 { logger.Info("removing stale node") } else if nodeInfo.IPAddress == "" { logger.Info("removing node with unknown IP") } else { oa, err := oaFunc(nodeInfo.Instance) if errors.IsNotFound(err) { logger.Info("oneagent got already deleted") c.Delete(node) return nil } if err != nil { return err } err = r.markForTermination(c, oa, nodeInfo.IPAddress, node) if err != nil { return err } } c.Delete(node) return nil } func (r *ReconcileNodes) updateNode(c *Cache, nodeName string) error { node := &corev1.Node{} err := r.client.Get(context.TODO(), client.ObjectKey{Name: nodeName}, node) if err != nil { return err } if !isUnschedulable(node) { return nil } return r.reconcileUnschedulableNode(node, c) } func (r *ReconcileNodes) sendMarkedForTermination(oa *dynatracev1alpha1.OneAgent, nodeIP string, lastSeen time.Time) error { dtc, err := r.dtClientFunc(r.client, oa, true, true) if err != nil { return err } entityID, err := dtc.GetEntityIDForIP(nodeIP) if err != nil { return err } ts := uint64(lastSeen.Add(-10*time.Minute).UnixNano()) / uint64(time.Millisecond) return dtc.SendEvent(&dtclient.EventData{ EventType: dtclient.MarkedForTerminationEvent, Source: "OneAgent Operator", Description: "Kubernetes node cordoned. 
Node might be drained or terminated.", StartInMillis: ts, EndInMillis: ts, AttachRules: dtclient.EventDataAttachRules{ EntityIDs: []string{entityID}, }, }) } func (r *ReconcileNodes) reconcileUnschedulableNode(node *corev1.Node, c *Cache) error { oneAgent, err := r.determineOneAgentForNode(node.Name) if err != nil { return err } if oneAgent == nil { return nil } // determineOneAgentForNode only returns a oneagent object if a node instance is present instance := oneAgent.Status.Instances[node.Name] if _, err = c.Get(node.Name); err != nil { if err == ErrNotFound { // If node not found in c add it cachedNode := CacheEntry{ Instance: oneAgent.Name, IPAddress: instance.IPAddress, LastSeen: time.Now().UTC(), } err = c.Set(node.Name, cachedNode) if err != nil { return err } } else { return err } } return r.markForTermination(c, oneAgent, instance.IPAddress, node.Name) } func (r *ReconcileNodes) markForTermination(c *Cache, oneAgent *dynatracev1alpha1.OneAgent, ipAddress string, nodeName string) error { cachedNode, err := c.Get(nodeName) if err != nil { return err } if !isMarkableForTermination(&cachedNode) { return nil } r.logger.Info("sending mark for termination event to dynatrace server", "ip", ipAddress, "node", nodeName) if err = updateLastMarkedForTerminationTimestamp(c, &cachedNode, nodeName); err != nil { return err } return r.sendMarkedForTermination(oneAgent, ipAddress, cachedNode.LastSeen) } func isUnschedulable(node *corev1.Node) bool { return node.Spec.Unschedulable || hasUnschedulableTaint(node) } func hasUnschedulableTaint(node *corev1.Node) bool { for _, taint := range node.Spec.Taints { for _, unschedulableTaint := range unschedulableTaints { if taint.Key == unschedulableTaint { return true } } } return false } // isMarkableForTermination checks if the timestamp from last mark is at least one hour old func isMarkableForTermination(nodeInfo *CacheEntry) bool { // If the last mark was an hour ago, mark again // Zero value for time.Time is 0001-01-01, so 
first mark is also executed lastMarked := nodeInfo.LastMarkedForTermination return lastMarked.UTC().Add(time.Hour).Before(time.Now().UTC()) } func updateLastMarkedForTerminationTimestamp(c *Cache, nodeInfo *CacheEntry, nodeName string) error { nodeInfo.LastMarkedForTermination = time.Now().UTC() return c.Set(nodeName, *nodeInfo) }
{ return err }
conditional_block
recenter_dump.py
import numpy as np import h5py import sys from io import StringIO import time from itertools import islice import argparse import os def str2array(str): str_temp = str.decode('unicode-escape') strIO = StringIO(str_temp) return np.loadtxt(strIO) def compute_com_pbc(coords, box_size): theta = 2.0 * np.pi * coords / np.array(box_size) xi = np.cos(theta) * box_size / (2.0 * np.pi) zeta = np.sin(theta) * box_size / (2.0 * np.pi) xi_mean = np.mean(xi, axis = 0) #print xi_mean zeta_mean = np.mean(zeta, axis = 0) com = box_size * (np.arctan2(-zeta_mean, -xi_mean) + np.pi) / (2.0 * np.pi) return com def
(coords, box_size): com = compute_com_pbc(coords, box_size) cob = box_size / 2.0 coords_recenter = coords - com + cob coords_recenter_x = coords_recenter[:,0] coords_recenter_y = coords_recenter[:,1] coords_recenter_z = coords_recenter[:,2] #print coords_recenter coords_recenter_x = np.piecewise(coords_recenter_x, [coords_recenter_x < 0.0, (coords_recenter_x >= 0.0) * (coords_recenter_x <= box_size[0]), coords_recenter_x > box_size[0]], \ [lambda coords_recenter_x: coords_recenter_x + box_size[0], lambda coords_recenter_x: coords_recenter_x, lambda coords_recenter_x: coords_recenter_x - box_size[0]]) coords_recenter_y = np.piecewise(coords_recenter_y, [coords_recenter_y < 0.0, (coords_recenter_y >= 0.0) * (coords_recenter_y <= box_size[1]), coords_recenter_y > box_size[1]], \ [lambda coords_recenter_y: coords_recenter_y + box_size[1], lambda coords_recenter_y: coords_recenter_y, lambda coords_recenter_y: coords_recenter_y - box_size[1]]) coords_recenter_z = np.piecewise(coords_recenter_z, [coords_recenter_z < 0.0, (coords_recenter_z >= 0.0) * (coords_recenter_z <= box_size[2]), coords_recenter_z > box_size[2]], \ [lambda coords_recenter_z: coords_recenter_z + box_size[2], lambda coords_recenter_z: coords_recenter_z, lambda coords_recenter_z: coords_recenter_z - box_size[2]]) return np.array(zip(coords_recenter_x,coords_recenter_y,coords_recenter_z)) def mystr(s): if s.is_integer(): return str(int(s)) else: return str(s) ###################################### velocity_flag = False others_flag = True unwrap_flag = False image_flag = False # First declare several flags parser = argparse.ArgumentParser(description='Convert Lammps custom dump file to output format file.\ IMPORTANT NOTES:Only used for simulation where number of particles does not change.') parser.add_argument('lammps_custom_dump', help='Lammps custom dump file.') parser.add_argument('lammps_new_dump', help='new dump file.') parser.add_argument('-nv', '--no-velocity', help='disable writing velocity to 
output file.', \ action='store_true', dest='no_velocity') parser.add_argument('-no', '--no-others', help='disbale writing other informations to output file.', \ action='store_true', dest='no_others') parser.add_argument('-s', '--stride', help='write output file every this many snapshots.', \ dest='stride', type=int) parser.add_argument('-b', '--begin', help='write output file starting from this index.', \ dest='begin', type=int) parser.add_argument('-t', '--terminate', help='stop write output file starting from this index.', \ dest='terminate', type=int) parser.add_argument('-uw', '--unwrap', help='write unwrapped coordinates of particles in output file.', \ action='store_true', dest='unwrap') parser.add_argument('-i', '--image', help='store image flags of particles in output file.', \ action='store_true', dest='image') parser.add_argument('-q', '--quite', help='turn of printing information on screen.',\ action='store_true', dest='quite') parser.add_argument('-l', '--log', help='output to log files.',\ dest='logfile') args = parser.parse_args() # report error if both args.quite and args.logfile are required if args.quite and args.logfile: sys.stdout.write('ERROR: Both quite and log argument are specified. Program terminated.\n') sys.stdout.flush() sys.exit(0) # redirect stdout to log file if specified if args.logfile: sys.stdout = open(args.logfile, 'w') elif args.quite: sys.stdout = open(os.devnull, 'w') if args.stride is None: stride = 1 else: stride = args.stride # first read number of atoms in the dump file # read atom attribute information check = False with open(args.lammps_custom_dump) as f: for i, line in enumerate(f): if line == 'ITEM: NUMBER OF ATOMS\n': check = True continue if check: natoms = np.int_(line.split()[0]) check = False if 'ITEM: BOX BOUNDS' in line: if not np.any(['p' in line.split()[i] for i in range(3,len(line.split()))]): if args.unwrap: sys.stdout.write("No periodic boundary found. 
Ignore argument '--unwrap'.\n") if args.image: sys.stdtout.write("No periodic boundary found. Ignore argument '--image'.\n") else: if not args.unwrap and not args.image: sys.stdout.write("\033[93mWARNING: Periodic boundary found. Neither argument '--unwrap' nor '--image' are provided.\033[0m\n") if 'ITEM: ATOMS' in line: attribute_info = line.split()[2:] break # check attribute information if 'id' in attribute_info: id_index = attribute_info.index('id') else: sys.stdout.write('\033[93mERROR: No particle ID is found in dump file. \ Make sure that the order of particles does not change in dump file.\033[0m\n') sys.stdout.flush() if 'x' in attribute_info and 'y' in attribute_info and 'z' in attribute_info: x_index = attribute_info.index('x') y_index = attribute_info.index('y') z_index = attribute_info.index('z') else: sys.stdout.write('*** No position information found in dump file. Terminated. ***\n') sys.stdout.flush() sys.exit(0) if 'vx' in attribute_info and 'vy' in attribute_info and 'vz' in attribute_info: vx_index = attribute_info.index('vx') vy_index = attribute_info.index('vy') vz_index = attribute_info.index('vz') if not args.no_velocity: velocity_flag = True else: if not args.no_velocity: sys.stdout.write('*** No velocity information found in dump file. Skip it. ***\n') sys.stdout.flush() if 'ix' in attribute_info and 'iy' in attribute_info and 'iz' in attribute_info: ix_index = attribute_info.index('ix') iy_index = attribute_info.index('iy') iz_index = attribute_info.index('iz') if not args.unwrap and not args.image: sys.stdout.write('\033[93mWARNING: Image flags found in dump file.\033[0m\n') sys.stdout.flush() unwrap_flag = args.unwrap image_flag = args.image else: if args.unwrap or args.image: sys.stdout.write('*** No image information found in dump file. Skip it. 
***\n') sys.stdout.flush() if 'xu' in attribute_info and 'yu' in attribute_info and 'zu' in attribute_info: sys.stdout.write('\033[93mWARNING: Unwrapped position found in dump file.\033[0m\n') sys.stdout.flush() number_lines_one_frame = 9 + natoms # 9 = number of head lines for each frame attribute_info_new = attribute_info[:] for key in ['id', 'x', 'y', 'z', 'vx', 'vy', 'vz', 'ix', 'iy', 'iz', 'xu', 'yu', 'zu']: try: attribute_info_new.remove(key) except ValueError: pass snap_index = 0 # keep track of the index of frames when reading snap_index_write = 0 # keep track the actual number of snapshots written to output file start_time = time.time() # get the start time if args.logfile: sys.stdout.write('Start to convert file {}...\n'.format(args.lammps_custom_dump)) sys.stdout.flush() else: sys.stdout.write('\033[1mStart to convert file {}...\033[0m\n'.format(args.lammps_custom_dump)) sys.stdout.flush() with open(args.lammps_new_dump, 'w') as fout: with open(args.lammps_custom_dump, 'r') as f: while True: next_n_lines = list(islice(f, number_lines_one_frame)) # enumerate all the posiibilities if args.begin is None and args.terminate is None: if snap_index % stride == 0: pass else: snap_index += 1 continue elif args.begin is not None and args.terminate is None: if snap_index >= args.begin: if snap_index % stride == 0: pass else: snap_index += 1 continue else: snap_index += 1 continue elif args.begin is None and args.terminate is not None: if snap_index <= args.terminate: if snap_index % stride == 0: pass else: snap_index += 1 continue else: break elif args.begin is not None and args.terminate is not None: if snap_index >= args.begin and snap_index <= args.terminate: if snap_index % stride == 0: pass else: snap_index += 1 continue elif snap_index > args.terminate: break else: snap_index += 1 continue if not next_n_lines: break # process next_n_lines # get timestep timestep = int(next_n_lines[1]) # get box box = ''.join(next_n_lines[5:8]) box = str2array(box) box_shape = 
box.shape if unwrap_flag: box_edge_size = box[:,1] - box[:,0] # get per atom information: id, position, velocity, energy ... atom_info = ''.join(next_n_lines[9:]) atom_info = str2array(atom_info) assert len(atom_info) == natoms # sort the atom information based on atom id try: atom_info = atom_info[atom_info[:,id_index].argsort()] except: pass coords = np.float64(atom_info[:,x_index:z_index+1]) # compute center of mass (see wiki page for periodic boudary condition) coords_reposition = reposition(coords, box[:,1] - box[:,0]) atom_info[:,x_index:z_index+1] = coords_reposition fout.write("".join(next_n_lines[:9])) for row in atom_info: fout.write(" ".join([mystr(elem) for elem in row])+"\n") #fout.write(atom_info) snap_index += 1 snap_index_write += 1 if args.logfile: sys.stdout.write("Writing snapshot #{}\n".format(snap_index)) sys.stdout.flush() else: sys.stdout.write("\rWriting snapshot #{}...".format(snap_index)) sys.stdout.flush() end_time = time.time() sys.stdout.write('\nTotal {} snapshots been written to dump file. Time used:{} mins\n'.format(snap_index_write, (end_time-start_time)/60)) sys.stdout.flush()
reposition
identifier_name
recenter_dump.py
import numpy as np import h5py import sys from io import StringIO import time from itertools import islice import argparse import os def str2array(str): str_temp = str.decode('unicode-escape') strIO = StringIO(str_temp) return np.loadtxt(strIO) def compute_com_pbc(coords, box_size): theta = 2.0 * np.pi * coords / np.array(box_size) xi = np.cos(theta) * box_size / (2.0 * np.pi) zeta = np.sin(theta) * box_size / (2.0 * np.pi) xi_mean = np.mean(xi, axis = 0) #print xi_mean zeta_mean = np.mean(zeta, axis = 0) com = box_size * (np.arctan2(-zeta_mean, -xi_mean) + np.pi) / (2.0 * np.pi) return com def reposition(coords, box_size): com = compute_com_pbc(coords, box_size) cob = box_size / 2.0 coords_recenter = coords - com + cob coords_recenter_x = coords_recenter[:,0] coords_recenter_y = coords_recenter[:,1] coords_recenter_z = coords_recenter[:,2] #print coords_recenter coords_recenter_x = np.piecewise(coords_recenter_x, [coords_recenter_x < 0.0, (coords_recenter_x >= 0.0) * (coords_recenter_x <= box_size[0]), coords_recenter_x > box_size[0]], \ [lambda coords_recenter_x: coords_recenter_x + box_size[0], lambda coords_recenter_x: coords_recenter_x, lambda coords_recenter_x: coords_recenter_x - box_size[0]]) coords_recenter_y = np.piecewise(coords_recenter_y, [coords_recenter_y < 0.0, (coords_recenter_y >= 0.0) * (coords_recenter_y <= box_size[1]), coords_recenter_y > box_size[1]], \ [lambda coords_recenter_y: coords_recenter_y + box_size[1], lambda coords_recenter_y: coords_recenter_y, lambda coords_recenter_y: coords_recenter_y - box_size[1]]) coords_recenter_z = np.piecewise(coords_recenter_z, [coords_recenter_z < 0.0, (coords_recenter_z >= 0.0) * (coords_recenter_z <= box_size[2]), coords_recenter_z > box_size[2]], \ [lambda coords_recenter_z: coords_recenter_z + box_size[2], lambda coords_recenter_z: coords_recenter_z, lambda coords_recenter_z: coords_recenter_z - box_size[2]]) return np.array(zip(coords_recenter_x,coords_recenter_y,coords_recenter_z)) def mystr(s): 
if s.is_integer(): return str(int(s)) else: return str(s) ###################################### velocity_flag = False others_flag = True unwrap_flag = False image_flag = False # First declare several flags parser = argparse.ArgumentParser(description='Convert Lammps custom dump file to output format file.\ IMPORTANT NOTES:Only used for simulation where number of particles does not change.') parser.add_argument('lammps_custom_dump', help='Lammps custom dump file.') parser.add_argument('lammps_new_dump', help='new dump file.') parser.add_argument('-nv', '--no-velocity', help='disable writing velocity to output file.', \ action='store_true', dest='no_velocity') parser.add_argument('-no', '--no-others', help='disbale writing other informations to output file.', \ action='store_true', dest='no_others') parser.add_argument('-s', '--stride', help='write output file every this many snapshots.', \ dest='stride', type=int) parser.add_argument('-b', '--begin', help='write output file starting from this index.', \ dest='begin', type=int) parser.add_argument('-t', '--terminate', help='stop write output file starting from this index.', \ dest='terminate', type=int) parser.add_argument('-uw', '--unwrap', help='write unwrapped coordinates of particles in output file.', \ action='store_true', dest='unwrap') parser.add_argument('-i', '--image', help='store image flags of particles in output file.', \ action='store_true', dest='image') parser.add_argument('-q', '--quite', help='turn of printing information on screen.',\ action='store_true', dest='quite') parser.add_argument('-l', '--log', help='output to log files.',\ dest='logfile') args = parser.parse_args() # report error if both args.quite and args.logfile are required if args.quite and args.logfile: sys.stdout.write('ERROR: Both quite and log argument are specified. 
Program terminated.\n') sys.stdout.flush() sys.exit(0) # redirect stdout to log file if specified if args.logfile: sys.stdout = open(args.logfile, 'w') elif args.quite: sys.stdout = open(os.devnull, 'w') if args.stride is None: stride = 1 else: stride = args.stride # first read number of atoms in the dump file # read atom attribute information check = False with open(args.lammps_custom_dump) as f: for i, line in enumerate(f): if line == 'ITEM: NUMBER OF ATOMS\n': check = True continue if check: natoms = np.int_(line.split()[0]) check = False if 'ITEM: BOX BOUNDS' in line: if not np.any(['p' in line.split()[i] for i in range(3,len(line.split()))]): if args.unwrap: sys.stdout.write("No periodic boundary found. Ignore argument '--unwrap'.\n") if args.image: sys.stdtout.write("No periodic boundary found. Ignore argument '--image'.\n") else: if not args.unwrap and not args.image: sys.stdout.write("\033[93mWARNING: Periodic boundary found. Neither argument '--unwrap' nor '--image' are provided.\033[0m\n") if 'ITEM: ATOMS' in line: attribute_info = line.split()[2:] break # check attribute information if 'id' in attribute_info: id_index = attribute_info.index('id') else: sys.stdout.write('\033[93mERROR: No particle ID is found in dump file. \ Make sure that the order of particles does not change in dump file.\033[0m\n') sys.stdout.flush() if 'x' in attribute_info and 'y' in attribute_info and 'z' in attribute_info: x_index = attribute_info.index('x') y_index = attribute_info.index('y') z_index = attribute_info.index('z') else: sys.stdout.write('*** No position information found in dump file. Terminated. ***\n') sys.stdout.flush() sys.exit(0) if 'vx' in attribute_info and 'vy' in attribute_info and 'vz' in attribute_info: vx_index = attribute_info.index('vx') vy_index = attribute_info.index('vy') vz_index = attribute_info.index('vz') if not args.no_velocity: velocity_flag = True else: if not args.no_velocity: sys.stdout.write('*** No velocity information found in dump file. 
Skip it. ***\n') sys.stdout.flush() if 'ix' in attribute_info and 'iy' in attribute_info and 'iz' in attribute_info: ix_index = attribute_info.index('ix') iy_index = attribute_info.index('iy') iz_index = attribute_info.index('iz') if not args.unwrap and not args.image: sys.stdout.write('\033[93mWARNING: Image flags found in dump file.\033[0m\n') sys.stdout.flush() unwrap_flag = args.unwrap image_flag = args.image else: if args.unwrap or args.image: sys.stdout.write('*** No image information found in dump file. Skip it. ***\n') sys.stdout.flush() if 'xu' in attribute_info and 'yu' in attribute_info and 'zu' in attribute_info: sys.stdout.write('\033[93mWARNING: Unwrapped position found in dump file.\033[0m\n') sys.stdout.flush() number_lines_one_frame = 9 + natoms # 9 = number of head lines for each frame attribute_info_new = attribute_info[:] for key in ['id', 'x', 'y', 'z', 'vx', 'vy', 'vz', 'ix', 'iy', 'iz', 'xu', 'yu', 'zu']: try: attribute_info_new.remove(key) except ValueError: pass snap_index = 0 # keep track of the index of frames when reading snap_index_write = 0 # keep track the actual number of snapshots written to output file start_time = time.time() # get the start time if args.logfile: sys.stdout.write('Start to convert file {}...\n'.format(args.lammps_custom_dump)) sys.stdout.flush() else: sys.stdout.write('\033[1mStart to convert file {}...\033[0m\n'.format(args.lammps_custom_dump)) sys.stdout.flush() with open(args.lammps_new_dump, 'w') as fout: with open(args.lammps_custom_dump, 'r') as f: while True: next_n_lines = list(islice(f, number_lines_one_frame)) # enumerate all the posiibilities if args.begin is None and args.terminate is None: if snap_index % stride == 0: pass else: snap_index += 1 continue elif args.begin is not None and args.terminate is None: if snap_index >= args.begin: if snap_index % stride == 0:
else: snap_index += 1 continue else: snap_index += 1 continue elif args.begin is None and args.terminate is not None: if snap_index <= args.terminate: if snap_index % stride == 0: pass else: snap_index += 1 continue else: break elif args.begin is not None and args.terminate is not None: if snap_index >= args.begin and snap_index <= args.terminate: if snap_index % stride == 0: pass else: snap_index += 1 continue elif snap_index > args.terminate: break else: snap_index += 1 continue if not next_n_lines: break # process next_n_lines # get timestep timestep = int(next_n_lines[1]) # get box box = ''.join(next_n_lines[5:8]) box = str2array(box) box_shape = box.shape if unwrap_flag: box_edge_size = box[:,1] - box[:,0] # get per atom information: id, position, velocity, energy ... atom_info = ''.join(next_n_lines[9:]) atom_info = str2array(atom_info) assert len(atom_info) == natoms # sort the atom information based on atom id try: atom_info = atom_info[atom_info[:,id_index].argsort()] except: pass coords = np.float64(atom_info[:,x_index:z_index+1]) # compute center of mass (see wiki page for periodic boudary condition) coords_reposition = reposition(coords, box[:,1] - box[:,0]) atom_info[:,x_index:z_index+1] = coords_reposition fout.write("".join(next_n_lines[:9])) for row in atom_info: fout.write(" ".join([mystr(elem) for elem in row])+"\n") #fout.write(atom_info) snap_index += 1 snap_index_write += 1 if args.logfile: sys.stdout.write("Writing snapshot #{}\n".format(snap_index)) sys.stdout.flush() else: sys.stdout.write("\rWriting snapshot #{}...".format(snap_index)) sys.stdout.flush() end_time = time.time() sys.stdout.write('\nTotal {} snapshots been written to dump file. Time used:{} mins\n'.format(snap_index_write, (end_time-start_time)/60)) sys.stdout.flush()
pass
conditional_block
recenter_dump.py
import numpy as np import h5py import sys from io import StringIO import time from itertools import islice import argparse import os def str2array(str): str_temp = str.decode('unicode-escape') strIO = StringIO(str_temp) return np.loadtxt(strIO) def compute_com_pbc(coords, box_size): theta = 2.0 * np.pi * coords / np.array(box_size) xi = np.cos(theta) * box_size / (2.0 * np.pi) zeta = np.sin(theta) * box_size / (2.0 * np.pi) xi_mean = np.mean(xi, axis = 0) #print xi_mean zeta_mean = np.mean(zeta, axis = 0) com = box_size * (np.arctan2(-zeta_mean, -xi_mean) + np.pi) / (2.0 * np.pi) return com def reposition(coords, box_size): com = compute_com_pbc(coords, box_size) cob = box_size / 2.0 coords_recenter = coords - com + cob coords_recenter_x = coords_recenter[:,0] coords_recenter_y = coords_recenter[:,1] coords_recenter_z = coords_recenter[:,2] #print coords_recenter coords_recenter_x = np.piecewise(coords_recenter_x, [coords_recenter_x < 0.0, (coords_recenter_x >= 0.0) * (coords_recenter_x <= box_size[0]), coords_recenter_x > box_size[0]], \ [lambda coords_recenter_x: coords_recenter_x + box_size[0], lambda coords_recenter_x: coords_recenter_x, lambda coords_recenter_x: coords_recenter_x - box_size[0]]) coords_recenter_y = np.piecewise(coords_recenter_y, [coords_recenter_y < 0.0, (coords_recenter_y >= 0.0) * (coords_recenter_y <= box_size[1]), coords_recenter_y > box_size[1]], \ [lambda coords_recenter_y: coords_recenter_y + box_size[1], lambda coords_recenter_y: coords_recenter_y, lambda coords_recenter_y: coords_recenter_y - box_size[1]]) coords_recenter_z = np.piecewise(coords_recenter_z, [coords_recenter_z < 0.0, (coords_recenter_z >= 0.0) * (coords_recenter_z <= box_size[2]), coords_recenter_z > box_size[2]], \ [lambda coords_recenter_z: coords_recenter_z + box_size[2], lambda coords_recenter_z: coords_recenter_z, lambda coords_recenter_z: coords_recenter_z - box_size[2]]) return np.array(zip(coords_recenter_x,coords_recenter_y,coords_recenter_z)) def mystr(s):
###################################### velocity_flag = False others_flag = True unwrap_flag = False image_flag = False # First declare several flags parser = argparse.ArgumentParser(description='Convert Lammps custom dump file to output format file.\ IMPORTANT NOTES:Only used for simulation where number of particles does not change.') parser.add_argument('lammps_custom_dump', help='Lammps custom dump file.') parser.add_argument('lammps_new_dump', help='new dump file.') parser.add_argument('-nv', '--no-velocity', help='disable writing velocity to output file.', \ action='store_true', dest='no_velocity') parser.add_argument('-no', '--no-others', help='disbale writing other informations to output file.', \ action='store_true', dest='no_others') parser.add_argument('-s', '--stride', help='write output file every this many snapshots.', \ dest='stride', type=int) parser.add_argument('-b', '--begin', help='write output file starting from this index.', \ dest='begin', type=int) parser.add_argument('-t', '--terminate', help='stop write output file starting from this index.', \ dest='terminate', type=int) parser.add_argument('-uw', '--unwrap', help='write unwrapped coordinates of particles in output file.', \ action='store_true', dest='unwrap') parser.add_argument('-i', '--image', help='store image flags of particles in output file.', \ action='store_true', dest='image') parser.add_argument('-q', '--quite', help='turn of printing information on screen.',\ action='store_true', dest='quite') parser.add_argument('-l', '--log', help='output to log files.',\ dest='logfile') args = parser.parse_args() # report error if both args.quite and args.logfile are required if args.quite and args.logfile: sys.stdout.write('ERROR: Both quite and log argument are specified. 
Program terminated.\n') sys.stdout.flush() sys.exit(0) # redirect stdout to log file if specified if args.logfile: sys.stdout = open(args.logfile, 'w') elif args.quite: sys.stdout = open(os.devnull, 'w') if args.stride is None: stride = 1 else: stride = args.stride # first read number of atoms in the dump file # read atom attribute information check = False with open(args.lammps_custom_dump) as f: for i, line in enumerate(f): if line == 'ITEM: NUMBER OF ATOMS\n': check = True continue if check: natoms = np.int_(line.split()[0]) check = False if 'ITEM: BOX BOUNDS' in line: if not np.any(['p' in line.split()[i] for i in range(3,len(line.split()))]): if args.unwrap: sys.stdout.write("No periodic boundary found. Ignore argument '--unwrap'.\n") if args.image: sys.stdtout.write("No periodic boundary found. Ignore argument '--image'.\n") else: if not args.unwrap and not args.image: sys.stdout.write("\033[93mWARNING: Periodic boundary found. Neither argument '--unwrap' nor '--image' are provided.\033[0m\n") if 'ITEM: ATOMS' in line: attribute_info = line.split()[2:] break # check attribute information if 'id' in attribute_info: id_index = attribute_info.index('id') else: sys.stdout.write('\033[93mERROR: No particle ID is found in dump file. \ Make sure that the order of particles does not change in dump file.\033[0m\n') sys.stdout.flush() if 'x' in attribute_info and 'y' in attribute_info and 'z' in attribute_info: x_index = attribute_info.index('x') y_index = attribute_info.index('y') z_index = attribute_info.index('z') else: sys.stdout.write('*** No position information found in dump file. Terminated. ***\n') sys.stdout.flush() sys.exit(0) if 'vx' in attribute_info and 'vy' in attribute_info and 'vz' in attribute_info: vx_index = attribute_info.index('vx') vy_index = attribute_info.index('vy') vz_index = attribute_info.index('vz') if not args.no_velocity: velocity_flag = True else: if not args.no_velocity: sys.stdout.write('*** No velocity information found in dump file. 
Skip it. ***\n') sys.stdout.flush() if 'ix' in attribute_info and 'iy' in attribute_info and 'iz' in attribute_info: ix_index = attribute_info.index('ix') iy_index = attribute_info.index('iy') iz_index = attribute_info.index('iz') if not args.unwrap and not args.image: sys.stdout.write('\033[93mWARNING: Image flags found in dump file.\033[0m\n') sys.stdout.flush() unwrap_flag = args.unwrap image_flag = args.image else: if args.unwrap or args.image: sys.stdout.write('*** No image information found in dump file. Skip it. ***\n') sys.stdout.flush() if 'xu' in attribute_info and 'yu' in attribute_info and 'zu' in attribute_info: sys.stdout.write('\033[93mWARNING: Unwrapped position found in dump file.\033[0m\n') sys.stdout.flush() number_lines_one_frame = 9 + natoms # 9 = number of head lines for each frame attribute_info_new = attribute_info[:] for key in ['id', 'x', 'y', 'z', 'vx', 'vy', 'vz', 'ix', 'iy', 'iz', 'xu', 'yu', 'zu']: try: attribute_info_new.remove(key) except ValueError: pass snap_index = 0 # keep track of the index of frames when reading snap_index_write = 0 # keep track the actual number of snapshots written to output file start_time = time.time() # get the start time if args.logfile: sys.stdout.write('Start to convert file {}...\n'.format(args.lammps_custom_dump)) sys.stdout.flush() else: sys.stdout.write('\033[1mStart to convert file {}...\033[0m\n'.format(args.lammps_custom_dump)) sys.stdout.flush() with open(args.lammps_new_dump, 'w') as fout: with open(args.lammps_custom_dump, 'r') as f: while True: next_n_lines = list(islice(f, number_lines_one_frame)) # enumerate all the posiibilities if args.begin is None and args.terminate is None: if snap_index % stride == 0: pass else: snap_index += 1 continue elif args.begin is not None and args.terminate is None: if snap_index >= args.begin: if snap_index % stride == 0: pass else: snap_index += 1 continue else: snap_index += 1 continue elif args.begin is None and args.terminate is not None: if snap_index 
<= args.terminate: if snap_index % stride == 0: pass else: snap_index += 1 continue else: break elif args.begin is not None and args.terminate is not None: if snap_index >= args.begin and snap_index <= args.terminate: if snap_index % stride == 0: pass else: snap_index += 1 continue elif snap_index > args.terminate: break else: snap_index += 1 continue if not next_n_lines: break # process next_n_lines # get timestep timestep = int(next_n_lines[1]) # get box box = ''.join(next_n_lines[5:8]) box = str2array(box) box_shape = box.shape if unwrap_flag: box_edge_size = box[:,1] - box[:,0] # get per atom information: id, position, velocity, energy ... atom_info = ''.join(next_n_lines[9:]) atom_info = str2array(atom_info) assert len(atom_info) == natoms # sort the atom information based on atom id try: atom_info = atom_info[atom_info[:,id_index].argsort()] except: pass coords = np.float64(atom_info[:,x_index:z_index+1]) # compute center of mass (see wiki page for periodic boudary condition) coords_reposition = reposition(coords, box[:,1] - box[:,0]) atom_info[:,x_index:z_index+1] = coords_reposition fout.write("".join(next_n_lines[:9])) for row in atom_info: fout.write(" ".join([mystr(elem) for elem in row])+"\n") #fout.write(atom_info) snap_index += 1 snap_index_write += 1 if args.logfile: sys.stdout.write("Writing snapshot #{}\n".format(snap_index)) sys.stdout.flush() else: sys.stdout.write("\rWriting snapshot #{}...".format(snap_index)) sys.stdout.flush() end_time = time.time() sys.stdout.write('\nTotal {} snapshots been written to dump file. Time used:{} mins\n'.format(snap_index_write, (end_time-start_time)/60)) sys.stdout.flush()
if s.is_integer(): return str(int(s)) else: return str(s)
identifier_body
recenter_dump.py
import numpy as np import h5py import sys from io import StringIO import time from itertools import islice import argparse import os def str2array(str): str_temp = str.decode('unicode-escape') strIO = StringIO(str_temp) return np.loadtxt(strIO) def compute_com_pbc(coords, box_size): theta = 2.0 * np.pi * coords / np.array(box_size) xi = np.cos(theta) * box_size / (2.0 * np.pi) zeta = np.sin(theta) * box_size / (2.0 * np.pi) xi_mean = np.mean(xi, axis = 0) #print xi_mean zeta_mean = np.mean(zeta, axis = 0) com = box_size * (np.arctan2(-zeta_mean, -xi_mean) + np.pi) / (2.0 * np.pi) return com def reposition(coords, box_size): com = compute_com_pbc(coords, box_size) cob = box_size / 2.0 coords_recenter = coords - com + cob coords_recenter_x = coords_recenter[:,0] coords_recenter_y = coords_recenter[:,1] coords_recenter_z = coords_recenter[:,2] #print coords_recenter coords_recenter_x = np.piecewise(coords_recenter_x, [coords_recenter_x < 0.0, (coords_recenter_x >= 0.0) * (coords_recenter_x <= box_size[0]), coords_recenter_x > box_size[0]], \ [lambda coords_recenter_x: coords_recenter_x + box_size[0], lambda coords_recenter_x: coords_recenter_x, lambda coords_recenter_x: coords_recenter_x - box_size[0]]) coords_recenter_y = np.piecewise(coords_recenter_y, [coords_recenter_y < 0.0, (coords_recenter_y >= 0.0) * (coords_recenter_y <= box_size[1]), coords_recenter_y > box_size[1]], \ [lambda coords_recenter_y: coords_recenter_y + box_size[1], lambda coords_recenter_y: coords_recenter_y, lambda coords_recenter_y: coords_recenter_y - box_size[1]]) coords_recenter_z = np.piecewise(coords_recenter_z, [coords_recenter_z < 0.0, (coords_recenter_z >= 0.0) * (coords_recenter_z <= box_size[2]), coords_recenter_z > box_size[2]], \ [lambda coords_recenter_z: coords_recenter_z + box_size[2], lambda coords_recenter_z: coords_recenter_z, lambda coords_recenter_z: coords_recenter_z - box_size[2]]) return np.array(zip(coords_recenter_x,coords_recenter_y,coords_recenter_z)) def mystr(s): 
if s.is_integer(): return str(int(s)) else: return str(s) ###################################### velocity_flag = False others_flag = True unwrap_flag = False image_flag = False # First declare several flags parser = argparse.ArgumentParser(description='Convert Lammps custom dump file to output format file.\ IMPORTANT NOTES:Only used for simulation where number of particles does not change.') parser.add_argument('lammps_custom_dump', help='Lammps custom dump file.') parser.add_argument('lammps_new_dump', help='new dump file.') parser.add_argument('-nv', '--no-velocity', help='disable writing velocity to output file.', \ action='store_true', dest='no_velocity') parser.add_argument('-no', '--no-others', help='disbale writing other informations to output file.', \ action='store_true', dest='no_others') parser.add_argument('-s', '--stride', help='write output file every this many snapshots.', \ dest='stride', type=int) parser.add_argument('-b', '--begin', help='write output file starting from this index.', \ dest='begin', type=int) parser.add_argument('-t', '--terminate', help='stop write output file starting from this index.', \ dest='terminate', type=int) parser.add_argument('-uw', '--unwrap', help='write unwrapped coordinates of particles in output file.', \ action='store_true', dest='unwrap') parser.add_argument('-i', '--image', help='store image flags of particles in output file.', \ action='store_true', dest='image') parser.add_argument('-q', '--quite', help='turn of printing information on screen.',\ action='store_true', dest='quite') parser.add_argument('-l', '--log', help='output to log files.',\ dest='logfile') args = parser.parse_args() # report error if both args.quite and args.logfile are required if args.quite and args.logfile: sys.stdout.write('ERROR: Both quite and log argument are specified. 
Program terminated.\n') sys.stdout.flush() sys.exit(0) # redirect stdout to log file if specified if args.logfile: sys.stdout = open(args.logfile, 'w') elif args.quite: sys.stdout = open(os.devnull, 'w') if args.stride is None: stride = 1 else: stride = args.stride # first read number of atoms in the dump file # read atom attribute information check = False with open(args.lammps_custom_dump) as f: for i, line in enumerate(f): if line == 'ITEM: NUMBER OF ATOMS\n': check = True continue if check: natoms = np.int_(line.split()[0]) check = False if 'ITEM: BOX BOUNDS' in line: if not np.any(['p' in line.split()[i] for i in range(3,len(line.split()))]): if args.unwrap: sys.stdout.write("No periodic boundary found. Ignore argument '--unwrap'.\n") if args.image: sys.stdtout.write("No periodic boundary found. Ignore argument '--image'.\n") else: if not args.unwrap and not args.image: sys.stdout.write("\033[93mWARNING: Periodic boundary found. Neither argument '--unwrap' nor '--image' are provided.\033[0m\n") if 'ITEM: ATOMS' in line: attribute_info = line.split()[2:] break # check attribute information if 'id' in attribute_info: id_index = attribute_info.index('id') else: sys.stdout.write('\033[93mERROR: No particle ID is found in dump file. \ Make sure that the order of particles does not change in dump file.\033[0m\n') sys.stdout.flush() if 'x' in attribute_info and 'y' in attribute_info and 'z' in attribute_info:
sys.stdout.write('*** No position information found in dump file. Terminated. ***\n') sys.stdout.flush() sys.exit(0) if 'vx' in attribute_info and 'vy' in attribute_info and 'vz' in attribute_info: vx_index = attribute_info.index('vx') vy_index = attribute_info.index('vy') vz_index = attribute_info.index('vz') if not args.no_velocity: velocity_flag = True else: if not args.no_velocity: sys.stdout.write('*** No velocity information found in dump file. Skip it. ***\n') sys.stdout.flush() if 'ix' in attribute_info and 'iy' in attribute_info and 'iz' in attribute_info: ix_index = attribute_info.index('ix') iy_index = attribute_info.index('iy') iz_index = attribute_info.index('iz') if not args.unwrap and not args.image: sys.stdout.write('\033[93mWARNING: Image flags found in dump file.\033[0m\n') sys.stdout.flush() unwrap_flag = args.unwrap image_flag = args.image else: if args.unwrap or args.image: sys.stdout.write('*** No image information found in dump file. Skip it. ***\n') sys.stdout.flush() if 'xu' in attribute_info and 'yu' in attribute_info and 'zu' in attribute_info: sys.stdout.write('\033[93mWARNING: Unwrapped position found in dump file.\033[0m\n') sys.stdout.flush() number_lines_one_frame = 9 + natoms # 9 = number of head lines for each frame attribute_info_new = attribute_info[:] for key in ['id', 'x', 'y', 'z', 'vx', 'vy', 'vz', 'ix', 'iy', 'iz', 'xu', 'yu', 'zu']: try: attribute_info_new.remove(key) except ValueError: pass snap_index = 0 # keep track of the index of frames when reading snap_index_write = 0 # keep track the actual number of snapshots written to output file start_time = time.time() # get the start time if args.logfile: sys.stdout.write('Start to convert file {}...\n'.format(args.lammps_custom_dump)) sys.stdout.flush() else: sys.stdout.write('\033[1mStart to convert file {}...\033[0m\n'.format(args.lammps_custom_dump)) sys.stdout.flush() with open(args.lammps_new_dump, 'w') as fout: with open(args.lammps_custom_dump, 'r') as f: while True: 
next_n_lines = list(islice(f, number_lines_one_frame)) # enumerate all the posiibilities if args.begin is None and args.terminate is None: if snap_index % stride == 0: pass else: snap_index += 1 continue elif args.begin is not None and args.terminate is None: if snap_index >= args.begin: if snap_index % stride == 0: pass else: snap_index += 1 continue else: snap_index += 1 continue elif args.begin is None and args.terminate is not None: if snap_index <= args.terminate: if snap_index % stride == 0: pass else: snap_index += 1 continue else: break elif args.begin is not None and args.terminate is not None: if snap_index >= args.begin and snap_index <= args.terminate: if snap_index % stride == 0: pass else: snap_index += 1 continue elif snap_index > args.terminate: break else: snap_index += 1 continue if not next_n_lines: break # process next_n_lines # get timestep timestep = int(next_n_lines[1]) # get box box = ''.join(next_n_lines[5:8]) box = str2array(box) box_shape = box.shape if unwrap_flag: box_edge_size = box[:,1] - box[:,0] # get per atom information: id, position, velocity, energy ... 
atom_info = ''.join(next_n_lines[9:]) atom_info = str2array(atom_info) assert len(atom_info) == natoms # sort the atom information based on atom id try: atom_info = atom_info[atom_info[:,id_index].argsort()] except: pass coords = np.float64(atom_info[:,x_index:z_index+1]) # compute center of mass (see wiki page for periodic boudary condition) coords_reposition = reposition(coords, box[:,1] - box[:,0]) atom_info[:,x_index:z_index+1] = coords_reposition fout.write("".join(next_n_lines[:9])) for row in atom_info: fout.write(" ".join([mystr(elem) for elem in row])+"\n") #fout.write(atom_info) snap_index += 1 snap_index_write += 1 if args.logfile: sys.stdout.write("Writing snapshot #{}\n".format(snap_index)) sys.stdout.flush() else: sys.stdout.write("\rWriting snapshot #{}...".format(snap_index)) sys.stdout.flush() end_time = time.time() sys.stdout.write('\nTotal {} snapshots been written to dump file. Time used:{} mins\n'.format(snap_index_write, (end_time-start_time)/60)) sys.stdout.flush()
x_index = attribute_info.index('x') y_index = attribute_info.index('y') z_index = attribute_info.index('z') else:
random_line_split
sudoku_server_basic.rs
use std::future::Future; use bytes::Buf; use bytes::BytesMut; use log::error; use log::info; use recipes::shutdown::Shutdown; use subslice::SubsliceExt; use thiserror::Error; use tokio::io::AsyncReadExt; use tokio::io::AsyncWriteExt; use tokio::io::BufWriter; use tokio::runtime::Builder; use tokio::signal; use tokio::sync::mpsc; use tokio::time; use tokio::{ net::{TcpListener, TcpStream}, sync::broadcast, }; fn main() -> anyhow::Result<()> { let thread_rt = Builder::new_multi_thread() .worker_threads(4) .thread_name("sudoku-server") .enable_io() .enable_time() .build()?; thread_rt.block_on(async move { env_logger::init(); let port = 9981; let listener = TcpListener::bind(&format!("0.0.0.0:{}", port)).await; info!("sudoku server start listening: {}", port); // if let Ok(listener) = listener { // let _ = run(listener, signal::ctrl_c()).await; // } match listener { Ok(l) => { let _ = run(l, signal::ctrl_c()).await; } Err(err) => { error!("bind address[0.0.0.0:{}] error, cause: {}", port, err); } } }); Ok(()) } #[derive(Error, Debug)] enum Error { #[error("ProtocolError")] ProtocolError, #[error("IOError: {0}")] IOError(#[from] std::io::Error), #[error("ConnectionError: {0}")] ConnectionError(&'static str), } #[derive(Debug)] struct Listener { listener: TcpListener, notify_shutdown: broadcast::Sender<()>, shutdown_complete_rx: mpsc::Receiver<()>, shutdown_complete_tx: mpsc::Sender<()>, } impl Listener { async fn run(&mut self) -> anyhow::Result<()> { info!("accepting inbound connections"); loop { let socket = self.accept().await?; let mut handler = Handler { connection: Connection::new(socket), shutdown: Shutdown::new(self.notify_shutdown.subscribe()), _shutdown_complete: self.shutdown_complete_tx.clone(), }; // handler reader tokio::spawn(async move { if let Err(err) = handler.run().await { error!("read error: {}", err); } }); } } async fn accept(&mut self) -> anyhow::Result<TcpStream> { let mut backoff = 1; // Try to accept a few times loop { // Perform the accept 
operation. If a socket is successfully // accepted, return it. Otherwise, save the error. match self.listener.accept().await { Ok((socket, _peer)) => { // info!("peer: {} connected.", peer); return Ok(socket); } Err(err) => { if backoff > 64 { // Accept has failed too many times. Return the error. return Err(err.into()); } } } // Pause execution until the back off period elapses. time::sleep(time::Duration::from_secs(backoff)).await; // Double the back off backoff *= 2; } } } #[derive(Debug)] struct Connection { stream: BufWriter<TcpStream>, buffer: BytesMut, } impl Connection { /// Create a new `Connection`, backed by `socket`. Read and write buffers /// are initialized. pub fn new(socket: TcpStream) -> Connection { // let (reader, writer) = tokio::io::split(socket); Connection { stream: BufWriter::new(socket), // writer: Arc::new(Mutex::new(BufWriter::new(writer))), // Default to a 4KB read buffer. For the use case of mini redis, // this is fine. However, real applications will want to tune this // value to their specific use case. There is a high likelihood that // a larger read buffer will work better. buffer: BytesMut::with_capacity(4 * 1024), } } async fn
(&mut self) -> anyhow::Result<Option<Frame>> { loop { // Attempt to parse a frame from the buffered data. If enough data // has been buffered, the frame is returned. if let Some(frame) = self.parse_frame()? { return Ok(Some(frame)); } // There is not enough buffered data to read a frame. Attempt to // read more data from the socket. // // On success, the number of bytes is returned. `0` indicates "end // of stream". if 0 == self.stream.read_buf(&mut self.buffer).await? { // The remote closed the connection. For this to be a clean // shutdown, there should be no data in the read buffer. If // there is, this means that the peer closed the socket while // sending a frame. if self.buffer.is_empty() { return Ok(None); } else { return Err(Error::ConnectionError("connection reset by peer").into()); } } } } async fn send_result(&mut self, id: Option<&str>, ans: &str) -> anyhow::Result<()> { if let Some(id) = id { self.stream.write_all(id.as_bytes()).await?; self.stream.write_u8(b':').await?; } self.stream.write_all(ans.as_bytes()).await?; self.stream.write_all(b"\r\n").await?; self.stream.flush().await?; Ok(()) } // frame id:puzzle\r\n or puzzle\r\n fn parse_frame(&mut self) -> anyhow::Result<Option<Frame>> { // let mut buf = Cursor::new(&self.buffer[..]); let line_end = match self.buffer.find(b"\r\n") { Some(end) => end, None => return Ok(None), }; let mut parts = self.buffer[..line_end].split(|c| c == &b':'); // let vec = parts.into_iter().collect(); let maybe_id_or_puzzle = parts.next().ok_or(Error::ProtocolError)?; let maybe_id_or_puzzle = std::str::from_utf8(maybe_id_or_puzzle)?.to_string(); let maybe_puzzle = parts.next(); if maybe_puzzle.is_none() { return Ok(Some(Frame { id: None, puzzle: maybe_id_or_puzzle, })); } let puzzle = std::str::from_utf8(maybe_puzzle.unwrap())?.to_string(); if !parts.next().is_none() { return Err(Error::ProtocolError.into()); } self.buffer.advance(line_end + 2); Ok(Some(Frame { id: Some(maybe_id_or_puzzle), puzzle, })) } } 
#[derive(Debug)] struct Result { id: Option<String>, ans: String, } #[derive(Debug)] struct Frame { id: Option<String>, puzzle: String, } #[derive(Debug)] struct Handler { connection: Connection, shutdown: Shutdown, _shutdown_complete: mpsc::Sender<()>, } impl Handler { async fn run(&mut self) -> anyhow::Result<()> { // As long as the shutdown signal has not been received, try to read a // new request frame. while !self.shutdown.is_shutdown() { let maybe_frame = tokio::select! { res = self.connection.read_frame() => res?, _ = self.shutdown.recv() => { // If a shutdown signal is received, return from `run`. // This will result in the task terminating. return Ok(()); } }; // If `None` is returned from `read_frame()` then the peer closed // the socket. There is no further work to do and the task can be // terminated. let frame = match maybe_frame { Some(frame) => frame, None => return Ok(()), }; // get ans let ans = sudoku_resolve(&frame.puzzle); // let id = frame.puzzle.clone(); // let id = frame.id.clone(); // let ans = task::spawn_blocking(move || { // sudoku_resolve(&frame.puzzle) // }).await?; // self.connection.send_result(id.as_deref(), &ans).await?; self.connection.send_result(frame.id.as_deref(), &ans).await?; } Ok(()) } } pub async fn run(listener: TcpListener, shutdown: impl Future) -> anyhow::Result<()> { let (notify_shutdown, _) = broadcast::channel(1); let (shutdown_complete_tx, shutdown_complete_rx) = mpsc::channel(1); let mut server = Listener { listener, notify_shutdown, shutdown_complete_tx, shutdown_complete_rx, }; tokio::select! { res = server.run() => { // If an error is received here, accepting connections from the TCP // listener failed multiple times and the server is giving up and // shutting down. // // Errors encountered when handling individual connections do not // bubble up to this point. if let Err(err) = res { error!("failed to accept, cause: {}", err); } } _ = shutdown => { // The shutdown signal has been received. 
info!("shutting down"); } } let Listener { mut shutdown_complete_rx, shutdown_complete_tx, notify_shutdown, .. } = server; // When `notify_shutdown` is dropped, all tasks which have `subscribe`d will // receive the shutdown signal and can exit drop(notify_shutdown); // Drop final `Sender` so the `Receiver` below can complete drop(shutdown_complete_tx); // Wait for all active connections to finish processing. As the `Sender` // handle held by the listener has been dropped above, the only remaining // `Sender` instances are held by connection handler tasks. When those drop, // the `mpsc` channel will close and `recv()` will return `None`. let _ = shutdown_complete_rx.recv().await; Ok(()) } #[inline] fn sudoku_resolve(req: &str) -> String { recipes::sudoku::sudoku_resolve(&req) }
read_frame
identifier_name
sudoku_server_basic.rs
use std::future::Future; use bytes::Buf; use bytes::BytesMut; use log::error; use log::info; use recipes::shutdown::Shutdown; use subslice::SubsliceExt; use thiserror::Error; use tokio::io::AsyncReadExt; use tokio::io::AsyncWriteExt; use tokio::io::BufWriter; use tokio::runtime::Builder; use tokio::signal; use tokio::sync::mpsc; use tokio::time; use tokio::{ net::{TcpListener, TcpStream}, sync::broadcast, }; fn main() -> anyhow::Result<()> { let thread_rt = Builder::new_multi_thread() .worker_threads(4) .thread_name("sudoku-server") .enable_io() .enable_time() .build()?; thread_rt.block_on(async move { env_logger::init(); let port = 9981; let listener = TcpListener::bind(&format!("0.0.0.0:{}", port)).await; info!("sudoku server start listening: {}", port); // if let Ok(listener) = listener { // let _ = run(listener, signal::ctrl_c()).await; // } match listener { Ok(l) => { let _ = run(l, signal::ctrl_c()).await; } Err(err) => { error!("bind address[0.0.0.0:{}] error, cause: {}", port, err); } } }); Ok(()) } #[derive(Error, Debug)] enum Error { #[error("ProtocolError")] ProtocolError, #[error("IOError: {0}")] IOError(#[from] std::io::Error), #[error("ConnectionError: {0}")] ConnectionError(&'static str), } #[derive(Debug)] struct Listener { listener: TcpListener, notify_shutdown: broadcast::Sender<()>, shutdown_complete_rx: mpsc::Receiver<()>, shutdown_complete_tx: mpsc::Sender<()>, } impl Listener { async fn run(&mut self) -> anyhow::Result<()> { info!("accepting inbound connections"); loop { let socket = self.accept().await?; let mut handler = Handler { connection: Connection::new(socket), shutdown: Shutdown::new(self.notify_shutdown.subscribe()), _shutdown_complete: self.shutdown_complete_tx.clone(), }; // handler reader tokio::spawn(async move { if let Err(err) = handler.run().await { error!("read error: {}", err); } }); } } async fn accept(&mut self) -> anyhow::Result<TcpStream> { let mut backoff = 1; // Try to accept a few times loop { // Perform the accept 
operation. If a socket is successfully // accepted, return it. Otherwise, save the error. match self.listener.accept().await { Ok((socket, _peer)) => { // info!("peer: {} connected.", peer); return Ok(socket); } Err(err) => { if backoff > 64 { // Accept has failed too many times. Return the error. return Err(err.into()); } } } // Pause execution until the back off period elapses. time::sleep(time::Duration::from_secs(backoff)).await; // Double the back off backoff *= 2; } } } #[derive(Debug)] struct Connection { stream: BufWriter<TcpStream>, buffer: BytesMut, } impl Connection { /// Create a new `Connection`, backed by `socket`. Read and write buffers /// are initialized. pub fn new(socket: TcpStream) -> Connection { // let (reader, writer) = tokio::io::split(socket); Connection { stream: BufWriter::new(socket), // writer: Arc::new(Mutex::new(BufWriter::new(writer))), // Default to a 4KB read buffer. For the use case of mini redis, // this is fine. However, real applications will want to tune this // value to their specific use case. There is a high likelihood that // a larger read buffer will work better. buffer: BytesMut::with_capacity(4 * 1024), } } async fn read_frame(&mut self) -> anyhow::Result<Option<Frame>> { loop { // Attempt to parse a frame from the buffered data. If enough data // has been buffered, the frame is returned. if let Some(frame) = self.parse_frame()? { return Ok(Some(frame)); } // There is not enough buffered data to read a frame. Attempt to // read more data from the socket. // // On success, the number of bytes is returned. `0` indicates "end // of stream". if 0 == self.stream.read_buf(&mut self.buffer).await? { // The remote closed the connection. For this to be a clean // shutdown, there should be no data in the read buffer. If // there is, this means that the peer closed the socket while // sending a frame. 
if self.buffer.is_empty() { return Ok(None); } else { return Err(Error::ConnectionError("connection reset by peer").into()); } } } } async fn send_result(&mut self, id: Option<&str>, ans: &str) -> anyhow::Result<()> { if let Some(id) = id { self.stream.write_all(id.as_bytes()).await?; self.stream.write_u8(b':').await?; } self.stream.write_all(ans.as_bytes()).await?; self.stream.write_all(b"\r\n").await?; self.stream.flush().await?; Ok(()) } // frame id:puzzle\r\n or puzzle\r\n fn parse_frame(&mut self) -> anyhow::Result<Option<Frame>> { // let mut buf = Cursor::new(&self.buffer[..]); let line_end = match self.buffer.find(b"\r\n") { Some(end) => end, None => return Ok(None), }; let mut parts = self.buffer[..line_end].split(|c| c == &b':'); // let vec = parts.into_iter().collect(); let maybe_id_or_puzzle = parts.next().ok_or(Error::ProtocolError)?; let maybe_id_or_puzzle = std::str::from_utf8(maybe_id_or_puzzle)?.to_string(); let maybe_puzzle = parts.next(); if maybe_puzzle.is_none() { return Ok(Some(Frame { id: None, puzzle: maybe_id_or_puzzle, })); } let puzzle = std::str::from_utf8(maybe_puzzle.unwrap())?.to_string(); if !parts.next().is_none() { return Err(Error::ProtocolError.into()); } self.buffer.advance(line_end + 2); Ok(Some(Frame { id: Some(maybe_id_or_puzzle), puzzle, })) } } #[derive(Debug)] struct Result { id: Option<String>, ans: String, } #[derive(Debug)] struct Frame { id: Option<String>, puzzle: String, } #[derive(Debug)] struct Handler { connection: Connection, shutdown: Shutdown, _shutdown_complete: mpsc::Sender<()>, } impl Handler { async fn run(&mut self) -> anyhow::Result<()> { // As long as the shutdown signal has not been received, try to read a // new request frame. while !self.shutdown.is_shutdown() { let maybe_frame = tokio::select! { res = self.connection.read_frame() => res?, _ = self.shutdown.recv() => { // If a shutdown signal is received, return from `run`. // This will result in the task terminating. 
return Ok(()); } }; // If `None` is returned from `read_frame()` then the peer closed // the socket. There is no further work to do and the task can be // terminated. let frame = match maybe_frame { Some(frame) => frame, None => return Ok(()), }; // get ans let ans = sudoku_resolve(&frame.puzzle); // let id = frame.puzzle.clone(); // let id = frame.id.clone(); // let ans = task::spawn_blocking(move || { // sudoku_resolve(&frame.puzzle) // }).await?; // self.connection.send_result(id.as_deref(), &ans).await?; self.connection.send_result(frame.id.as_deref(), &ans).await?; } Ok(()) } } pub async fn run(listener: TcpListener, shutdown: impl Future) -> anyhow::Result<()> { let (notify_shutdown, _) = broadcast::channel(1); let (shutdown_complete_tx, shutdown_complete_rx) = mpsc::channel(1); let mut server = Listener { listener, notify_shutdown, shutdown_complete_tx, shutdown_complete_rx, }; tokio::select! { res = server.run() => { // If an error is received here, accepting connections from the TCP // listener failed multiple times and the server is giving up and // shutting down. // // Errors encountered when handling individual connections do not // bubble up to this point. if let Err(err) = res { error!("failed to accept, cause: {}", err); } } _ = shutdown => { // The shutdown signal has been received. info!("shutting down"); } } let Listener { mut shutdown_complete_rx, shutdown_complete_tx, notify_shutdown, .. } = server; // When `notify_shutdown` is dropped, all tasks which have `subscribe`d will // receive the shutdown signal and can exit drop(notify_shutdown); // Drop final `Sender` so the `Receiver` below can complete drop(shutdown_complete_tx); // Wait for all active connections to finish processing. As the `Sender` // handle held by the listener has been dropped above, the only remaining // `Sender` instances are held by connection handler tasks. When those drop,
#[inline] fn sudoku_resolve(req: &str) -> String { recipes::sudoku::sudoku_resolve(&req) }
// the `mpsc` channel will close and `recv()` will return `None`. let _ = shutdown_complete_rx.recv().await; Ok(()) }
random_line_split
sudoku_server_basic.rs
use std::future::Future; use bytes::Buf; use bytes::BytesMut; use log::error; use log::info; use recipes::shutdown::Shutdown; use subslice::SubsliceExt; use thiserror::Error; use tokio::io::AsyncReadExt; use tokio::io::AsyncWriteExt; use tokio::io::BufWriter; use tokio::runtime::Builder; use tokio::signal; use tokio::sync::mpsc; use tokio::time; use tokio::{ net::{TcpListener, TcpStream}, sync::broadcast, }; fn main() -> anyhow::Result<()> { let thread_rt = Builder::new_multi_thread() .worker_threads(4) .thread_name("sudoku-server") .enable_io() .enable_time() .build()?; thread_rt.block_on(async move { env_logger::init(); let port = 9981; let listener = TcpListener::bind(&format!("0.0.0.0:{}", port)).await; info!("sudoku server start listening: {}", port); // if let Ok(listener) = listener { // let _ = run(listener, signal::ctrl_c()).await; // } match listener { Ok(l) => { let _ = run(l, signal::ctrl_c()).await; } Err(err) => { error!("bind address[0.0.0.0:{}] error, cause: {}", port, err); } } }); Ok(()) } #[derive(Error, Debug)] enum Error { #[error("ProtocolError")] ProtocolError, #[error("IOError: {0}")] IOError(#[from] std::io::Error), #[error("ConnectionError: {0}")] ConnectionError(&'static str), } #[derive(Debug)] struct Listener { listener: TcpListener, notify_shutdown: broadcast::Sender<()>, shutdown_complete_rx: mpsc::Receiver<()>, shutdown_complete_tx: mpsc::Sender<()>, } impl Listener { async fn run(&mut self) -> anyhow::Result<()> { info!("accepting inbound connections"); loop { let socket = self.accept().await?; let mut handler = Handler { connection: Connection::new(socket), shutdown: Shutdown::new(self.notify_shutdown.subscribe()), _shutdown_complete: self.shutdown_complete_tx.clone(), }; // handler reader tokio::spawn(async move { if let Err(err) = handler.run().await { error!("read error: {}", err); } }); } } async fn accept(&mut self) -> anyhow::Result<TcpStream> { let mut backoff = 1; // Try to accept a few times loop { // Perform the accept 
operation. If a socket is successfully // accepted, return it. Otherwise, save the error. match self.listener.accept().await { Ok((socket, _peer)) => { // info!("peer: {} connected.", peer); return Ok(socket); } Err(err) => { if backoff > 64 { // Accept has failed too many times. Return the error. return Err(err.into()); } } } // Pause execution until the back off period elapses. time::sleep(time::Duration::from_secs(backoff)).await; // Double the back off backoff *= 2; } } } #[derive(Debug)] struct Connection { stream: BufWriter<TcpStream>, buffer: BytesMut, } impl Connection { /// Create a new `Connection`, backed by `socket`. Read and write buffers /// are initialized. pub fn new(socket: TcpStream) -> Connection { // let (reader, writer) = tokio::io::split(socket); Connection { stream: BufWriter::new(socket), // writer: Arc::new(Mutex::new(BufWriter::new(writer))), // Default to a 4KB read buffer. For the use case of mini redis, // this is fine. However, real applications will want to tune this // value to their specific use case. There is a high likelihood that // a larger read buffer will work better. buffer: BytesMut::with_capacity(4 * 1024), } } async fn read_frame(&mut self) -> anyhow::Result<Option<Frame>> { loop { // Attempt to parse a frame from the buffered data. If enough data // has been buffered, the frame is returned. if let Some(frame) = self.parse_frame()? { return Ok(Some(frame)); } // There is not enough buffered data to read a frame. Attempt to // read more data from the socket. // // On success, the number of bytes is returned. `0` indicates "end // of stream". if 0 == self.stream.read_buf(&mut self.buffer).await? { // The remote closed the connection. For this to be a clean // shutdown, there should be no data in the read buffer. If // there is, this means that the peer closed the socket while // sending a frame. 
if self.buffer.is_empty() { return Ok(None); } else { return Err(Error::ConnectionError("connection reset by peer").into()); } } } } async fn send_result(&mut self, id: Option<&str>, ans: &str) -> anyhow::Result<()> { if let Some(id) = id
self.stream.write_all(ans.as_bytes()).await?; self.stream.write_all(b"\r\n").await?; self.stream.flush().await?; Ok(()) } // frame id:puzzle\r\n or puzzle\r\n fn parse_frame(&mut self) -> anyhow::Result<Option<Frame>> { // let mut buf = Cursor::new(&self.buffer[..]); let line_end = match self.buffer.find(b"\r\n") { Some(end) => end, None => return Ok(None), }; let mut parts = self.buffer[..line_end].split(|c| c == &b':'); // let vec = parts.into_iter().collect(); let maybe_id_or_puzzle = parts.next().ok_or(Error::ProtocolError)?; let maybe_id_or_puzzle = std::str::from_utf8(maybe_id_or_puzzle)?.to_string(); let maybe_puzzle = parts.next(); if maybe_puzzle.is_none() { return Ok(Some(Frame { id: None, puzzle: maybe_id_or_puzzle, })); } let puzzle = std::str::from_utf8(maybe_puzzle.unwrap())?.to_string(); if !parts.next().is_none() { return Err(Error::ProtocolError.into()); } self.buffer.advance(line_end + 2); Ok(Some(Frame { id: Some(maybe_id_or_puzzle), puzzle, })) } } #[derive(Debug)] struct Result { id: Option<String>, ans: String, } #[derive(Debug)] struct Frame { id: Option<String>, puzzle: String, } #[derive(Debug)] struct Handler { connection: Connection, shutdown: Shutdown, _shutdown_complete: mpsc::Sender<()>, } impl Handler { async fn run(&mut self) -> anyhow::Result<()> { // As long as the shutdown signal has not been received, try to read a // new request frame. while !self.shutdown.is_shutdown() { let maybe_frame = tokio::select! { res = self.connection.read_frame() => res?, _ = self.shutdown.recv() => { // If a shutdown signal is received, return from `run`. // This will result in the task terminating. return Ok(()); } }; // If `None` is returned from `read_frame()` then the peer closed // the socket. There is no further work to do and the task can be // terminated. 
let frame = match maybe_frame { Some(frame) => frame, None => return Ok(()), }; // get ans let ans = sudoku_resolve(&frame.puzzle); // let id = frame.puzzle.clone(); // let id = frame.id.clone(); // let ans = task::spawn_blocking(move || { // sudoku_resolve(&frame.puzzle) // }).await?; // self.connection.send_result(id.as_deref(), &ans).await?; self.connection.send_result(frame.id.as_deref(), &ans).await?; } Ok(()) } } pub async fn run(listener: TcpListener, shutdown: impl Future) -> anyhow::Result<()> { let (notify_shutdown, _) = broadcast::channel(1); let (shutdown_complete_tx, shutdown_complete_rx) = mpsc::channel(1); let mut server = Listener { listener, notify_shutdown, shutdown_complete_tx, shutdown_complete_rx, }; tokio::select! { res = server.run() => { // If an error is received here, accepting connections from the TCP // listener failed multiple times and the server is giving up and // shutting down. // // Errors encountered when handling individual connections do not // bubble up to this point. if let Err(err) = res { error!("failed to accept, cause: {}", err); } } _ = shutdown => { // The shutdown signal has been received. info!("shutting down"); } } let Listener { mut shutdown_complete_rx, shutdown_complete_tx, notify_shutdown, .. } = server; // When `notify_shutdown` is dropped, all tasks which have `subscribe`d will // receive the shutdown signal and can exit drop(notify_shutdown); // Drop final `Sender` so the `Receiver` below can complete drop(shutdown_complete_tx); // Wait for all active connections to finish processing. As the `Sender` // handle held by the listener has been dropped above, the only remaining // `Sender` instances are held by connection handler tasks. When those drop, // the `mpsc` channel will close and `recv()` will return `None`. let _ = shutdown_complete_rx.recv().await; Ok(()) } #[inline] fn sudoku_resolve(req: &str) -> String { recipes::sudoku::sudoku_resolve(&req) }
{ self.stream.write_all(id.as_bytes()).await?; self.stream.write_u8(b':').await?; }
conditional_block
sudoku_server_basic.rs
use std::future::Future; use bytes::Buf; use bytes::BytesMut; use log::error; use log::info; use recipes::shutdown::Shutdown; use subslice::SubsliceExt; use thiserror::Error; use tokio::io::AsyncReadExt; use tokio::io::AsyncWriteExt; use tokio::io::BufWriter; use tokio::runtime::Builder; use tokio::signal; use tokio::sync::mpsc; use tokio::time; use tokio::{ net::{TcpListener, TcpStream}, sync::broadcast, }; fn main() -> anyhow::Result<()> { let thread_rt = Builder::new_multi_thread() .worker_threads(4) .thread_name("sudoku-server") .enable_io() .enable_time() .build()?; thread_rt.block_on(async move { env_logger::init(); let port = 9981; let listener = TcpListener::bind(&format!("0.0.0.0:{}", port)).await; info!("sudoku server start listening: {}", port); // if let Ok(listener) = listener { // let _ = run(listener, signal::ctrl_c()).await; // } match listener { Ok(l) => { let _ = run(l, signal::ctrl_c()).await; } Err(err) => { error!("bind address[0.0.0.0:{}] error, cause: {}", port, err); } } }); Ok(()) } #[derive(Error, Debug)] enum Error { #[error("ProtocolError")] ProtocolError, #[error("IOError: {0}")] IOError(#[from] std::io::Error), #[error("ConnectionError: {0}")] ConnectionError(&'static str), } #[derive(Debug)] struct Listener { listener: TcpListener, notify_shutdown: broadcast::Sender<()>, shutdown_complete_rx: mpsc::Receiver<()>, shutdown_complete_tx: mpsc::Sender<()>, } impl Listener { async fn run(&mut self) -> anyhow::Result<()> { info!("accepting inbound connections"); loop { let socket = self.accept().await?; let mut handler = Handler { connection: Connection::new(socket), shutdown: Shutdown::new(self.notify_shutdown.subscribe()), _shutdown_complete: self.shutdown_complete_tx.clone(), }; // handler reader tokio::spawn(async move { if let Err(err) = handler.run().await { error!("read error: {}", err); } }); } } async fn accept(&mut self) -> anyhow::Result<TcpStream> { let mut backoff = 1; // Try to accept a few times loop { // Perform the accept 
operation. If a socket is successfully // accepted, return it. Otherwise, save the error. match self.listener.accept().await { Ok((socket, _peer)) => { // info!("peer: {} connected.", peer); return Ok(socket); } Err(err) => { if backoff > 64 { // Accept has failed too many times. Return the error. return Err(err.into()); } } } // Pause execution until the back off period elapses. time::sleep(time::Duration::from_secs(backoff)).await; // Double the back off backoff *= 2; } } } #[derive(Debug)] struct Connection { stream: BufWriter<TcpStream>, buffer: BytesMut, } impl Connection { /// Create a new `Connection`, backed by `socket`. Read and write buffers /// are initialized. pub fn new(socket: TcpStream) -> Connection { // let (reader, writer) = tokio::io::split(socket); Connection { stream: BufWriter::new(socket), // writer: Arc::new(Mutex::new(BufWriter::new(writer))), // Default to a 4KB read buffer. For the use case of mini redis, // this is fine. However, real applications will want to tune this // value to their specific use case. There is a high likelihood that // a larger read buffer will work better. buffer: BytesMut::with_capacity(4 * 1024), } } async fn read_frame(&mut self) -> anyhow::Result<Option<Frame>> { loop { // Attempt to parse a frame from the buffered data. If enough data // has been buffered, the frame is returned. if let Some(frame) = self.parse_frame()? { return Ok(Some(frame)); } // There is not enough buffered data to read a frame. Attempt to // read more data from the socket. // // On success, the number of bytes is returned. `0` indicates "end // of stream". if 0 == self.stream.read_buf(&mut self.buffer).await? { // The remote closed the connection. For this to be a clean // shutdown, there should be no data in the read buffer. If // there is, this means that the peer closed the socket while // sending a frame. 
if self.buffer.is_empty() { return Ok(None); } else { return Err(Error::ConnectionError("connection reset by peer").into()); } } } } async fn send_result(&mut self, id: Option<&str>, ans: &str) -> anyhow::Result<()> { if let Some(id) = id { self.stream.write_all(id.as_bytes()).await?; self.stream.write_u8(b':').await?; } self.stream.write_all(ans.as_bytes()).await?; self.stream.write_all(b"\r\n").await?; self.stream.flush().await?; Ok(()) } // frame id:puzzle\r\n or puzzle\r\n fn parse_frame(&mut self) -> anyhow::Result<Option<Frame>> { // let mut buf = Cursor::new(&self.buffer[..]); let line_end = match self.buffer.find(b"\r\n") { Some(end) => end, None => return Ok(None), }; let mut parts = self.buffer[..line_end].split(|c| c == &b':'); // let vec = parts.into_iter().collect(); let maybe_id_or_puzzle = parts.next().ok_or(Error::ProtocolError)?; let maybe_id_or_puzzle = std::str::from_utf8(maybe_id_or_puzzle)?.to_string(); let maybe_puzzle = parts.next(); if maybe_puzzle.is_none() { return Ok(Some(Frame { id: None, puzzle: maybe_id_or_puzzle, })); } let puzzle = std::str::from_utf8(maybe_puzzle.unwrap())?.to_string(); if !parts.next().is_none() { return Err(Error::ProtocolError.into()); } self.buffer.advance(line_end + 2); Ok(Some(Frame { id: Some(maybe_id_or_puzzle), puzzle, })) } } #[derive(Debug)] struct Result { id: Option<String>, ans: String, } #[derive(Debug)] struct Frame { id: Option<String>, puzzle: String, } #[derive(Debug)] struct Handler { connection: Connection, shutdown: Shutdown, _shutdown_complete: mpsc::Sender<()>, } impl Handler { async fn run(&mut self) -> anyhow::Result<()> { // As long as the shutdown signal has not been received, try to read a // new request frame. while !self.shutdown.is_shutdown() { let maybe_frame = tokio::select! { res = self.connection.read_frame() => res?, _ = self.shutdown.recv() => { // If a shutdown signal is received, return from `run`. // This will result in the task terminating. 
return Ok(()); } }; // If `None` is returned from `read_frame()` then the peer closed // the socket. There is no further work to do and the task can be // terminated. let frame = match maybe_frame { Some(frame) => frame, None => return Ok(()), }; // get ans let ans = sudoku_resolve(&frame.puzzle); // let id = frame.puzzle.clone(); // let id = frame.id.clone(); // let ans = task::spawn_blocking(move || { // sudoku_resolve(&frame.puzzle) // }).await?; // self.connection.send_result(id.as_deref(), &ans).await?; self.connection.send_result(frame.id.as_deref(), &ans).await?; } Ok(()) } } pub async fn run(listener: TcpListener, shutdown: impl Future) -> anyhow::Result<()>
#[inline] fn sudoku_resolve(req: &str) -> String { recipes::sudoku::sudoku_resolve(&req) }
{ let (notify_shutdown, _) = broadcast::channel(1); let (shutdown_complete_tx, shutdown_complete_rx) = mpsc::channel(1); let mut server = Listener { listener, notify_shutdown, shutdown_complete_tx, shutdown_complete_rx, }; tokio::select! { res = server.run() => { // If an error is received here, accepting connections from the TCP // listener failed multiple times and the server is giving up and // shutting down. // // Errors encountered when handling individual connections do not // bubble up to this point. if let Err(err) = res { error!("failed to accept, cause: {}", err); } } _ = shutdown => { // The shutdown signal has been received. info!("shutting down"); } } let Listener { mut shutdown_complete_rx, shutdown_complete_tx, notify_shutdown, .. } = server; // When `notify_shutdown` is dropped, all tasks which have `subscribe`d will // receive the shutdown signal and can exit drop(notify_shutdown); // Drop final `Sender` so the `Receiver` below can complete drop(shutdown_complete_tx); // Wait for all active connections to finish processing. As the `Sender` // handle held by the listener has been dropped above, the only remaining // `Sender` instances are held by connection handler tasks. When those drop, // the `mpsc` channel will close and `recv()` will return `None`. let _ = shutdown_complete_rx.recv().await; Ok(()) }
identifier_body
Amazon.py
# -*- coding: utf-8 -*- """ Created on Thu Dec 26 20:46:56 2019 @author: Natalia """ import pandas as pd import numpy as np import string import re from nltk.corpus import stopwords from nltk.stem import PorterStemmer from sklearn.model_selection import train_test_split from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.linear_model import LogisticRegression from sklearn.metrics import roc_curve, roc_auc_score, from sklearn.metrics import classification_report, confusion_matrix import matplotlib.pyplot as plt from wordcloud import WordCloud from PIL import Image from textblob import TextBlob from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC #wczytanie dane = pd.read_csv("allreviews.csv") reviews = dane reviews = reviews.drop(["ReviewTitle"], axis=1) reviews = reviews.drop(["Product"], axis=1) reviews = reviews.drop_duplicates(keep='first') #Sentiment Analysis reviews["Polarity"] = reviews["ReviewBody"].apply(lambda x: TextBlob(x).sentiment[0]) reviews["PolarityB"] = np.where(reviews["Polarity"]>0.2,1,(np.where(reviews["Polarity"]<-0.2,0,100))) # przedział neutralnych -0.2 do 0.2, bo to 1/5, bo 5 gwiazdek pd.crosstab(index = reviews["PolarityB"], columns="Total count") #Positive 58% #Negative 7% #Neutral 35% #Positive&Negative reviews["Satisfied"] = np.where(reviews["ReviewStar"]>3,1,(np.where(reviews["ReviewStar"]<3,0,100))) pd.crosstab(index = reviews["Satisfied"], columns="Total count") #Positive 64% #Negative 25% #Neutral 11% reviews["Difference"]= reviews["Satisfied"]-reviews["PolarityB"] pd.crosstab(index = reviews["Difference"], columns="Total count").plot(kind='pie', subplots=True, autopct='%1.1f%%') # 1 False positive + - <1% # -1 False negative - + 4% # 0 Correct 60% # -99 Positive but Neutral + o 35% wszystkie neutral # -100 Negative but Neutral - o # 99 Neutral but Positive o + # 100 Neutral but Negative o - #false match FalseNeg = reviews 
FalseNeg = FalseNeg[FalseNeg["Difference"]==-1] FalseNeg = FalseNeg[FalseNeg["Polarity"]>0.6] # górna 1/5 bo 5 gwiazdek FalsePos = reviews FalsePos = FalsePos[FalsePos["Difference"]==1] FalsePos = FalsePos[FalsePos["Polarity"]<-0.6] reviews = pd.concat([reviews,FalseNeg,FalsePos]).drop_duplicates(keep=False) #usuwam neutralne reviews = reviews[reviews["ReviewStar"]!=3] # Text pre-processing #change to lowercase reviews["ReviewBody"] = reviews["ReviewBody"].str.lower() #remove punctuation def remove_punctuation(a): a
reviews["ReviewBody"] = reviews["ReviewBody"].apply(remove_punctuation) #remove emoji def remove_emoji(a): emojis = re.compile("[" u"\U0001F600-\U0001F64F" # emoticons u"\U0001F300-\U0001F5FF" # symbols & pictographs u"\U0001F680-\U0001F6FF" # transport & map symbols u"\U0001F1E0-\U0001F1FF" # flags (iOS) u"\U00002702-\U000027B0" #signs u"\U000024C2-\U0001F251" #signs "]+", flags=re.UNICODE) return emojis.sub(r'',a) reviews["ReviewBody"] = reviews["ReviewBody"].apply(remove_emoji) #remove numbers def remove_numbers(a): a = ''.join([i for i in a if not i.isdigit()]) return a reviews["ReviewBody"] = reviews["ReviewBody"].apply(remove_numbers) #stopwords stopwords = stopwords.words('english') reviews["ReviewBody"] = reviews["ReviewBody"].apply(lambda x: ' '.join([word for word in x.split() if word not in (stopwords)])) ####### stopwords adjustment stopwords.extend(("also","go","went","get","getting", "got","u")) reviews["ReviewBody"] = reviews["ReviewBody"].apply(lambda x: ' '.join([word for word in x.split() if word not in (stopwords)])) #other words -> Context specific otherwords = ["amazon","jbl", "sennheiser", "boat", "bought","buy","purchase","purchasing", "product","earphone","earphones","ear","headphone","headphones","music","bluetooth"] reviews["ReviewBody"] = reviews["ReviewBody"].apply(lambda x: ' '.join([word for word in x.split() if word not in (otherwords)])) #stemming ps = PorterStemmer() reviews["ReviewBody"] = reviews["ReviewBody"].apply(lambda x: ' '.join([ps.stem(word) for word in x.split()])) #class proportion - 72% i 28% pd.crosstab(index = reviews["Satisfied"], columns="Total count") example = ["phones", "satisfied", "incredibly"] for word in example: print(ps.stem(word)) ######## WORDLIST ########## wordlist = pd.Series(np.concatenate([x.split() for x in reviews.ReviewBody])).value_counts() wordlist25 = wordlist.head(25) wordlist25.plot.barh(width=0.5,fontsize=20).invert_yaxis() plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/plt1.jpg") 
########## WORDCLOUD ####### fulltext = " ".join(r for r in reviews.ReviewBody) wordcloud = WordCloud(background_color="white").generate(fulltext) plt.imshow(wordcloud, interpolation='bilinear') plt.axis("off") plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/wc1.jpg") ############ WORDCLOUD mask ####### chmura_mask = np.array(Image.open("C:/Users/Natalia/Documents/Python/Amazon/chmura.png")) wordcloud = WordCloud(background_color="white", max_words=250, mask=chmura_mask,contour_width=2,contour_color="navy", collocations=True).generate(fulltext) wordcloud.to_file("C:/Users/Natalia/Documents/Python/Amazon/toheadphones.png") plt.figure(figsize=[10,10]) plt.imshow(wordcloud, interpolation='bilinear') plt.axis("off") plt.show() ########### Z podziałem na positive, negative, omijając neutral revpositive = reviews[reviews.Satisfied == 1] revnegative = reviews[reviews.Satisfied == 0] # POSITIVE WORDLIST wordlistpos = pd.Series(np.concatenate([x.split() for x in revpositive.ReviewBody])).value_counts() wordlistpos25 = wordlistpos.head(25) wordlistpos25.plot.barh(color="green", width=0.5,fontsize=20).invert_yaxis() plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/pltpos.jpg") #POSITIVE WORDCLOUD fulltextpos = " ".join(r for r in revpositive.ReviewBody) happy_mask = np.array(Image.open("C:/Users/Natalia/Documents/Python/Amazon/jeden.png")) wordcloud = WordCloud(background_color="white", max_words=250, mask=happy_mask,contour_width=2,contour_color="green", collocations=True).generate(fulltextpos) wordcloud.to_file("C:/Users/Natalia/Documents/Python/Amazon/tohappy.png") plt.figure(figsize=[10,10]) plt.imshow(wordcloud, interpolation='bilinear') plt.axis("off") #NEGATIVE WORDLIST wordlistneg = pd.Series(np.concatenate([x.split() for x in revnegative.ReviewBody])).value_counts() wordlistneg25 = wordlistneg.head(25) wordlistneg25.plot.barh(color="red",width=0.5, fontsize=20).invert_yaxis() plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/pltneg.jpg") #NEGATIVE 
WORDCLOUD fulltextneg = " ".join(r for r in revnegative.ReviewBody) sad_mask = np.array(Image.open("C:/Users/Natalia/Documents/Python/Amazon/dwa.png")) wordcloud = WordCloud(background_color="white", max_words=250, mask=sad_mask,contour_width=2,contour_color="red", collocations=True).generate(fulltextneg) wordcloud.to_file("C:/Users/Natalia/Documents/Python/Amazon/tosad.png") plt.figure(figsize=[10,10]) plt.imshow(wordcloud, interpolation='bilinear') plt.axis("off") #WYJĄTKOWO NIEZADOWOLENI KLIENCI, RATE = 1 revextnegative = reviews[reviews.ReviewStar == 1] wordlistextneg = pd.Series(np.concatenate([x.split() for x in revextnegative.ReviewBody])).value_counts() wordlistextneg25 = wordlistextneg.head(25) wordlistextneg25.plot.barh(color="black",width=0.5,fontsize=20).invert_yaxis() plt.show() # Subjectivity reviews["Subjectivity"] = reviews["ReviewBody"].apply(lambda x: TextBlob(x).sentiment[1]) reviews["SubjectivityB"] = np.where(reviews["Subjectivity"]>0.666,1,(np.where(reviews["Subjectivity"]<0.333,0,100))) # przedział 0.333-0.666 neutralne - trudno powiedzieć pd.crosstab(index = reviews["SubjectivityB"], columns="Total count") pd.crosstab(index = reviews["SubjectivityB"], columns="Total count").plot(kind='pie', subplots=True, labels=('Objektywne','Subjektywne','Trudno powiedzieć'),autopct='%1.1f%%') #subjektywne 1493; 71% poz, 29% neg #obiektywne 2873 58% poz, 42% neg #pozytywne 8362 - SUB 13%, OB 20% #negatywne 3281 - SUB 13%, OB 37% # Pozytywne i Subjektywne 9% reviews[(reviews.Satisfied==1) & (reviews.SubjectivityB==1)].count() # Negatywne i Subjektywne 4% reviews[(reviews.Satisfied==0) & (reviews.SubjectivityB==1)].count() # Pozytywne i Obiektywne 14% reviews[(reviews.Satisfied==1) & (reviews.SubjectivityB==0)].count() # Negatywne i Obiektywne 10% reviews[(reviews.Satisfied==0) & (reviews.SubjectivityB==0)].count() # podział na train i test - random split X_train, X_test, y_train, y_test = train_test_split(reviews['ReviewBody'], reviews['Satisfied'], 
random_state=0) #Model LG with CountVectorizer vect = CountVectorizer().fit(X_train) vect.get_feature_names()[::500] print(vect.vocabulary_) X_train_vectorized = vect.transform(X_train) print(X_train_vectorized[8725]) print('Shape', X_train_vectorized.shape) print(type(X_train_vectorized)) print(X_train_vectorized.toarray()[600]) #logistic regression model = LogisticRegression(multi_class='ovr',n_jobs=1,solver='liblinear') model.fit(X_train_vectorized,y_train) # testing predictions = model.predict(vect.transform(X_test)) print(confusion_matrix(y_test,predictions)) print(classification_report(y_test,predictions)) predictions_zero = [0 for _ in range(len(y_test))] auc_lr = roc_auc_score(y_test, predictions) auc_zero =roc_auc_score(y_test,predictions_zero) print('AUC: ',roc_auc_score(y_test, predictions)) false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, predictions) false_positive_rate_zero, true_positive_rate_zero, thresholds = roc_curve(y_test, predictions_zero) #roc_auc = auc(false_positive_rate, true_positive_rate) #plotting plt.title("Model") plt.plot(false_positive_rate,true_positive_rate,'r',label="AUC = %0.3f"% auc_lr) plt.legend(loc='lower right') plt.plot(false_positive_rate_zero,true_positive_rate_zero,'b--',label="AUC = %0.2f"% auc_zero) plt.ylabel('"True Positive"', fontsize=12) plt.xlabel('"False Positive"', fontsize=12) plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/plot1.jpg") ################################################ Model LG with Tf-Idf Vectorizer vectTf = TfidfVectorizer(smooth_idf=False).fit(X_train) print(vectTf.vocabulary_) print(vectTf.idf_) X_train_vectorizedTfidf = vectTf.transform(X_train) print(X_train_vectorizedTfidf) model = LogisticRegression(multi_class='ovr',n_jobs=1,solver="liblinear") model.fit(X_train_vectorizedTfidf,y_train) predictionsTfidf = model.predict(vectTf.transform(X_test)) print(confusion_matrix(y_test,predictionsTfidf)) print(classification_report(y_test,predictionsTfidf)) print("AUC: 
", roc_auc_score(y_test,predictionsTfidf)) auc_lr_tfidf = roc_auc_score(y_test, predictionsTfidf) false_positive_rate_tfidf, true_positive_rate_tfidf, thresholds = roc_curve(y_test, predictionsTfidf) false_positive_rate_zero, true_positive_rate_zero, thresholds = roc_curve(y_test, predictions_zero) plt.title("Model") plt.plot(false_positive_rate_tfidf,true_positive_rate_tfidf,'r',label="AUC = %0.3f"% auc_lr_tfidf) plt.legend(loc='lower right') plt.plot(false_positive_rate_zero,true_positive_rate_zero,'b--',label="AUC = %0.3f"% auc_zero) plt.ylabel('"True Positive"', fontsize=12) plt.xlabel('"False Positive"', fontsize=12) plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/plotTfidf.jpg") ####################################### Model Random Forest with CountVectorizer model = RandomForestClassifier(n_estimators=1000, random_state=0) model.fit(X_train_vectorized,y_train) predictionsRF = model.predict(vect.transform(X_test)) print(confusion_matrix(y_test,predictionsRF)) print(classification_report(y_test,predictionsRF)) print("AUC: ", roc_auc_score(y_test,predictionsRF)) false_positive_rate_RF, true_positive_rate_RF, thresholds = roc_curve(y_test, predictionsRF) #roc_auc = auc(false_positive_rate, true_positive_rate) #plotting auc_RF = roc_auc_score(y_test, predictionsRF) plt.title("Model") plt.plot(false_positive_rate_RF,true_positive_rate_RF,'r',label="AUC = %0.3f"% auc_RF) plt.legend(loc='lower right') plt.plot(false_positive_rate_zero,true_positive_rate_zero,'b--',label="AUC = %0.3f"% auc_zero) plt.ylabel('"True Positive"', fontsize=12) plt.xlabel('"False Positive"', fontsize=12) plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/plotRF.jpg") #################################### MODEL SVM model = SVC(kernel='linear') model.fit(X_train_vectorized,y_train) predictionsSVM = model.predict(vect.transform(X_test)) print(confusion_matrix(y_test,predictionsSVM)) print(classification_report(y_test,predictionsSVM)) ### auc_SVM = roc_auc_score(y_test, predictionsSVM) 
false_positive_rate_SVM, true_positive_rate_SVM, thresholds = roc_curve(y_test, predictionsSVM) plt.title("Model") plt.plot(false_positive_rate_SVM,true_positive_rate_SVM,'r',label="AUC = %0.3f"% auc_SVM) plt.legend(loc='lower right') plt.plot(false_positive_rate_zero,true_positive_rate_zero,'b--',label="AUC = %0.3f"% auc_zero) plt.ylabel('"True Positive"', fontsize=12) plt.xlabel('"False Positive"', fontsize=12) plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/plotSVM.jpg")
= ''.join([i for i in a if i not in frozenset(string.punctuation)]) return ' '+ a
identifier_body
Amazon.py
# -*- coding: utf-8 -*- """ Created on Thu Dec 26 20:46:56 2019 @author: Natalia """ import pandas as pd import numpy as np import string import re from nltk.corpus import stopwords from nltk.stem import PorterStemmer from sklearn.model_selection import train_test_split from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.linear_model import LogisticRegression from sklearn.metrics import roc_curve, roc_auc_score, from sklearn.metrics import classification_report, confusion_matrix import matplotlib.pyplot as plt from wordcloud import WordCloud from PIL import Image from textblob import TextBlob from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC #wczytanie dane = pd.read_csv("allreviews.csv") reviews = dane reviews = reviews.drop(["ReviewTitle"], axis=1) reviews = reviews.drop(["Product"], axis=1) reviews = reviews.drop_duplicates(keep='first') #Sentiment Analysis reviews["Polarity"] = reviews["ReviewBody"].apply(lambda x: TextBlob(x).sentiment[0]) reviews["PolarityB"] = np.where(reviews["Polarity"]>0.2,1,(np.where(reviews["Polarity"]<-0.2,0,100))) # przedział neutralnych -0.2 do 0.2, bo to 1/5, bo 5 gwiazdek pd.crosstab(index = reviews["PolarityB"], columns="Total count") #Positive 58% #Negative 7% #Neutral 35% #Positive&Negative reviews["Satisfied"] = np.where(reviews["ReviewStar"]>3,1,(np.where(reviews["ReviewStar"]<3,0,100))) pd.crosstab(index = reviews["Satisfied"], columns="Total count") #Positive 64% #Negative 25% #Neutral 11% reviews["Difference"]= reviews["Satisfied"]-reviews["PolarityB"] pd.crosstab(index = reviews["Difference"], columns="Total count").plot(kind='pie', subplots=True, autopct='%1.1f%%') # 1 False positive + - <1% # -1 False negative - + 4% # 0 Correct 60% # -99 Positive but Neutral + o 35% wszystkie neutral # -100 Negative but Neutral - o # 99 Neutral but Positive o + # 100 Neutral but Negative o - #false match FalseNeg = reviews 
FalseNeg = FalseNeg[FalseNeg["Difference"]==-1] FalseNeg = FalseNeg[FalseNeg["Polarity"]>0.6] # górna 1/5 bo 5 gwiazdek FalsePos = reviews FalsePos = FalsePos[FalsePos["Difference"]==1] FalsePos = FalsePos[FalsePos["Polarity"]<-0.6] reviews = pd.concat([reviews,FalseNeg,FalsePos]).drop_duplicates(keep=False) #usuwam neutralne reviews = reviews[reviews["ReviewStar"]!=3] # Text pre-processing #change to lowercase reviews["ReviewBody"] = reviews["ReviewBody"].str.lower() #remove punctuation def remove_punctuation(a): a = ''.join([i for i in a if i not in frozenset(string.punctuation)]) return ' '+ a reviews["ReviewBody"] = reviews["ReviewBody"].apply(remove_punctuation) #remove emoji def remove_emoji(a): emojis = re.compile("[" u"\U0001F600-\U0001F64F" # emoticons u"\U0001F300-\U0001F5FF" # symbols & pictographs u"\U0001F680-\U0001F6FF" # transport & map symbols u"\U0001F1E0-\U0001F1FF" # flags (iOS) u"\U00002702-\U000027B0" #signs u"\U000024C2-\U0001F251" #signs "]+", flags=re.UNICODE) return emojis.sub(r'',a) reviews["ReviewBody"] = reviews["ReviewBody"].apply(remove_emoji) #remove numbers def remove_numbers(a): a = ''.join([i for i in a if not i.isdigit()]) return a reviews["ReviewBody"] = reviews["ReviewBody"].apply(remove_numbers) #stopwords stopwords = stopwords.words('english') reviews["ReviewBody"] = reviews["ReviewBody"].apply(lambda x: ' '.join([word for word in x.split() if word not in (stopwords)])) ####### stopwords adjustment stopwords.extend(("also","go","went","get","getting", "got","u")) reviews["ReviewBody"] = reviews["ReviewBody"].apply(lambda x: ' '.join([word for word in x.split() if word not in (stopwords)])) #other words -> Context specific otherwords = ["amazon","jbl", "sennheiser", "boat", "bought","buy","purchase","purchasing", "product","earphone","earphones","ear","headphone","headphones","music","bluetooth"] reviews["ReviewBody"] = reviews["ReviewBody"].apply(lambda x: ' '.join([word for word in x.split() if word not in (otherwords)])) 
#stemming ps = PorterStemmer() reviews["ReviewBody"] = reviews["ReviewBody"].apply(lambda x: ' '.join([ps.stem(word) for word in x.split()])) #class proportion - 72% i 28% pd.crosstab(index = reviews["Satisfied"], columns="Total count") example = ["phones", "satisfied", "incredibly"] for word in example: print(ps.stem(word)) ######## WORDLIST ########## wordlist = pd.Series(np.concatenate([x.split() for x in reviews.ReviewBody])).value_counts() wordlist25 = wordlist.head(25) wordlist25.plot.barh(width=0.5,fontsize=20).invert_yaxis() plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/plt1.jpg") ########## WORDCLOUD ####### fulltext = " ".join(r for r in reviews.ReviewBody) wordcloud = WordCloud(background_color="white").generate(fulltext) plt.imshow(wordcloud, interpolation='bilinear') plt.axis("off") plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/wc1.jpg") ############ WORDCLOUD mask ####### chmura_mask = np.array(Image.open("C:/Users/Natalia/Documents/Python/Amazon/chmura.png")) wordcloud = WordCloud(background_color="white", max_words=250, mask=chmura_mask,contour_width=2,contour_color="navy", collocations=True).generate(fulltext) wordcloud.to_file("C:/Users/Natalia/Documents/Python/Amazon/toheadphones.png") plt.figure(figsize=[10,10]) plt.imshow(wordcloud, interpolation='bilinear') plt.axis("off") plt.show() ########### Z podziałem na positive, negative, omijając neutral revpositive = reviews[reviews.Satisfied == 1] revnegative = reviews[reviews.Satisfied == 0] # POSITIVE WORDLIST wordlistpos = pd.Series(np.concatenate([x.split() for x in revpositive.ReviewBody])).value_counts() wordlistpos25 = wordlistpos.head(25) wordlistpos25.plot.barh(color="green", width=0.5,fontsize=20).invert_yaxis() plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/pltpos.jpg") #POSITIVE WORDCLOUD fulltextpos = " ".join(r for r in revpositive.ReviewBody) happy_mask = np.array(Image.open("C:/Users/Natalia/Documents/Python/Amazon/jeden.png")) wordcloud = 
WordCloud(background_color="white", max_words=250, mask=happy_mask,contour_width=2,contour_color="green", collocations=True).generate(fulltextpos) wordcloud.to_file("C:/Users/Natalia/Documents/Python/Amazon/tohappy.png") plt.figure(figsize=[10,10]) plt.imshow(wordcloud, interpolation='bilinear') plt.axis("off") #NEGATIVE WORDLIST wordlistneg = pd.Series(np.concatenate([x.split() for x in revnegative.ReviewBody])).value_counts() wordlistneg25 = wordlistneg.head(25) wordlistneg25.plot.barh(color="red",width=0.5, fontsize=20).invert_yaxis() plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/pltneg.jpg") #NEGATIVE WORDCLOUD fulltextneg = " ".join(r for r in revnegative.ReviewBody) sad_mask = np.array(Image.open("C:/Users/Natalia/Documents/Python/Amazon/dwa.png")) wordcloud = WordCloud(background_color="white", max_words=250, mask=sad_mask,contour_width=2,contour_color="red", collocations=True).generate(fulltextneg) wordcloud.to_file("C:/Users/Natalia/Documents/Python/Amazon/tosad.png") plt.figure(figsize=[10,10]) plt.imshow(wordcloud, interpolation='bilinear') plt.axis("off") #WYJĄTKOWO NIEZADOWOLENI KLIENCI, RATE = 1 revextnegative = reviews[reviews.ReviewStar == 1] wordlistextneg = pd.Series(np.concatenate([x.split() for x in revextnegative.ReviewBody])).value_counts() wordlistextneg25 = wordlistextneg.head(25) wordlistextneg25.plot.barh(color="black",width=0.5,fontsize=20).invert_yaxis() plt.show() # Subjectivity reviews["Subjectivity"] = reviews["ReviewBody"].apply(lambda x: TextBlob(x).sentiment[1]) reviews["SubjectivityB"] = np.where(reviews["Subjectivity"]>0.666,1,(np.where(reviews["Subjectivity"]<0.333,0,100))) # przedział 0.333-0.666 neutralne - trudno powiedzieć pd.crosstab(index = reviews["SubjectivityB"], columns="Total count") pd.crosstab(index = reviews["SubjectivityB"], columns="Total count").plot(kind='pie', subplots=True, labels=('Objektywne','Subjektywne','Trudno powiedzieć'),autopct='%1.1f%%') #subjektywne 1493; 71% poz, 29% neg #obiektywne 2873 
58% poz, 42% neg #pozytywne 8362 - SUB 13%, OB 20% #negatywne 3281 - SUB 13%, OB 37% # Pozytywne i Subjektywne 9% reviews[(reviews.Satisfied==1) & (reviews.SubjectivityB==1)].count() # Negatywne i Subjektywne 4% reviews[(reviews.Satisfied==0) & (reviews.SubjectivityB==1)].count() # Pozytywne i Obiektywne 14% reviews[(reviews.Satisfied==1) & (reviews.SubjectivityB==0)].count() # Negatywne i Obiektywne 10% reviews[(reviews.Satisfied==0) & (reviews.SubjectivityB==0)].count() # podział na train i test - random split X_train, X_test, y_train, y_test = train_test_split(reviews['ReviewBody'], reviews['Satisfied'], random_state=0) #Model LG with CountVectorizer vect = CountVectorizer().fit(X_train) vect.get_feature_names()[::500] print(vect.vocabulary_) X_train_vectorized = vect.transform(X_train) print(X_train_vectorized[8725]) print('Shape', X_train_vectorized.shape) print(type(X_train_vectorized)) print(X_train_vectorized.toarray()[600]) #logistic regression model = LogisticRegression(multi_class='ovr',n_jobs=1,solver='liblinear') model.fit(X_train_vectorized,y_train) # testing predictions = model.predict(vect.transform(X_test)) print(confusion_matrix(y_test,predictions)) print(classification_report(y_test,predictions)) predictions_zero = [0 for _ in range(len(y_test))] auc_lr = roc_auc_score(y_test, predictions) auc_zero =roc_auc_score(y_test,predictions_zero) print('AUC: ',roc_auc_score(y_test, predictions)) false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, predictions) false_positive_rate_zero, true_positive_rate_zero, thresholds = roc_curve(y_test, predictions_zero) #roc_auc = auc(false_positive_rate, true_positive_rate) #plotting plt.title("Model") plt.plot(false_positive_rate,true_positive_rate,'r',label="AUC = %0.3f"% auc_lr) plt.legend(loc='lower right') plt.plot(false_positive_rate_zero,true_positive_rate_zero,'b--',label="AUC = %0.2f"% auc_zero) plt.ylabel('"True Positive"', fontsize=12) plt.xlabel('"False Positive"', fontsize=12) 
plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/plot1.jpg") ################################################ Model LG with Tf-Idf Vectorizer vectTf = TfidfVectorizer(smooth_idf=False).fit(X_train) print(vectTf.vocabulary_) print(vectTf.idf_) X_train_vectorizedTfidf = vectTf.transform(X_train) print(X_train_vectorizedTfidf) model = LogisticRegression(multi_class='ovr',n_jobs=1,solver="liblinear") model.fit(X_train_vectorizedTfidf,y_train) predictionsTfidf = model.predict(vectTf.transform(X_test)) print(confusion_matrix(y_test,predictionsTfidf)) print(classification_report(y_test,predictionsTfidf)) print("AUC: ", roc_auc_score(y_test,predictionsTfidf)) auc_lr_tfidf = roc_auc_score(y_test, predictionsTfidf) false_positive_rate_tfidf, true_positive_rate_tfidf, thresholds = roc_curve(y_test, predictionsTfidf) false_positive_rate_zero, true_positive_rate_zero, thresholds = roc_curve(y_test, predictions_zero) plt.title("Model") plt.plot(false_positive_rate_tfidf,true_positive_rate_tfidf,'r',label="AUC = %0.3f"% auc_lr_tfidf) plt.legend(loc='lower right') plt.plot(false_positive_rate_zero,true_positive_rate_zero,'b--',label="AUC = %0.3f"% auc_zero) plt.ylabel('"True Positive"', fontsize=12) plt.xlabel('"False Positive"', fontsize=12) plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/plotTfidf.jpg") ####################################### Model Random Forest with CountVectorizer model = RandomForestClassifier(n_estimators=1000, random_state=0) model.fit(X_train_vectorized,y_train) predictionsRF = model.predict(vect.transform(X_test)) print(confusion_matrix(y_test,predictionsRF)) print(classification_report(y_test,predictionsRF)) print("AUC: ", roc_auc_score(y_test,predictionsRF)) false_positive_rate_RF, true_positive_rate_RF, thresholds = roc_curve(y_test, predictionsRF) #roc_auc = auc(false_positive_rate, true_positive_rate) #plotting auc_RF = roc_auc_score(y_test, predictionsRF) plt.title("Model") 
plt.plot(false_positive_rate_RF,true_positive_rate_RF,'r',label="AUC = %0.3f"% auc_RF) plt.legend(loc='lower right') plt.plot(false_positive_rate_zero,true_positive_rate_zero,'b--',label="AUC = %0.3f"% auc_zero) plt.ylabel('"True Positive"', fontsize=12) plt.xlabel('"False Positive"', fontsize=12)
predictionsSVM = model.predict(vect.transform(X_test)) print(confusion_matrix(y_test,predictionsSVM)) print(classification_report(y_test,predictionsSVM)) ### auc_SVM = roc_auc_score(y_test, predictionsSVM) false_positive_rate_SVM, true_positive_rate_SVM, thresholds = roc_curve(y_test, predictionsSVM) plt.title("Model") plt.plot(false_positive_rate_SVM,true_positive_rate_SVM,'r',label="AUC = %0.3f"% auc_SVM) plt.legend(loc='lower right') plt.plot(false_positive_rate_zero,true_positive_rate_zero,'b--',label="AUC = %0.3f"% auc_zero) plt.ylabel('"True Positive"', fontsize=12) plt.xlabel('"False Positive"', fontsize=12) plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/plotSVM.jpg")
plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/plotRF.jpg") #################################### MODEL SVM model = SVC(kernel='linear') model.fit(X_train_vectorized,y_train)
random_line_split
Amazon.py
# -*- coding: utf-8 -*- """ Created on Thu Dec 26 20:46:56 2019 @author: Natalia """ import pandas as pd import numpy as np import string import re from nltk.corpus import stopwords from nltk.stem import PorterStemmer from sklearn.model_selection import train_test_split from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.linear_model import LogisticRegression from sklearn.metrics import roc_curve, roc_auc_score, from sklearn.metrics import classification_report, confusion_matrix import matplotlib.pyplot as plt from wordcloud import WordCloud from PIL import Image from textblob import TextBlob from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC #wczytanie dane = pd.read_csv("allreviews.csv") reviews = dane reviews = reviews.drop(["ReviewTitle"], axis=1) reviews = reviews.drop(["Product"], axis=1) reviews = reviews.drop_duplicates(keep='first') #Sentiment Analysis reviews["Polarity"] = reviews["ReviewBody"].apply(lambda x: TextBlob(x).sentiment[0]) reviews["PolarityB"] = np.where(reviews["Polarity"]>0.2,1,(np.where(reviews["Polarity"]<-0.2,0,100))) # przedział neutralnych -0.2 do 0.2, bo to 1/5, bo 5 gwiazdek pd.crosstab(index = reviews["PolarityB"], columns="Total count") #Positive 58% #Negative 7% #Neutral 35% #Positive&Negative reviews["Satisfied"] = np.where(reviews["ReviewStar"]>3,1,(np.where(reviews["ReviewStar"]<3,0,100))) pd.crosstab(index = reviews["Satisfied"], columns="Total count") #Positive 64% #Negative 25% #Neutral 11% reviews["Difference"]= reviews["Satisfied"]-reviews["PolarityB"] pd.crosstab(index = reviews["Difference"], columns="Total count").plot(kind='pie', subplots=True, autopct='%1.1f%%') # 1 False positive + - <1% # -1 False negative - + 4% # 0 Correct 60% # -99 Positive but Neutral + o 35% wszystkie neutral # -100 Negative but Neutral - o # 99 Neutral but Positive o + # 100 Neutral but Negative o - #false match FalseNeg = reviews 
FalseNeg = FalseNeg[FalseNeg["Difference"]==-1] FalseNeg = FalseNeg[FalseNeg["Polarity"]>0.6] # górna 1/5 bo 5 gwiazdek FalsePos = reviews FalsePos = FalsePos[FalsePos["Difference"]==1] FalsePos = FalsePos[FalsePos["Polarity"]<-0.6] reviews = pd.concat([reviews,FalseNeg,FalsePos]).drop_duplicates(keep=False) #usuwam neutralne reviews = reviews[reviews["ReviewStar"]!=3] # Text pre-processing #change to lowercase reviews["ReviewBody"] = reviews["ReviewBody"].str.lower() #remove punctuation def remove_punctuation(a): a = ''.join([i for i in a if i not in frozenset(string.punctuation)]) return ' '+ a reviews["ReviewBody"] = reviews["ReviewBody"].apply(remove_punctuation) #remove emoji def remove_emoji(a): emojis = re.compile("[" u"\U0001F600-\U0001F64F" # emoticons u"\U0001F300-\U0001F5FF" # symbols & pictographs u"\U0001F680-\U0001F6FF" # transport & map symbols u"\U0001F1E0-\U0001F1FF" # flags (iOS) u"\U00002702-\U000027B0" #signs u"\U000024C2-\U0001F251" #signs "]+", flags=re.UNICODE) return emojis.sub(r'',a) reviews["ReviewBody"] = reviews["ReviewBody"].apply(remove_emoji) #remove numbers def re
): a = ''.join([i for i in a if not i.isdigit()]) return a reviews["ReviewBody"] = reviews["ReviewBody"].apply(remove_numbers) #stopwords stopwords = stopwords.words('english') reviews["ReviewBody"] = reviews["ReviewBody"].apply(lambda x: ' '.join([word for word in x.split() if word not in (stopwords)])) ####### stopwords adjustment stopwords.extend(("also","go","went","get","getting", "got","u")) reviews["ReviewBody"] = reviews["ReviewBody"].apply(lambda x: ' '.join([word for word in x.split() if word not in (stopwords)])) #other words -> Context specific otherwords = ["amazon","jbl", "sennheiser", "boat", "bought","buy","purchase","purchasing", "product","earphone","earphones","ear","headphone","headphones","music","bluetooth"] reviews["ReviewBody"] = reviews["ReviewBody"].apply(lambda x: ' '.join([word for word in x.split() if word not in (otherwords)])) #stemming ps = PorterStemmer() reviews["ReviewBody"] = reviews["ReviewBody"].apply(lambda x: ' '.join([ps.stem(word) for word in x.split()])) #class proportion - 72% i 28% pd.crosstab(index = reviews["Satisfied"], columns="Total count") example = ["phones", "satisfied", "incredibly"] for word in example: print(ps.stem(word)) ######## WORDLIST ########## wordlist = pd.Series(np.concatenate([x.split() for x in reviews.ReviewBody])).value_counts() wordlist25 = wordlist.head(25) wordlist25.plot.barh(width=0.5,fontsize=20).invert_yaxis() plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/plt1.jpg") ########## WORDCLOUD ####### fulltext = " ".join(r for r in reviews.ReviewBody) wordcloud = WordCloud(background_color="white").generate(fulltext) plt.imshow(wordcloud, interpolation='bilinear') plt.axis("off") plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/wc1.jpg") ############ WORDCLOUD mask ####### chmura_mask = np.array(Image.open("C:/Users/Natalia/Documents/Python/Amazon/chmura.png")) wordcloud = WordCloud(background_color="white", max_words=250, mask=chmura_mask,contour_width=2,contour_color="navy", 
collocations=True).generate(fulltext) wordcloud.to_file("C:/Users/Natalia/Documents/Python/Amazon/toheadphones.png") plt.figure(figsize=[10,10]) plt.imshow(wordcloud, interpolation='bilinear') plt.axis("off") plt.show() ########### Z podziałem na positive, negative, omijając neutral revpositive = reviews[reviews.Satisfied == 1] revnegative = reviews[reviews.Satisfied == 0] # POSITIVE WORDLIST wordlistpos = pd.Series(np.concatenate([x.split() for x in revpositive.ReviewBody])).value_counts() wordlistpos25 = wordlistpos.head(25) wordlistpos25.plot.barh(color="green", width=0.5,fontsize=20).invert_yaxis() plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/pltpos.jpg") #POSITIVE WORDCLOUD fulltextpos = " ".join(r for r in revpositive.ReviewBody) happy_mask = np.array(Image.open("C:/Users/Natalia/Documents/Python/Amazon/jeden.png")) wordcloud = WordCloud(background_color="white", max_words=250, mask=happy_mask,contour_width=2,contour_color="green", collocations=True).generate(fulltextpos) wordcloud.to_file("C:/Users/Natalia/Documents/Python/Amazon/tohappy.png") plt.figure(figsize=[10,10]) plt.imshow(wordcloud, interpolation='bilinear') plt.axis("off") #NEGATIVE WORDLIST wordlistneg = pd.Series(np.concatenate([x.split() for x in revnegative.ReviewBody])).value_counts() wordlistneg25 = wordlistneg.head(25) wordlistneg25.plot.barh(color="red",width=0.5, fontsize=20).invert_yaxis() plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/pltneg.jpg") #NEGATIVE WORDCLOUD fulltextneg = " ".join(r for r in revnegative.ReviewBody) sad_mask = np.array(Image.open("C:/Users/Natalia/Documents/Python/Amazon/dwa.png")) wordcloud = WordCloud(background_color="white", max_words=250, mask=sad_mask,contour_width=2,contour_color="red", collocations=True).generate(fulltextneg) wordcloud.to_file("C:/Users/Natalia/Documents/Python/Amazon/tosad.png") plt.figure(figsize=[10,10]) plt.imshow(wordcloud, interpolation='bilinear') plt.axis("off") #WYJĄTKOWO NIEZADOWOLENI KLIENCI, RATE = 1 
revextnegative = reviews[reviews.ReviewStar == 1] wordlistextneg = pd.Series(np.concatenate([x.split() for x in revextnegative.ReviewBody])).value_counts() wordlistextneg25 = wordlistextneg.head(25) wordlistextneg25.plot.barh(color="black",width=0.5,fontsize=20).invert_yaxis() plt.show() # Subjectivity reviews["Subjectivity"] = reviews["ReviewBody"].apply(lambda x: TextBlob(x).sentiment[1]) reviews["SubjectivityB"] = np.where(reviews["Subjectivity"]>0.666,1,(np.where(reviews["Subjectivity"]<0.333,0,100))) # przedział 0.333-0.666 neutralne - trudno powiedzieć pd.crosstab(index = reviews["SubjectivityB"], columns="Total count") pd.crosstab(index = reviews["SubjectivityB"], columns="Total count").plot(kind='pie', subplots=True, labels=('Objektywne','Subjektywne','Trudno powiedzieć'),autopct='%1.1f%%') #subjektywne 1493; 71% poz, 29% neg #obiektywne 2873 58% poz, 42% neg #pozytywne 8362 - SUB 13%, OB 20% #negatywne 3281 - SUB 13%, OB 37% # Pozytywne i Subjektywne 9% reviews[(reviews.Satisfied==1) & (reviews.SubjectivityB==1)].count() # Negatywne i Subjektywne 4% reviews[(reviews.Satisfied==0) & (reviews.SubjectivityB==1)].count() # Pozytywne i Obiektywne 14% reviews[(reviews.Satisfied==1) & (reviews.SubjectivityB==0)].count() # Negatywne i Obiektywne 10% reviews[(reviews.Satisfied==0) & (reviews.SubjectivityB==0)].count() # podział na train i test - random split X_train, X_test, y_train, y_test = train_test_split(reviews['ReviewBody'], reviews['Satisfied'], random_state=0) #Model LG with CountVectorizer vect = CountVectorizer().fit(X_train) vect.get_feature_names()[::500] print(vect.vocabulary_) X_train_vectorized = vect.transform(X_train) print(X_train_vectorized[8725]) print('Shape', X_train_vectorized.shape) print(type(X_train_vectorized)) print(X_train_vectorized.toarray()[600]) #logistic regression model = LogisticRegression(multi_class='ovr',n_jobs=1,solver='liblinear') model.fit(X_train_vectorized,y_train) # testing predictions = 
model.predict(vect.transform(X_test)) print(confusion_matrix(y_test,predictions)) print(classification_report(y_test,predictions)) predictions_zero = [0 for _ in range(len(y_test))] auc_lr = roc_auc_score(y_test, predictions) auc_zero =roc_auc_score(y_test,predictions_zero) print('AUC: ',roc_auc_score(y_test, predictions)) false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, predictions) false_positive_rate_zero, true_positive_rate_zero, thresholds = roc_curve(y_test, predictions_zero) #roc_auc = auc(false_positive_rate, true_positive_rate) #plotting plt.title("Model") plt.plot(false_positive_rate,true_positive_rate,'r',label="AUC = %0.3f"% auc_lr) plt.legend(loc='lower right') plt.plot(false_positive_rate_zero,true_positive_rate_zero,'b--',label="AUC = %0.2f"% auc_zero) plt.ylabel('"True Positive"', fontsize=12) plt.xlabel('"False Positive"', fontsize=12) plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/plot1.jpg") ################################################ Model LG with Tf-Idf Vectorizer vectTf = TfidfVectorizer(smooth_idf=False).fit(X_train) print(vectTf.vocabulary_) print(vectTf.idf_) X_train_vectorizedTfidf = vectTf.transform(X_train) print(X_train_vectorizedTfidf) model = LogisticRegression(multi_class='ovr',n_jobs=1,solver="liblinear") model.fit(X_train_vectorizedTfidf,y_train) predictionsTfidf = model.predict(vectTf.transform(X_test)) print(confusion_matrix(y_test,predictionsTfidf)) print(classification_report(y_test,predictionsTfidf)) print("AUC: ", roc_auc_score(y_test,predictionsTfidf)) auc_lr_tfidf = roc_auc_score(y_test, predictionsTfidf) false_positive_rate_tfidf, true_positive_rate_tfidf, thresholds = roc_curve(y_test, predictionsTfidf) false_positive_rate_zero, true_positive_rate_zero, thresholds = roc_curve(y_test, predictions_zero) plt.title("Model") plt.plot(false_positive_rate_tfidf,true_positive_rate_tfidf,'r',label="AUC = %0.3f"% auc_lr_tfidf) plt.legend(loc='lower right') 
plt.plot(false_positive_rate_zero,true_positive_rate_zero,'b--',label="AUC = %0.3f"% auc_zero) plt.ylabel('"True Positive"', fontsize=12) plt.xlabel('"False Positive"', fontsize=12) plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/plotTfidf.jpg") ####################################### Model Random Forest with CountVectorizer model = RandomForestClassifier(n_estimators=1000, random_state=0) model.fit(X_train_vectorized,y_train) predictionsRF = model.predict(vect.transform(X_test)) print(confusion_matrix(y_test,predictionsRF)) print(classification_report(y_test,predictionsRF)) print("AUC: ", roc_auc_score(y_test,predictionsRF)) false_positive_rate_RF, true_positive_rate_RF, thresholds = roc_curve(y_test, predictionsRF) #roc_auc = auc(false_positive_rate, true_positive_rate) #plotting auc_RF = roc_auc_score(y_test, predictionsRF) plt.title("Model") plt.plot(false_positive_rate_RF,true_positive_rate_RF,'r',label="AUC = %0.3f"% auc_RF) plt.legend(loc='lower right') plt.plot(false_positive_rate_zero,true_positive_rate_zero,'b--',label="AUC = %0.3f"% auc_zero) plt.ylabel('"True Positive"', fontsize=12) plt.xlabel('"False Positive"', fontsize=12) plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/plotRF.jpg") #################################### MODEL SVM model = SVC(kernel='linear') model.fit(X_train_vectorized,y_train) predictionsSVM = model.predict(vect.transform(X_test)) print(confusion_matrix(y_test,predictionsSVM)) print(classification_report(y_test,predictionsSVM)) ### auc_SVM = roc_auc_score(y_test, predictionsSVM) false_positive_rate_SVM, true_positive_rate_SVM, thresholds = roc_curve(y_test, predictionsSVM) plt.title("Model") plt.plot(false_positive_rate_SVM,true_positive_rate_SVM,'r',label="AUC = %0.3f"% auc_SVM) plt.legend(loc='lower right') plt.plot(false_positive_rate_zero,true_positive_rate_zero,'b--',label="AUC = %0.3f"% auc_zero) plt.ylabel('"True Positive"', fontsize=12) plt.xlabel('"False Positive"', fontsize=12) 
plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/plotSVM.jpg")
move_numbers(a
identifier_name
Amazon.py
# -*- coding: utf-8 -*- """ Created on Thu Dec 26 20:46:56 2019 @author: Natalia """ import pandas as pd import numpy as np import string import re from nltk.corpus import stopwords from nltk.stem import PorterStemmer from sklearn.model_selection import train_test_split from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.linear_model import LogisticRegression from sklearn.metrics import roc_curve, roc_auc_score, from sklearn.metrics import classification_report, confusion_matrix import matplotlib.pyplot as plt from wordcloud import WordCloud from PIL import Image from textblob import TextBlob from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC #wczytanie dane = pd.read_csv("allreviews.csv") reviews = dane reviews = reviews.drop(["ReviewTitle"], axis=1) reviews = reviews.drop(["Product"], axis=1) reviews = reviews.drop_duplicates(keep='first') #Sentiment Analysis reviews["Polarity"] = reviews["ReviewBody"].apply(lambda x: TextBlob(x).sentiment[0]) reviews["PolarityB"] = np.where(reviews["Polarity"]>0.2,1,(np.where(reviews["Polarity"]<-0.2,0,100))) # przedział neutralnych -0.2 do 0.2, bo to 1/5, bo 5 gwiazdek pd.crosstab(index = reviews["PolarityB"], columns="Total count") #Positive 58% #Negative 7% #Neutral 35% #Positive&Negative reviews["Satisfied"] = np.where(reviews["ReviewStar"]>3,1,(np.where(reviews["ReviewStar"]<3,0,100))) pd.crosstab(index = reviews["Satisfied"], columns="Total count") #Positive 64% #Negative 25% #Neutral 11% reviews["Difference"]= reviews["Satisfied"]-reviews["PolarityB"] pd.crosstab(index = reviews["Difference"], columns="Total count").plot(kind='pie', subplots=True, autopct='%1.1f%%') # 1 False positive + - <1% # -1 False negative - + 4% # 0 Correct 60% # -99 Positive but Neutral + o 35% wszystkie neutral # -100 Negative but Neutral - o # 99 Neutral but Positive o + # 100 Neutral but Negative o - #false match FalseNeg = reviews 
FalseNeg = FalseNeg[FalseNeg["Difference"]==-1] FalseNeg = FalseNeg[FalseNeg["Polarity"]>0.6] # górna 1/5 bo 5 gwiazdek FalsePos = reviews FalsePos = FalsePos[FalsePos["Difference"]==1] FalsePos = FalsePos[FalsePos["Polarity"]<-0.6] reviews = pd.concat([reviews,FalseNeg,FalsePos]).drop_duplicates(keep=False) #usuwam neutralne reviews = reviews[reviews["ReviewStar"]!=3] # Text pre-processing #change to lowercase reviews["ReviewBody"] = reviews["ReviewBody"].str.lower() #remove punctuation def remove_punctuation(a): a = ''.join([i for i in a if i not in frozenset(string.punctuation)]) return ' '+ a reviews["ReviewBody"] = reviews["ReviewBody"].apply(remove_punctuation) #remove emoji def remove_emoji(a): emojis = re.compile("[" u"\U0001F600-\U0001F64F" # emoticons u"\U0001F300-\U0001F5FF" # symbols & pictographs u"\U0001F680-\U0001F6FF" # transport & map symbols u"\U0001F1E0-\U0001F1FF" # flags (iOS) u"\U00002702-\U000027B0" #signs u"\U000024C2-\U0001F251" #signs "]+", flags=re.UNICODE) return emojis.sub(r'',a) reviews["ReviewBody"] = reviews["ReviewBody"].apply(remove_emoji) #remove numbers def remove_numbers(a): a = ''.join([i for i in a if not i.isdigit()]) return a reviews["ReviewBody"] = reviews["ReviewBody"].apply(remove_numbers) #stopwords stopwords = stopwords.words('english') reviews["ReviewBody"] = reviews["ReviewBody"].apply(lambda x: ' '.join([word for word in x.split() if word not in (stopwords)])) ####### stopwords adjustment stopwords.extend(("also","go","went","get","getting", "got","u")) reviews["ReviewBody"] = reviews["ReviewBody"].apply(lambda x: ' '.join([word for word in x.split() if word not in (stopwords)])) #other words -> Context specific otherwords = ["amazon","jbl", "sennheiser", "boat", "bought","buy","purchase","purchasing", "product","earphone","earphones","ear","headphone","headphones","music","bluetooth"] reviews["ReviewBody"] = reviews["ReviewBody"].apply(lambda x: ' '.join([word for word in x.split() if word not in (otherwords)])) 
#stemming ps = PorterStemmer() reviews["ReviewBody"] = reviews["ReviewBody"].apply(lambda x: ' '.join([ps.stem(word) for word in x.split()])) #class proportion - 72% i 28% pd.crosstab(index = reviews["Satisfied"], columns="Total count") example = ["phones", "satisfied", "incredibly"] for word in example: pr
######## WORDLIST ########## wordlist = pd.Series(np.concatenate([x.split() for x in reviews.ReviewBody])).value_counts() wordlist25 = wordlist.head(25) wordlist25.plot.barh(width=0.5,fontsize=20).invert_yaxis() plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/plt1.jpg") ########## WORDCLOUD ####### fulltext = " ".join(r for r in reviews.ReviewBody) wordcloud = WordCloud(background_color="white").generate(fulltext) plt.imshow(wordcloud, interpolation='bilinear') plt.axis("off") plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/wc1.jpg") ############ WORDCLOUD mask ####### chmura_mask = np.array(Image.open("C:/Users/Natalia/Documents/Python/Amazon/chmura.png")) wordcloud = WordCloud(background_color="white", max_words=250, mask=chmura_mask,contour_width=2,contour_color="navy", collocations=True).generate(fulltext) wordcloud.to_file("C:/Users/Natalia/Documents/Python/Amazon/toheadphones.png") plt.figure(figsize=[10,10]) plt.imshow(wordcloud, interpolation='bilinear') plt.axis("off") plt.show() ########### Z podziałem na positive, negative, omijając neutral revpositive = reviews[reviews.Satisfied == 1] revnegative = reviews[reviews.Satisfied == 0] # POSITIVE WORDLIST wordlistpos = pd.Series(np.concatenate([x.split() for x in revpositive.ReviewBody])).value_counts() wordlistpos25 = wordlistpos.head(25) wordlistpos25.plot.barh(color="green", width=0.5,fontsize=20).invert_yaxis() plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/pltpos.jpg") #POSITIVE WORDCLOUD fulltextpos = " ".join(r for r in revpositive.ReviewBody) happy_mask = np.array(Image.open("C:/Users/Natalia/Documents/Python/Amazon/jeden.png")) wordcloud = WordCloud(background_color="white", max_words=250, mask=happy_mask,contour_width=2,contour_color="green", collocations=True).generate(fulltextpos) wordcloud.to_file("C:/Users/Natalia/Documents/Python/Amazon/tohappy.png") plt.figure(figsize=[10,10]) plt.imshow(wordcloud, interpolation='bilinear') plt.axis("off") #NEGATIVE WORDLIST wordlistneg = 
pd.Series(np.concatenate([x.split() for x in revnegative.ReviewBody])).value_counts() wordlistneg25 = wordlistneg.head(25) wordlistneg25.plot.barh(color="red",width=0.5, fontsize=20).invert_yaxis() plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/pltneg.jpg") #NEGATIVE WORDCLOUD fulltextneg = " ".join(r for r in revnegative.ReviewBody) sad_mask = np.array(Image.open("C:/Users/Natalia/Documents/Python/Amazon/dwa.png")) wordcloud = WordCloud(background_color="white", max_words=250, mask=sad_mask,contour_width=2,contour_color="red", collocations=True).generate(fulltextneg) wordcloud.to_file("C:/Users/Natalia/Documents/Python/Amazon/tosad.png") plt.figure(figsize=[10,10]) plt.imshow(wordcloud, interpolation='bilinear') plt.axis("off") #WYJĄTKOWO NIEZADOWOLENI KLIENCI, RATE = 1 revextnegative = reviews[reviews.ReviewStar == 1] wordlistextneg = pd.Series(np.concatenate([x.split() for x in revextnegative.ReviewBody])).value_counts() wordlistextneg25 = wordlistextneg.head(25) wordlistextneg25.plot.barh(color="black",width=0.5,fontsize=20).invert_yaxis() plt.show() # Subjectivity reviews["Subjectivity"] = reviews["ReviewBody"].apply(lambda x: TextBlob(x).sentiment[1]) reviews["SubjectivityB"] = np.where(reviews["Subjectivity"]>0.666,1,(np.where(reviews["Subjectivity"]<0.333,0,100))) # przedział 0.333-0.666 neutralne - trudno powiedzieć pd.crosstab(index = reviews["SubjectivityB"], columns="Total count") pd.crosstab(index = reviews["SubjectivityB"], columns="Total count").plot(kind='pie', subplots=True, labels=('Objektywne','Subjektywne','Trudno powiedzieć'),autopct='%1.1f%%') #subjektywne 1493; 71% poz, 29% neg #obiektywne 2873 58% poz, 42% neg #pozytywne 8362 - SUB 13%, OB 20% #negatywne 3281 - SUB 13%, OB 37% # Pozytywne i Subjektywne 9% reviews[(reviews.Satisfied==1) & (reviews.SubjectivityB==1)].count() # Negatywne i Subjektywne 4% reviews[(reviews.Satisfied==0) & (reviews.SubjectivityB==1)].count() # Pozytywne i Obiektywne 14% reviews[(reviews.Satisfied==1) & 
(reviews.SubjectivityB==0)].count() # Negatywne i Obiektywne 10% reviews[(reviews.Satisfied==0) & (reviews.SubjectivityB==0)].count() # podział na train i test - random split X_train, X_test, y_train, y_test = train_test_split(reviews['ReviewBody'], reviews['Satisfied'], random_state=0) #Model LG with CountVectorizer vect = CountVectorizer().fit(X_train) vect.get_feature_names()[::500] print(vect.vocabulary_) X_train_vectorized = vect.transform(X_train) print(X_train_vectorized[8725]) print('Shape', X_train_vectorized.shape) print(type(X_train_vectorized)) print(X_train_vectorized.toarray()[600]) #logistic regression model = LogisticRegression(multi_class='ovr',n_jobs=1,solver='liblinear') model.fit(X_train_vectorized,y_train) # testing predictions = model.predict(vect.transform(X_test)) print(confusion_matrix(y_test,predictions)) print(classification_report(y_test,predictions)) predictions_zero = [0 for _ in range(len(y_test))] auc_lr = roc_auc_score(y_test, predictions) auc_zero =roc_auc_score(y_test,predictions_zero) print('AUC: ',roc_auc_score(y_test, predictions)) false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, predictions) false_positive_rate_zero, true_positive_rate_zero, thresholds = roc_curve(y_test, predictions_zero) #roc_auc = auc(false_positive_rate, true_positive_rate) #plotting plt.title("Model") plt.plot(false_positive_rate,true_positive_rate,'r',label="AUC = %0.3f"% auc_lr) plt.legend(loc='lower right') plt.plot(false_positive_rate_zero,true_positive_rate_zero,'b--',label="AUC = %0.2f"% auc_zero) plt.ylabel('"True Positive"', fontsize=12) plt.xlabel('"False Positive"', fontsize=12) plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/plot1.jpg") ################################################ Model LG with Tf-Idf Vectorizer vectTf = TfidfVectorizer(smooth_idf=False).fit(X_train) print(vectTf.vocabulary_) print(vectTf.idf_) X_train_vectorizedTfidf = vectTf.transform(X_train) print(X_train_vectorizedTfidf) model = 
LogisticRegression(multi_class='ovr',n_jobs=1,solver="liblinear") model.fit(X_train_vectorizedTfidf,y_train) predictionsTfidf = model.predict(vectTf.transform(X_test)) print(confusion_matrix(y_test,predictionsTfidf)) print(classification_report(y_test,predictionsTfidf)) print("AUC: ", roc_auc_score(y_test,predictionsTfidf)) auc_lr_tfidf = roc_auc_score(y_test, predictionsTfidf) false_positive_rate_tfidf, true_positive_rate_tfidf, thresholds = roc_curve(y_test, predictionsTfidf) false_positive_rate_zero, true_positive_rate_zero, thresholds = roc_curve(y_test, predictions_zero) plt.title("Model") plt.plot(false_positive_rate_tfidf,true_positive_rate_tfidf,'r',label="AUC = %0.3f"% auc_lr_tfidf) plt.legend(loc='lower right') plt.plot(false_positive_rate_zero,true_positive_rate_zero,'b--',label="AUC = %0.3f"% auc_zero) plt.ylabel('"True Positive"', fontsize=12) plt.xlabel('"False Positive"', fontsize=12) plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/plotTfidf.jpg") ####################################### Model Random Forest with CountVectorizer model = RandomForestClassifier(n_estimators=1000, random_state=0) model.fit(X_train_vectorized,y_train) predictionsRF = model.predict(vect.transform(X_test)) print(confusion_matrix(y_test,predictionsRF)) print(classification_report(y_test,predictionsRF)) print("AUC: ", roc_auc_score(y_test,predictionsRF)) false_positive_rate_RF, true_positive_rate_RF, thresholds = roc_curve(y_test, predictionsRF) #roc_auc = auc(false_positive_rate, true_positive_rate) #plotting auc_RF = roc_auc_score(y_test, predictionsRF) plt.title("Model") plt.plot(false_positive_rate_RF,true_positive_rate_RF,'r',label="AUC = %0.3f"% auc_RF) plt.legend(loc='lower right') plt.plot(false_positive_rate_zero,true_positive_rate_zero,'b--',label="AUC = %0.3f"% auc_zero) plt.ylabel('"True Positive"', fontsize=12) plt.xlabel('"False Positive"', fontsize=12) plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/plotRF.jpg") #################################### 
MODEL SVM model = SVC(kernel='linear') model.fit(X_train_vectorized,y_train) predictionsSVM = model.predict(vect.transform(X_test)) print(confusion_matrix(y_test,predictionsSVM)) print(classification_report(y_test,predictionsSVM)) ### auc_SVM = roc_auc_score(y_test, predictionsSVM) false_positive_rate_SVM, true_positive_rate_SVM, thresholds = roc_curve(y_test, predictionsSVM) plt.title("Model") plt.plot(false_positive_rate_SVM,true_positive_rate_SVM,'r',label="AUC = %0.3f"% auc_SVM) plt.legend(loc='lower right') plt.plot(false_positive_rate_zero,true_positive_rate_zero,'b--',label="AUC = %0.3f"% auc_zero) plt.ylabel('"True Positive"', fontsize=12) plt.xlabel('"False Positive"', fontsize=12) plt.savefig("C:/Users/Natalia/Documents/Python/Amazon/plotSVM.jpg")
int(ps.stem(word))
conditional_block
cliff_walking.py
from math import * from numpy import * from random import * import numpy as np import matplotlib.pyplot as plt from colorama import Fore, Back, Style from gridworld import q_to_arrow N_ROWS = 6 N_COLUMNS = 10 class State(object): def __init__(self, i, j, is_cliff=False, is_goal=False): self.i = i self.j = j self.is_cliff = is_cliff self.is_goal = is_goal # north, east, south, west self.q_values = np.array([0.0, 0.0, 0.0, 0.0]) def __str__(self): return '({}, {})'.format(self.i, self.j) def is_terminal(self): return self.is_goal or self.is_cliff def get_max_q_index(self): best_q_values = np.argwhere(self.q_values == np.max(self.q_values)) if len(best_q_values) > 1: return best_q_values[randint(0, len(best_q_values) - 1)][0] else: _max_q = np.argmax(self.q_values) return _max_q def get_max_q_value(self): return np.max(self.q_values) def initialize_states(): # This is the set of states, all initialised with default values states = [[State(j, i) for i in range(N_COLUMNS)] for j in range(N_ROWS)] # make the cliff for j in range(1, N_COLUMNS - 1): states[-1][j].is_cliff = True states[-1][-1].is_goal = True return states # The reward function defines what reward I get for transitioning between the first and second state def reward(s_1, s_2): if (s_1.is_goal or s_1.is_cliff): return 0 elif (s_2.is_goal): return 10 elif (s_2.is_cliff): return -100 else: return -1 """ the transition function takes state and action and results in a new state, depending on their attributes. The method takes the whole state-space as an argument (since the transition depends on the attributes of the states in the state-space), which could for example be the "states" matrix from above, the current state s from the state-space (with its attributes), and the current action, which takes the form of a "difference vector. For example, dx = 0, dy = 1 means: Move to the south. 
dx = -1, dy = 0 means: Move to the left""" def transition(stsp, s, di, dj): if (s.is_cliff or s.is_goal): return s elif (s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS)): return s else: return stsp[s.i + di][s.j + dj] gamma = 1 learning_rate = 0.01 def action_to_diff_vector(action): if action == 0: # NORTH return -1, 0 elif action == 1: # EAST return 0, 1 elif action == 2: # SOUTH return 1, 0 elif action == 3: # WEST return 0, -1 def action_to_verbose(action): if action == 0: return 'NORTH' elif action == 1: return 'EAST' elif action == 2: return 'SOUTH' elif action == 3: return 'WEST' def sarsa(state, next_state, action, next_state_action): return reward(state, next_state), state.q_values[action] +\ learning_rate * (reward(state, next_state) + gamma * next_state.q_values[next_state_action] - state.q_values[action]) def q_learning(state, next_state, action, next_state_action): next_state_q_value = next_state.get_max_q_value() return reward(state, next_state), state.q_values[action] +\ learning_rate * (reward(state, next_state) + gamma * next_state_q_value - state.q_values[action]) N_STEPS = 10000 METHOD = 'BOTH' EPSILONS = [0.05, 0.1, 0.25] def run_code(use_q_learning=False, _epsilon=0.01): states = initialize_states() decay = 1 min_epsilon = 0.00001 epsilon = _epsilon episode_rewards = [] mistakes_array = [] # array which tracks error from convergence on each step for i in range(N_STEPS): # select a random starting state current_state = states[N_ROWS-1][0] # iterate until reaching a terminal state epsilon = max(min_epsilon, epsilon * decay) episode_reward = 0 while not current_state.is_terminal(): if random() < epsilon: next_action = randint(0, 3) else: next_action = current_state.get_max_q_index() di, dj = action_to_diff_vector(next_action) next_state = transition(states, current_state, di, dj) if random() < epsilon: next_state_action = randint(0, 3) else: next_state_action = next_state.get_max_q_index() if use_q_learning: reward, 
current_state.q_values[next_action] = q_learning(current_state, next_state, next_action, next_state_action) else: reward, current_state.q_values[next_action] = sarsa(current_state, next_state, next_action, next_state_action) # print(current_state, next_state, action_to_verbose(next_action), di, dj) episode_reward += reward current_state = next_state if len(episode_rewards): episode_rewards.append(episode_rewards[-1] + episode_reward) else: episode_rewards.append(episode_reward) ''' if (i % 100 == 0): print(i) ''' mistakes_array.append(check_accuracy(states)) return np.array(mistakes_array), states, episode_rewards def check_accuracy(states): correct_result = np.array([ [-3, -2, -1, 0 , 1 , 2 , 3 , 4 , 5 , 6 ], [-2, -1, 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 ], [-1, 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 ], [0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 ], [1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 ], [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ], ]) mistakes_delta = 0 for i in range(N_ROWS): for j in range(N_COLUMNS): mistakes_delta += abs(correct_result[i][j] - max(states[i][j].q_values)) return mistakes_delta def
(mistakes_sarsa, mistakes_q_learning): plt.gca().invert_yaxis() legend = [] for mistake_sarsa in mistakes_sarsa: plt.plot(mistake_sarsa[1]) legend.append(r'SARSA $\epsilon={}$'.format(mistake_sarsa[0])) for mistake_q_learning in mistakes_q_learning: plt.plot(mistake_q_learning[1]) legend.append(r'Q-learning $\epsilon={}$'.format(mistake_q_learning[0])) plt.grid(which='y') plt.legend(legend) plt.savefig('CLIFF_SARSA_VS_Q_LEARNING_{}.png'.format(N_STEPS)) # plt.show() def plot_best_q_values_states(states, method, epsilon, PLOTS, fig, ax): final_grid = np.array([[max(states[i][j].q_values) for j in range(N_COLUMNS)] for i in range(N_ROWS)]) if PLOTS > 2: ax = ax[PLOTS % 3, 1] else: ax = ax[PLOTS, 0] ax.imshow(final_grid, aspect='auto', cmap='coolwarm') # fig, ax = plt.subplots() ax.set_xticks(np.arange(N_COLUMNS)) ax.set_yticks(np.arange(N_ROWS)) ax.set_xticklabels([i for i in range(N_COLUMNS)]) ax.set_yticklabels([i for i in range(N_ROWS)]) plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor") # Loop over data dimensions and create text annotations. 
for i in range(N_ROWS): for j in range(N_COLUMNS): text = ax.text(j, i, '{:.2f}'.format(max(states[i][j].q_values)), ha="center", va="center", color="w") fig.tight_layout() ax.set_title("{}; $\epsilon={}$".format(method, epsilon)) for i in range(N_ROWS): str_ = "" for j in range(N_COLUMNS): str_ += str(int(final_grid[i][j])) + ", " PLOTS += 1 # plt.savefig('CLIFF_WALKING: {}-{}-{}.png'.format(N_STEPS, epsilon, method)) # plt.show() def display_optimal_policy(states, method, epsilon): print("{}; ε = {}".format(method, epsilon)) print('-' * 60) for i in range(len(states)): line_str = '' for j in range(len(states[0])): if j == 0: print('|', end='') if states[i][j].is_goal: print(Back.GREEN + ' ', end='') print(Style.RESET_ALL + ' | ', end='') elif states[i][j].is_cliff: print(Back.RED + ' ', end='') print(Style.RESET_ALL + ' | ', end='') else: print(' {} | '.format(q_to_arrow(states[i][j].get_max_q_index())), end='') print(line_str) print('-' * 60) if METHOD not in ['Q_LEARNING', 'SARSA', 'BOTH']: print('invalidt method. 
must be Q_LEARNING or SARSA or both') import sys; sys.exit() mistakes_q_learning = [] mistakes_sarsa = [] PLOTS = 0 fig, axes = plt.subplots(3, 2) rewards = [] for epsilon in EPSILONS: if METHOD == 'Q_LEARNING' or METHOD == 'BOTH': _mistakes_q_learning, end_states_q_learning, episode_rewards = run_code(use_q_learning=True, _epsilon=epsilon) plot_best_q_values_states(end_states_q_learning, 'Q_LEARNING', epsilon, PLOTS, fig, axes) display_optimal_policy(end_states_q_learning, 'Q LEARNING', epsilon) mistakes_q_learning.append((epsilon, _mistakes_q_learning)) rewards.append(('Q_LEARNING', epsilon, episode_rewards)) PLOTS += 1 for epsilon in EPSILONS: if METHOD == 'SARSA' or METHOD == 'BOTH': _mistakes_sarsa, end_states_sarsa, episode_rewards = run_code(use_q_learning=False, _epsilon=epsilon) plot_best_q_values_states(end_states_sarsa, 'SARSA', epsilon, PLOTS, fig, axes) display_optimal_policy(end_states_sarsa, 'SARSA', epsilon) mistakes_sarsa.append((epsilon, _mistakes_sarsa)) rewards.append(('SARSA', epsilon, episode_rewards)) PLOTS += 1 plt.savefig('all_runs.png') plt.show() # for i, j in [(0, 3), (1, 4), (2, 5)]: for reward in rewards: # plt.plot(rewards[i][2], 'o', label='{} ε = {} '.format(rewards[i][0], rewards[i][1])) # plt.plot(rewards[j][2], 'o', label='{} ε = {} '.format(rewards[j][0], rewards[j][1])) plt.plot(reward[2], label='{} ε = {} '.format(reward[0], reward[1])) plt.xlabel('Episodes') plt.ylabel('Sum of rewards during episode') plt.legend() plt.show() plt.savefig('episode_rewards.png') plot_errors(mistakes_sarsa, mistakes_q_learning)
plot_errors
identifier_name
cliff_walking.py
from math import * from numpy import * from random import * import numpy as np import matplotlib.pyplot as plt from colorama import Fore, Back, Style from gridworld import q_to_arrow N_ROWS = 6 N_COLUMNS = 10 class State(object): def __init__(self, i, j, is_cliff=False, is_goal=False): self.i = i self.j = j self.is_cliff = is_cliff self.is_goal = is_goal # north, east, south, west self.q_values = np.array([0.0, 0.0, 0.0, 0.0]) def __str__(self): return '({}, {})'.format(self.i, self.j) def is_terminal(self): return self.is_goal or self.is_cliff def get_max_q_index(self): best_q_values = np.argwhere(self.q_values == np.max(self.q_values)) if len(best_q_values) > 1: return best_q_values[randint(0, len(best_q_values) - 1)][0] else: _max_q = np.argmax(self.q_values) return _max_q def get_max_q_value(self): return np.max(self.q_values) def initialize_states(): # This is the set of states, all initialised with default values states = [[State(j, i) for i in range(N_COLUMNS)] for j in range(N_ROWS)] # make the cliff for j in range(1, N_COLUMNS - 1): states[-1][j].is_cliff = True states[-1][-1].is_goal = True return states # The reward function defines what reward I get for transitioning between the first and second state def reward(s_1, s_2): if (s_1.is_goal or s_1.is_cliff): return 0 elif (s_2.is_goal): return 10 elif (s_2.is_cliff): return -100 else: return -1 """ the transition function takes state and action and results in a new state, depending on their attributes. The method takes the whole state-space as an argument (since the transition depends on the attributes of the states in the state-space), which could for example be the "states" matrix from above, the current state s from the state-space (with its attributes), and the current action, which takes the form of a "difference vector. For example, dx = 0, dy = 1 means: Move to the south. 
dx = -1, dy = 0 means: Move to the left""" def transition(stsp, s, di, dj): if (s.is_cliff or s.is_goal): return s elif (s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS)): return s else: return stsp[s.i + di][s.j + dj] gamma = 1 learning_rate = 0.01 def action_to_diff_vector(action): if action == 0: # NORTH return -1, 0 elif action == 1: # EAST return 0, 1 elif action == 2: # SOUTH return 1, 0 elif action == 3: # WEST return 0, -1 def action_to_verbose(action): if action == 0: return 'NORTH' elif action == 1: return 'EAST' elif action == 2: return 'SOUTH' elif action == 3: return 'WEST' def sarsa(state, next_state, action, next_state_action): return reward(state, next_state), state.q_values[action] +\ learning_rate * (reward(state, next_state) + gamma * next_state.q_values[next_state_action] - state.q_values[action]) def q_learning(state, next_state, action, next_state_action): next_state_q_value = next_state.get_max_q_value() return reward(state, next_state), state.q_values[action] +\ learning_rate * (reward(state, next_state) + gamma * next_state_q_value - state.q_values[action]) N_STEPS = 10000 METHOD = 'BOTH' EPSILONS = [0.05, 0.1, 0.25] def run_code(use_q_learning=False, _epsilon=0.01): states = initialize_states() decay = 1 min_epsilon = 0.00001 epsilon = _epsilon episode_rewards = [] mistakes_array = [] # array which tracks error from convergence on each step
current_state = states[N_ROWS-1][0] # iterate until reaching a terminal state epsilon = max(min_epsilon, epsilon * decay) episode_reward = 0 while not current_state.is_terminal(): if random() < epsilon: next_action = randint(0, 3) else: next_action = current_state.get_max_q_index() di, dj = action_to_diff_vector(next_action) next_state = transition(states, current_state, di, dj) if random() < epsilon: next_state_action = randint(0, 3) else: next_state_action = next_state.get_max_q_index() if use_q_learning: reward, current_state.q_values[next_action] = q_learning(current_state, next_state, next_action, next_state_action) else: reward, current_state.q_values[next_action] = sarsa(current_state, next_state, next_action, next_state_action) # print(current_state, next_state, action_to_verbose(next_action), di, dj) episode_reward += reward current_state = next_state if len(episode_rewards): episode_rewards.append(episode_rewards[-1] + episode_reward) else: episode_rewards.append(episode_reward) ''' if (i % 100 == 0): print(i) ''' mistakes_array.append(check_accuracy(states)) return np.array(mistakes_array), states, episode_rewards def check_accuracy(states): correct_result = np.array([ [-3, -2, -1, 0 , 1 , 2 , 3 , 4 , 5 , 6 ], [-2, -1, 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 ], [-1, 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 ], [0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 ], [1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 ], [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ], ]) mistakes_delta = 0 for i in range(N_ROWS): for j in range(N_COLUMNS): mistakes_delta += abs(correct_result[i][j] - max(states[i][j].q_values)) return mistakes_delta def plot_errors(mistakes_sarsa, mistakes_q_learning): plt.gca().invert_yaxis() legend = [] for mistake_sarsa in mistakes_sarsa: plt.plot(mistake_sarsa[1]) legend.append(r'SARSA $\epsilon={}$'.format(mistake_sarsa[0])) for mistake_q_learning in mistakes_q_learning: plt.plot(mistake_q_learning[1]) legend.append(r'Q-learning $\epsilon={}$'.format(mistake_q_learning[0])) 
plt.grid(which='y') plt.legend(legend) plt.savefig('CLIFF_SARSA_VS_Q_LEARNING_{}.png'.format(N_STEPS)) # plt.show() def plot_best_q_values_states(states, method, epsilon, PLOTS, fig, ax): final_grid = np.array([[max(states[i][j].q_values) for j in range(N_COLUMNS)] for i in range(N_ROWS)]) if PLOTS > 2: ax = ax[PLOTS % 3, 1] else: ax = ax[PLOTS, 0] ax.imshow(final_grid, aspect='auto', cmap='coolwarm') # fig, ax = plt.subplots() ax.set_xticks(np.arange(N_COLUMNS)) ax.set_yticks(np.arange(N_ROWS)) ax.set_xticklabels([i for i in range(N_COLUMNS)]) ax.set_yticklabels([i for i in range(N_ROWS)]) plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor") # Loop over data dimensions and create text annotations. for i in range(N_ROWS): for j in range(N_COLUMNS): text = ax.text(j, i, '{:.2f}'.format(max(states[i][j].q_values)), ha="center", va="center", color="w") fig.tight_layout() ax.set_title("{}; $\epsilon={}$".format(method, epsilon)) for i in range(N_ROWS): str_ = "" for j in range(N_COLUMNS): str_ += str(int(final_grid[i][j])) + ", " PLOTS += 1 # plt.savefig('CLIFF_WALKING: {}-{}-{}.png'.format(N_STEPS, epsilon, method)) # plt.show() def display_optimal_policy(states, method, epsilon): print("{}; ε = {}".format(method, epsilon)) print('-' * 60) for i in range(len(states)): line_str = '' for j in range(len(states[0])): if j == 0: print('|', end='') if states[i][j].is_goal: print(Back.GREEN + ' ', end='') print(Style.RESET_ALL + ' | ', end='') elif states[i][j].is_cliff: print(Back.RED + ' ', end='') print(Style.RESET_ALL + ' | ', end='') else: print(' {} | '.format(q_to_arrow(states[i][j].get_max_q_index())), end='') print(line_str) print('-' * 60) if METHOD not in ['Q_LEARNING', 'SARSA', 'BOTH']: print('invalidt method. 
must be Q_LEARNING or SARSA or both') import sys; sys.exit() mistakes_q_learning = [] mistakes_sarsa = [] PLOTS = 0 fig, axes = plt.subplots(3, 2) rewards = [] for epsilon in EPSILONS: if METHOD == 'Q_LEARNING' or METHOD == 'BOTH': _mistakes_q_learning, end_states_q_learning, episode_rewards = run_code(use_q_learning=True, _epsilon=epsilon) plot_best_q_values_states(end_states_q_learning, 'Q_LEARNING', epsilon, PLOTS, fig, axes) display_optimal_policy(end_states_q_learning, 'Q LEARNING', epsilon) mistakes_q_learning.append((epsilon, _mistakes_q_learning)) rewards.append(('Q_LEARNING', epsilon, episode_rewards)) PLOTS += 1 for epsilon in EPSILONS: if METHOD == 'SARSA' or METHOD == 'BOTH': _mistakes_sarsa, end_states_sarsa, episode_rewards = run_code(use_q_learning=False, _epsilon=epsilon) plot_best_q_values_states(end_states_sarsa, 'SARSA', epsilon, PLOTS, fig, axes) display_optimal_policy(end_states_sarsa, 'SARSA', epsilon) mistakes_sarsa.append((epsilon, _mistakes_sarsa)) rewards.append(('SARSA', epsilon, episode_rewards)) PLOTS += 1 plt.savefig('all_runs.png') plt.show() # for i, j in [(0, 3), (1, 4), (2, 5)]: for reward in rewards: # plt.plot(rewards[i][2], 'o', label='{} ε = {} '.format(rewards[i][0], rewards[i][1])) # plt.plot(rewards[j][2], 'o', label='{} ε = {} '.format(rewards[j][0], rewards[j][1])) plt.plot(reward[2], label='{} ε = {} '.format(reward[0], reward[1])) plt.xlabel('Episodes') plt.ylabel('Sum of rewards during episode') plt.legend() plt.show() plt.savefig('episode_rewards.png') plot_errors(mistakes_sarsa, mistakes_q_learning)
for i in range(N_STEPS): # select a random starting state
random_line_split
cliff_walking.py
from math import * from numpy import * from random import * import numpy as np import matplotlib.pyplot as plt from colorama import Fore, Back, Style from gridworld import q_to_arrow N_ROWS = 6 N_COLUMNS = 10 class State(object): def __init__(self, i, j, is_cliff=False, is_goal=False): self.i = i self.j = j self.is_cliff = is_cliff self.is_goal = is_goal # north, east, south, west self.q_values = np.array([0.0, 0.0, 0.0, 0.0]) def __str__(self): return '({}, {})'.format(self.i, self.j) def is_terminal(self): return self.is_goal or self.is_cliff def get_max_q_index(self): best_q_values = np.argwhere(self.q_values == np.max(self.q_values)) if len(best_q_values) > 1: return best_q_values[randint(0, len(best_q_values) - 1)][0] else: _max_q = np.argmax(self.q_values) return _max_q def get_max_q_value(self): return np.max(self.q_values) def initialize_states(): # This is the set of states, all initialised with default values states = [[State(j, i) for i in range(N_COLUMNS)] for j in range(N_ROWS)] # make the cliff for j in range(1, N_COLUMNS - 1): states[-1][j].is_cliff = True states[-1][-1].is_goal = True return states # The reward function defines what reward I get for transitioning between the first and second state def reward(s_1, s_2): if (s_1.is_goal or s_1.is_cliff): return 0 elif (s_2.is_goal): return 10 elif (s_2.is_cliff): return -100 else: return -1 """ the transition function takes state and action and results in a new state, depending on their attributes. The method takes the whole state-space as an argument (since the transition depends on the attributes of the states in the state-space), which could for example be the "states" matrix from above, the current state s from the state-space (with its attributes), and the current action, which takes the form of a "difference vector. For example, dx = 0, dy = 1 means: Move to the south. 
dx = -1, dy = 0 means: Move to the left""" def transition(stsp, s, di, dj): if (s.is_cliff or s.is_goal): return s elif (s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS)): return s else: return stsp[s.i + di][s.j + dj] gamma = 1 learning_rate = 0.01 def action_to_diff_vector(action): if action == 0: # NORTH return -1, 0 elif action == 1: # EAST return 0, 1 elif action == 2: # SOUTH return 1, 0 elif action == 3: # WEST return 0, -1 def action_to_verbose(action): if action == 0: return 'NORTH' elif action == 1: return 'EAST' elif action == 2: return 'SOUTH' elif action == 3: return 'WEST' def sarsa(state, next_state, action, next_state_action): return reward(state, next_state), state.q_values[action] +\ learning_rate * (reward(state, next_state) + gamma * next_state.q_values[next_state_action] - state.q_values[action]) def q_learning(state, next_state, action, next_state_action): next_state_q_value = next_state.get_max_q_value() return reward(state, next_state), state.q_values[action] +\ learning_rate * (reward(state, next_state) + gamma * next_state_q_value - state.q_values[action]) N_STEPS = 10000 METHOD = 'BOTH' EPSILONS = [0.05, 0.1, 0.25] def run_code(use_q_learning=False, _epsilon=0.01): states = initialize_states() decay = 1 min_epsilon = 0.00001 epsilon = _epsilon episode_rewards = [] mistakes_array = [] # array which tracks error from convergence on each step for i in range(N_STEPS): # select a random starting state current_state = states[N_ROWS-1][0] # iterate until reaching a terminal state epsilon = max(min_epsilon, epsilon * decay) episode_reward = 0 while not current_state.is_terminal(): if random() < epsilon: next_action = randint(0, 3) else: next_action = current_state.get_max_q_index() di, dj = action_to_diff_vector(next_action) next_state = transition(states, current_state, di, dj) if random() < epsilon: next_state_action = randint(0, 3) else: next_state_action = next_state.get_max_q_index() if use_q_learning: reward, 
current_state.q_values[next_action] = q_learning(current_state, next_state, next_action, next_state_action) else: reward, current_state.q_values[next_action] = sarsa(current_state, next_state, next_action, next_state_action) # print(current_state, next_state, action_to_verbose(next_action), di, dj) episode_reward += reward current_state = next_state if len(episode_rewards): episode_rewards.append(episode_rewards[-1] + episode_reward) else: episode_rewards.append(episode_reward) ''' if (i % 100 == 0): print(i) ''' mistakes_array.append(check_accuracy(states)) return np.array(mistakes_array), states, episode_rewards def check_accuracy(states): correct_result = np.array([ [-3, -2, -1, 0 , 1 , 2 , 3 , 4 , 5 , 6 ], [-2, -1, 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 ], [-1, 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 ], [0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 ], [1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 ], [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ], ]) mistakes_delta = 0 for i in range(N_ROWS): for j in range(N_COLUMNS): mistakes_delta += abs(correct_result[i][j] - max(states[i][j].q_values)) return mistakes_delta def plot_errors(mistakes_sarsa, mistakes_q_learning): plt.gca().invert_yaxis() legend = [] for mistake_sarsa in mistakes_sarsa: plt.plot(mistake_sarsa[1]) legend.append(r'SARSA $\epsilon={}$'.format(mistake_sarsa[0])) for mistake_q_learning in mistakes_q_learning: plt.plot(mistake_q_learning[1]) legend.append(r'Q-learning $\epsilon={}$'.format(mistake_q_learning[0])) plt.grid(which='y') plt.legend(legend) plt.savefig('CLIFF_SARSA_VS_Q_LEARNING_{}.png'.format(N_STEPS)) # plt.show() def plot_best_q_values_states(states, method, epsilon, PLOTS, fig, ax): final_grid = np.array([[max(states[i][j].q_values) for j in range(N_COLUMNS)] for i in range(N_ROWS)]) if PLOTS > 2: ax = ax[PLOTS % 3, 1] else: ax = ax[PLOTS, 0] ax.imshow(final_grid, aspect='auto', cmap='coolwarm') # fig, ax = plt.subplots() ax.set_xticks(np.arange(N_COLUMNS)) ax.set_yticks(np.arange(N_ROWS)) ax.set_xticklabels([i for i 
in range(N_COLUMNS)]) ax.set_yticklabels([i for i in range(N_ROWS)]) plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor") # Loop over data dimensions and create text annotations. for i in range(N_ROWS): for j in range(N_COLUMNS): text = ax.text(j, i, '{:.2f}'.format(max(states[i][j].q_values)), ha="center", va="center", color="w") fig.tight_layout() ax.set_title("{}; $\epsilon={}$".format(method, epsilon)) for i in range(N_ROWS):
PLOTS += 1 # plt.savefig('CLIFF_WALKING: {}-{}-{}.png'.format(N_STEPS, epsilon, method)) # plt.show() def display_optimal_policy(states, method, epsilon): print("{}; ε = {}".format(method, epsilon)) print('-' * 60) for i in range(len(states)): line_str = '' for j in range(len(states[0])): if j == 0: print('|', end='') if states[i][j].is_goal: print(Back.GREEN + ' ', end='') print(Style.RESET_ALL + ' | ', end='') elif states[i][j].is_cliff: print(Back.RED + ' ', end='') print(Style.RESET_ALL + ' | ', end='') else: print(' {} | '.format(q_to_arrow(states[i][j].get_max_q_index())), end='') print(line_str) print('-' * 60) if METHOD not in ['Q_LEARNING', 'SARSA', 'BOTH']: print('invalidt method. must be Q_LEARNING or SARSA or both') import sys; sys.exit() mistakes_q_learning = [] mistakes_sarsa = [] PLOTS = 0 fig, axes = plt.subplots(3, 2) rewards = [] for epsilon in EPSILONS: if METHOD == 'Q_LEARNING' or METHOD == 'BOTH': _mistakes_q_learning, end_states_q_learning, episode_rewards = run_code(use_q_learning=True, _epsilon=epsilon) plot_best_q_values_states(end_states_q_learning, 'Q_LEARNING', epsilon, PLOTS, fig, axes) display_optimal_policy(end_states_q_learning, 'Q LEARNING', epsilon) mistakes_q_learning.append((epsilon, _mistakes_q_learning)) rewards.append(('Q_LEARNING', epsilon, episode_rewards)) PLOTS += 1 for epsilon in EPSILONS: if METHOD == 'SARSA' or METHOD == 'BOTH': _mistakes_sarsa, end_states_sarsa, episode_rewards = run_code(use_q_learning=False, _epsilon=epsilon) plot_best_q_values_states(end_states_sarsa, 'SARSA', epsilon, PLOTS, fig, axes) display_optimal_policy(end_states_sarsa, 'SARSA', epsilon) mistakes_sarsa.append((epsilon, _mistakes_sarsa)) rewards.append(('SARSA', epsilon, episode_rewards)) PLOTS += 1 plt.savefig('all_runs.png') plt.show() # for i, j in [(0, 3), (1, 4), (2, 5)]: for reward in rewards: # plt.plot(rewards[i][2], 'o', label='{} ε = {} '.format(rewards[i][0], rewards[i][1])) # plt.plot(rewards[j][2], 'o', label='{} ε = {} 
'.format(rewards[j][0], rewards[j][1])) plt.plot(reward[2], label='{} ε = {} '.format(reward[0], reward[1])) plt.xlabel('Episodes') plt.ylabel('Sum of rewards during episode') plt.legend() plt.show() plt.savefig('episode_rewards.png') plot_errors(mistakes_sarsa, mistakes_q_learning)
str_ = "" for j in range(N_COLUMNS): str_ += str(int(final_grid[i][j])) + ", "
conditional_block
cliff_walking.py
from math import * from numpy import * from random import * import numpy as np import matplotlib.pyplot as plt from colorama import Fore, Back, Style from gridworld import q_to_arrow N_ROWS = 6 N_COLUMNS = 10 class State(object): def __init__(self, i, j, is_cliff=False, is_goal=False): self.i = i self.j = j self.is_cliff = is_cliff self.is_goal = is_goal # north, east, south, west self.q_values = np.array([0.0, 0.0, 0.0, 0.0]) def __str__(self): return '({}, {})'.format(self.i, self.j) def is_terminal(self): return self.is_goal or self.is_cliff def get_max_q_index(self): best_q_values = np.argwhere(self.q_values == np.max(self.q_values)) if len(best_q_values) > 1: return best_q_values[randint(0, len(best_q_values) - 1)][0] else: _max_q = np.argmax(self.q_values) return _max_q def get_max_q_value(self): return np.max(self.q_values) def initialize_states(): # This is the set of states, all initialised with default values states = [[State(j, i) for i in range(N_COLUMNS)] for j in range(N_ROWS)] # make the cliff for j in range(1, N_COLUMNS - 1): states[-1][j].is_cliff = True states[-1][-1].is_goal = True return states # The reward function defines what reward I get for transitioning between the first and second state def reward(s_1, s_2): if (s_1.is_goal or s_1.is_cliff): return 0 elif (s_2.is_goal): return 10 elif (s_2.is_cliff): return -100 else: return -1 """ the transition function takes state and action and results in a new state, depending on their attributes. The method takes the whole state-space as an argument (since the transition depends on the attributes of the states in the state-space), which could for example be the "states" matrix from above, the current state s from the state-space (with its attributes), and the current action, which takes the form of a "difference vector. For example, dx = 0, dy = 1 means: Move to the south. 
dx = -1, dy = 0 means: Move to the left""" def transition(stsp, s, di, dj): if (s.is_cliff or s.is_goal): return s elif (s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS)): return s else: return stsp[s.i + di][s.j + dj] gamma = 1 learning_rate = 0.01 def action_to_diff_vector(action):
def action_to_verbose(action): if action == 0: return 'NORTH' elif action == 1: return 'EAST' elif action == 2: return 'SOUTH' elif action == 3: return 'WEST' def sarsa(state, next_state, action, next_state_action): return reward(state, next_state), state.q_values[action] +\ learning_rate * (reward(state, next_state) + gamma * next_state.q_values[next_state_action] - state.q_values[action]) def q_learning(state, next_state, action, next_state_action): next_state_q_value = next_state.get_max_q_value() return reward(state, next_state), state.q_values[action] +\ learning_rate * (reward(state, next_state) + gamma * next_state_q_value - state.q_values[action]) N_STEPS = 10000 METHOD = 'BOTH' EPSILONS = [0.05, 0.1, 0.25] def run_code(use_q_learning=False, _epsilon=0.01): states = initialize_states() decay = 1 min_epsilon = 0.00001 epsilon = _epsilon episode_rewards = [] mistakes_array = [] # array which tracks error from convergence on each step for i in range(N_STEPS): # select a random starting state current_state = states[N_ROWS-1][0] # iterate until reaching a terminal state epsilon = max(min_epsilon, epsilon * decay) episode_reward = 0 while not current_state.is_terminal(): if random() < epsilon: next_action = randint(0, 3) else: next_action = current_state.get_max_q_index() di, dj = action_to_diff_vector(next_action) next_state = transition(states, current_state, di, dj) if random() < epsilon: next_state_action = randint(0, 3) else: next_state_action = next_state.get_max_q_index() if use_q_learning: reward, current_state.q_values[next_action] = q_learning(current_state, next_state, next_action, next_state_action) else: reward, current_state.q_values[next_action] = sarsa(current_state, next_state, next_action, next_state_action) # print(current_state, next_state, action_to_verbose(next_action), di, dj) episode_reward += reward current_state = next_state if len(episode_rewards): episode_rewards.append(episode_rewards[-1] + episode_reward) else: 
episode_rewards.append(episode_reward) ''' if (i % 100 == 0): print(i) ''' mistakes_array.append(check_accuracy(states)) return np.array(mistakes_array), states, episode_rewards def check_accuracy(states): correct_result = np.array([ [-3, -2, -1, 0 , 1 , 2 , 3 , 4 , 5 , 6 ], [-2, -1, 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 ], [-1, 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 ], [0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 ], [1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 ], [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ], ]) mistakes_delta = 0 for i in range(N_ROWS): for j in range(N_COLUMNS): mistakes_delta += abs(correct_result[i][j] - max(states[i][j].q_values)) return mistakes_delta def plot_errors(mistakes_sarsa, mistakes_q_learning): plt.gca().invert_yaxis() legend = [] for mistake_sarsa in mistakes_sarsa: plt.plot(mistake_sarsa[1]) legend.append(r'SARSA $\epsilon={}$'.format(mistake_sarsa[0])) for mistake_q_learning in mistakes_q_learning: plt.plot(mistake_q_learning[1]) legend.append(r'Q-learning $\epsilon={}$'.format(mistake_q_learning[0])) plt.grid(which='y') plt.legend(legend) plt.savefig('CLIFF_SARSA_VS_Q_LEARNING_{}.png'.format(N_STEPS)) # plt.show() def plot_best_q_values_states(states, method, epsilon, PLOTS, fig, ax): final_grid = np.array([[max(states[i][j].q_values) for j in range(N_COLUMNS)] for i in range(N_ROWS)]) if PLOTS > 2: ax = ax[PLOTS % 3, 1] else: ax = ax[PLOTS, 0] ax.imshow(final_grid, aspect='auto', cmap='coolwarm') # fig, ax = plt.subplots() ax.set_xticks(np.arange(N_COLUMNS)) ax.set_yticks(np.arange(N_ROWS)) ax.set_xticklabels([i for i in range(N_COLUMNS)]) ax.set_yticklabels([i for i in range(N_ROWS)]) plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor") # Loop over data dimensions and create text annotations. 
for i in range(N_ROWS): for j in range(N_COLUMNS): text = ax.text(j, i, '{:.2f}'.format(max(states[i][j].q_values)), ha="center", va="center", color="w") fig.tight_layout() ax.set_title("{}; $\epsilon={}$".format(method, epsilon)) for i in range(N_ROWS): str_ = "" for j in range(N_COLUMNS): str_ += str(int(final_grid[i][j])) + ", " PLOTS += 1 # plt.savefig('CLIFF_WALKING: {}-{}-{}.png'.format(N_STEPS, epsilon, method)) # plt.show() def display_optimal_policy(states, method, epsilon): print("{}; ε = {}".format(method, epsilon)) print('-' * 60) for i in range(len(states)): line_str = '' for j in range(len(states[0])): if j == 0: print('|', end='') if states[i][j].is_goal: print(Back.GREEN + ' ', end='') print(Style.RESET_ALL + ' | ', end='') elif states[i][j].is_cliff: print(Back.RED + ' ', end='') print(Style.RESET_ALL + ' | ', end='') else: print(' {} | '.format(q_to_arrow(states[i][j].get_max_q_index())), end='') print(line_str) print('-' * 60) if METHOD not in ['Q_LEARNING', 'SARSA', 'BOTH']: print('invalidt method. 
must be Q_LEARNING or SARSA or both') import sys; sys.exit() mistakes_q_learning = [] mistakes_sarsa = [] PLOTS = 0 fig, axes = plt.subplots(3, 2) rewards = [] for epsilon in EPSILONS: if METHOD == 'Q_LEARNING' or METHOD == 'BOTH': _mistakes_q_learning, end_states_q_learning, episode_rewards = run_code(use_q_learning=True, _epsilon=epsilon) plot_best_q_values_states(end_states_q_learning, 'Q_LEARNING', epsilon, PLOTS, fig, axes) display_optimal_policy(end_states_q_learning, 'Q LEARNING', epsilon) mistakes_q_learning.append((epsilon, _mistakes_q_learning)) rewards.append(('Q_LEARNING', epsilon, episode_rewards)) PLOTS += 1 for epsilon in EPSILONS: if METHOD == 'SARSA' or METHOD == 'BOTH': _mistakes_sarsa, end_states_sarsa, episode_rewards = run_code(use_q_learning=False, _epsilon=epsilon) plot_best_q_values_states(end_states_sarsa, 'SARSA', epsilon, PLOTS, fig, axes) display_optimal_policy(end_states_sarsa, 'SARSA', epsilon) mistakes_sarsa.append((epsilon, _mistakes_sarsa)) rewards.append(('SARSA', epsilon, episode_rewards)) PLOTS += 1 plt.savefig('all_runs.png') plt.show() # for i, j in [(0, 3), (1, 4), (2, 5)]: for reward in rewards: # plt.plot(rewards[i][2], 'o', label='{} ε = {} '.format(rewards[i][0], rewards[i][1])) # plt.plot(rewards[j][2], 'o', label='{} ε = {} '.format(rewards[j][0], rewards[j][1])) plt.plot(reward[2], label='{} ε = {} '.format(reward[0], reward[1])) plt.xlabel('Episodes') plt.ylabel('Sum of rewards during episode') plt.legend() plt.show() plt.savefig('episode_rewards.png') plot_errors(mistakes_sarsa, mistakes_q_learning)
if action == 0: # NORTH return -1, 0 elif action == 1: # EAST return 0, 1 elif action == 2: # SOUTH return 1, 0 elif action == 3: # WEST return 0, -1
identifier_body
rule_builder.rs
use { crate::{ mir::{self, BlockRef, LValLink}, mir_ext::mark_persistent_recursive, optimizer::{ BuildGroups, ClosureBuilder, GroupBuilder, GroupDyn, GroupSet, MatchMap, OptimizerAspect, SuccBuilder, closure::{Closure, ClosureId, ClosureSeed}, closure_interner::{ClosureInterner}, dfa_builder::{DFABuilder}, }, runtime::{ Grammar, ReduceId, options::Options, }, }, lang_mir::Cursor, north_core::{ compiler::Compiler, iter::ModelIterator, model::{Child, Link, ModelCell}, node_id::{NodeId, ToNodeId}, structure::ForestExt, visitor::{VisitorAspect, VisitCtxCore}, }, std::{ any::Any, cell::RefCell, collections::{HashMap, VecDeque, hash_map::Entry::*}, mem::replace, rc::Rc, } }; //////////////////////////////////////////////////////////////////////////////////////////////// crate type GroupPair = (Box<dyn GroupDyn>, Box<dyn Any>); //////////////////////////////////////////////////////////////////////////////////////////////// crate enum BuildJob { BuildOne { cursor: Cursor, closure: Closure, pair: GroupPair, }, BuildSet { cursor: Cursor, closure: Closure, }, } //////////////////////////////////////////////////////////////////////////////////////////////// // ClosureInterner // Closure -> TransitionTable pub struct RuleBuilder { crate _optimizer: Rc<OptimizerAspect>, crate visitor: Rc<VisitorAspect>, crate output_model: ModelCell, crate options: Options, crate closure_builder: Rc<ClosureBuilder>, crate closure_interner: Rc<ClosureInterner>, crate dfa_builder: DFABuilder, crate grammar: Option<Rc<Grammar>>, crate reduce_ids: Vec<ReduceId>, crate closure_map: HashMap<Closure, Option<BlockRef>>, // IndexMap crate value_map: HashMap<NodeId, NodeId>, crate build_queue: VecDeque<BuildJob>, crate fail_block: Option<BlockRef>, crate num_built: usize, crate origin_matches: MatchMap, crate origin_stmts: Rc<RefCell<Vec<GroupPair>>>, crate rule_ty: Option<Link<mir::TypeFn>>, crate iter_locals: Vec<LValLink>, crate locals: Vec<Child<mir::RuleLocal>>, crate blocks: Vec<Child<mir::Block>>, 
} impl RuleBuilder { pub fn new(comp: &Compiler, options: Options) -> Self { let visitor = comp.aspect_mut::<VisitorAspect>(); let closure_builder = Rc::new(ClosureBuilder::new(visitor.clone())); let closure_interner = ClosureInterner::new(); let dfa_builder = DFABuilder::new( closure_builder.clone(), closure_interner.clone(), visitor.clone(), ); Self { _optimizer: comp.aspect_mut::<OptimizerAspect>(), visitor: visitor.clone(), output_model: comp.model_cell.clone(), options, closure_builder, closure_interner, dfa_builder, grammar: None, reduce_ids: Vec::new(), closure_map: HashMap::new(), value_map: HashMap::new(), build_queue: VecDeque::new(), fail_block: None, num_built: 0, origin_matches: MatchMap::default(), origin_stmts: <_>::default(), rule_ty: None, iter_locals: Vec::new(), locals: Vec::new(), blocks: Vec::new(), } } pub fn add_rules(&mut self, rules: &Vec<NodeId>) { let seed = { let model = self.visitor.input_model.borrow(); let first_rule = model.get::<_, mir::ItemRule>(rules[0]).unwrap(); self.rule_ty = Some(first_rule.rule_ty.clone()); model.iter(rules) .borrow_cast_nodes_to::<mir::ItemRule>() .filter_map(|r| r.blocks.first().cloned()) .map(|b| model.node(b).first_node()) .collect::<Vec<_>>() }; let mut seed = ClosureSeed::new(seed); seed.follow_calls = true; let _main_block = self.resolve_closure_seed(seed); let fail_seed = ClosureSeed::default(); let fail_block = self.resolve_closure_seed(fail_seed); self.fail_block = Some(fail_block); } pub fn add_value_map<A: ToNodeId, B: ToNodeId>(&mut self, from: A, to: B) { self.value_map.insert(from.to_top(), to.to_top()); } pub fn at_origin(&self) -> bool { self.num_built <= 2 } pub fn build( &mut self, grammar: Rc<Grammar>, rules: &Vec<NodeId>, reduce_ids: Vec<ReduceId>, ) -> (String, NodeId<mir::ItemRule>) { assert!(rules.len() >= 1); self.closure_builder.set_grammar(Some(grammar.clone())); self.grammar = Some(grammar); self.reduce_ids = reduce_ids; self.add_rules(rules); while let Some(job) = 
self.build_queue.pop_front() { match job { BuildJob::BuildOne { cursor, closure, pair } => { // println!("BUILD_ONE {:?} => {:?}", closure, cursor.orig_block_id()); self.build_one(cursor, closure, pair); }, BuildJob::BuildSet { cursor, closure } => { // println!("BUILD_CLOSURE {:?} => {:?}", closure, cursor.orig_block_id()); self.build_set(cursor, closure); self.num_built += 1; }, } } // self.dump_info(); self.closure_builder.set_grammar(None); self.closure_map.clear(); self.grammar = None; self.iter_locals.clear(); self.num_built = 0; self.origin_matches.clear(); self.origin_stmts.borrow_mut().clear(); self.value_map.clear(); let mut model = self.output_model.borrow_mut(); let node_id = model.new_node(); let name = format!("opt_{}", node_id.idx()); //println!("BUILD LOCALS {:?}", self.locals.len()); model.complete_node(node_id, mir::ItemRule { name: name.clone(), rule_ty: self.rule_ty.take().unwrap(), params: Vec::new(), locals: replace(&mut self.locals, Vec::new()), blocks: replace(&mut self.blocks, Vec::new()), }); drop(model); // Mark persistent blocks mark_persistent_recursive(&*self.visitor, node_id); (name, node_id) } pub fn build_one(&mut self, mut cursor: Cursor, closure: Closure, pair: GroupPair) { let (group_k, group_v) = pair; let succ_seed = self.successors(closure); let succ_closure = self.closure(succ_seed); group_k.build_mir_dyn(&*group_v, self, &mut cursor); match (cursor.is_completed(), succ_closure.is_empty()) { (true, true) => { }, (true, false) => { panic!("completed group with successors"); }, (false, true) => { self.fuse_block(cursor); }, (false, false) =>
, } } crate fn build_origin_stmts(&mut self, cursor: &mut Cursor) { let stmts = self.origin_stmts.clone(); let stmts = stmts.borrow(); for (k, v) in stmts.iter() { k.build_mir_dyn(&**v, self, cursor); } } pub fn build_set(&mut self, mut cursor: Cursor, closure: Closure) { if closure.is_empty() { cursor.build_ctl(mir::CtlFail { }); return; } let group_set = self.group(&closure); group_set.build_mir(self, cursor); } pub fn closure(&self, seed: ClosureSeed) -> Closure { self.closure_builder.build(seed) } pub fn dump_info(&self) { let mut map = HashMap::new(); for (closure, _) in &self.closure_map { let mut closure_ids = closure.entries.clone(); closure_ids.sort(); map.insert(closure_ids, ()); } println!("CLOSURES {:?} vs {:?}", map.len(), self.closure_map.len()); } pub fn fail_block(&self) -> BlockRef { self.fail_block.clone().unwrap() } pub fn fuse_block(&self, mut cur: Cursor) { if !cur.is_completed() { let fail_block = self.fail_block(); cur.build_ctl(mir::CtlBr { block: fail_block }); } } pub fn group(&mut self, closure: &Closure) -> GroupSet { let mut group_buider = GroupBuilder::new(); let visitor = self.visitor.clone(); for entry in &closure.entries { let node_id = *entry; let core = VisitCtxCore::<BuildGroups> { aspect: &*visitor, node_id, imp_args: self }; let _ = visitor.visit(core, &mut group_buider); } group_buider.complete() } pub fn intern_closure(&self, closure: Closure) -> ClosureId { self.closure_interner.intern(closure) } pub fn is_origin_pure(&self) -> bool { let incorporate_reductions = self.options.incorporate_reductions; let is_pure = self.origin_stmts.borrow().is_empty(); incorporate_reductions && is_pure } pub fn is_reduce_external(&self, reduce_id: ReduceId) -> bool { self.reduce_ids.contains(&reduce_id) } pub fn map_local(&mut self, src: NodeId<mir::RuleLocal>) -> NodeId<mir::RuleLocal> { let ty = { let model = self.visitor.input_model.borrow(); let node = model.node(src); node.ty.clone() // XXX: BAD: type needs to be deep copied/transfered 
}; let mut model_out = self.output_model.borrow_mut(); let local_out = model_out.build_node(mir::RuleLocal { ty, index: self.locals.len() }); self.locals.push(local_out.into()); local_out } pub fn map_local_iter(&mut self, src: &LValLink, depth: usize) -> LValLink { if let Some(local) = self.iter_locals.get(depth).cloned() { self.add_value_map(src, &local); return local; } if depth != self.iter_locals.len() { panic!("invalid depth"); } let local = self.map_lval(src); self.iter_locals.push(local.clone()); local } pub fn map_lval(&mut self, src: &LValLink) -> LValLink { let node_id = src.to_top(); if let Some(result) = self.value_map.get(&node_id) { return result.cast().into(); } let new_local = self.map_local(node_id.cast()); self.value_map.insert(node_id, new_local.to_top()); new_local.cast().into() } pub fn map_lval_lookup(&mut self, src: &LValLink) -> LValLink { let node_id = src.to_top(); match self.value_map.get(&node_id) { Some(result) => result.cast().into(), None => panic!("unknown value: {:?}", node_id), } } crate fn origin_matches(&self, reduce_id: ReduceId) -> ClosureSeed { let mut result = ClosureSeed::default(); if let Some(grammar) = &self.grammar { let match_map = &grammar.match_map; for (match_id, prec_map) in &self.origin_matches { let prec = match match_map.get(*match_id, reduce_id) { Some(prec) => prec, None => continue, }; if let Some(closure) = prec_map.get(&prec) { result.merge(closure); } } } result } crate fn queue_build_one(&mut self, cursor: Cursor, closure: Closure, pair: GroupPair) { let job = BuildJob::BuildOne { cursor, closure, pair }; self.build_queue.push_back(job); } crate fn resolve<F>(&mut self, closure: Closure, job_ctor: F) -> BlockRef where F: FnOnce(Cursor, Closure) -> BuildJob { match self.closure_map.entry(closure) { Occupied(occupied) => { occupied.get().clone().unwrap() } Vacant(vacant) => { let mut model = self.output_model.borrow_mut(); let block_id = model.new_node::<mir::Block>(); self.blocks.push(block_id.into()); let 
cursor = Cursor::new(self.output_model.clone(), block_id); let job = job_ctor(cursor, vacant.key().clone()); self.build_queue.push_back(job); vacant.insert(Some(block_id.into())); block_id.into() } } } crate fn resolve_closure(&mut self, closure: Closure) -> BlockRef { self.resolve(closure, |cursor, closure| { BuildJob::BuildSet { cursor, closure } }) } crate fn resolve_closure_seed(&mut self, seed: ClosureSeed) -> BlockRef { let closure = self.closure(seed); self.resolve_closure(closure) } crate fn resolve_one(&mut self, seed: ClosureSeed, pair: GroupPair) -> BlockRef { let closure = seed.into_closure(); self.resolve(closure, |cursor, closure| { BuildJob::BuildOne { cursor, closure, pair } }) } crate fn successors(&self, seed: Closure) -> ClosureSeed { let mut builder = SuccBuilder::new(seed.entries); builder.build(&*self.visitor); builder.complete() } } ////////////////////////////////////////////////////////////////////////////////////////////////
{ let job = BuildJob::BuildSet { cursor, closure: succ_closure }; self.build_queue.push_back(job); }
conditional_block
rule_builder.rs
use { crate::{ mir::{self, BlockRef, LValLink}, mir_ext::mark_persistent_recursive, optimizer::{ BuildGroups, ClosureBuilder, GroupBuilder, GroupDyn, GroupSet, MatchMap, OptimizerAspect, SuccBuilder, closure::{Closure, ClosureId, ClosureSeed}, closure_interner::{ClosureInterner}, dfa_builder::{DFABuilder}, }, runtime::{ Grammar, ReduceId, options::Options, }, }, lang_mir::Cursor, north_core::{ compiler::Compiler, iter::ModelIterator, model::{Child, Link, ModelCell}, node_id::{NodeId, ToNodeId}, structure::ForestExt, visitor::{VisitorAspect, VisitCtxCore}, }, std::{ any::Any, cell::RefCell, collections::{HashMap, VecDeque, hash_map::Entry::*}, mem::replace, rc::Rc, } }; //////////////////////////////////////////////////////////////////////////////////////////////// crate type GroupPair = (Box<dyn GroupDyn>, Box<dyn Any>); //////////////////////////////////////////////////////////////////////////////////////////////// crate enum BuildJob { BuildOne { cursor: Cursor, closure: Closure, pair: GroupPair, }, BuildSet { cursor: Cursor, closure: Closure, }, } //////////////////////////////////////////////////////////////////////////////////////////////// // ClosureInterner // Closure -> TransitionTable pub struct RuleBuilder { crate _optimizer: Rc<OptimizerAspect>, crate visitor: Rc<VisitorAspect>, crate output_model: ModelCell, crate options: Options, crate closure_builder: Rc<ClosureBuilder>, crate closure_interner: Rc<ClosureInterner>, crate dfa_builder: DFABuilder, crate grammar: Option<Rc<Grammar>>, crate reduce_ids: Vec<ReduceId>, crate closure_map: HashMap<Closure, Option<BlockRef>>, // IndexMap crate value_map: HashMap<NodeId, NodeId>, crate build_queue: VecDeque<BuildJob>, crate fail_block: Option<BlockRef>, crate num_built: usize, crate origin_matches: MatchMap, crate origin_stmts: Rc<RefCell<Vec<GroupPair>>>, crate rule_ty: Option<Link<mir::TypeFn>>, crate iter_locals: Vec<LValLink>, crate locals: Vec<Child<mir::RuleLocal>>, crate blocks: Vec<Child<mir::Block>>, 
} impl RuleBuilder { pub fn new(comp: &Compiler, options: Options) -> Self { let visitor = comp.aspect_mut::<VisitorAspect>(); let closure_builder = Rc::new(ClosureBuilder::new(visitor.clone())); let closure_interner = ClosureInterner::new(); let dfa_builder = DFABuilder::new( closure_builder.clone(), closure_interner.clone(), visitor.clone(), ); Self { _optimizer: comp.aspect_mut::<OptimizerAspect>(), visitor: visitor.clone(), output_model: comp.model_cell.clone(), options, closure_builder, closure_interner, dfa_builder, grammar: None, reduce_ids: Vec::new(), closure_map: HashMap::new(), value_map: HashMap::new(), build_queue: VecDeque::new(), fail_block: None, num_built: 0, origin_matches: MatchMap::default(), origin_stmts: <_>::default(), rule_ty: None, iter_locals: Vec::new(), locals: Vec::new(), blocks: Vec::new(), } } pub fn add_rules(&mut self, rules: &Vec<NodeId>) { let seed = { let model = self.visitor.input_model.borrow(); let first_rule = model.get::<_, mir::ItemRule>(rules[0]).unwrap(); self.rule_ty = Some(first_rule.rule_ty.clone()); model.iter(rules) .borrow_cast_nodes_to::<mir::ItemRule>() .filter_map(|r| r.blocks.first().cloned()) .map(|b| model.node(b).first_node()) .collect::<Vec<_>>() }; let mut seed = ClosureSeed::new(seed); seed.follow_calls = true; let _main_block = self.resolve_closure_seed(seed); let fail_seed = ClosureSeed::default(); let fail_block = self.resolve_closure_seed(fail_seed); self.fail_block = Some(fail_block); } pub fn add_value_map<A: ToNodeId, B: ToNodeId>(&mut self, from: A, to: B) { self.value_map.insert(from.to_top(), to.to_top()); } pub fn at_origin(&self) -> bool { self.num_built <= 2 } pub fn build( &mut self, grammar: Rc<Grammar>, rules: &Vec<NodeId>, reduce_ids: Vec<ReduceId>, ) -> (String, NodeId<mir::ItemRule>) { assert!(rules.len() >= 1); self.closure_builder.set_grammar(Some(grammar.clone())); self.grammar = Some(grammar); self.reduce_ids = reduce_ids; self.add_rules(rules); while let Some(job) = 
self.build_queue.pop_front() { match job { BuildJob::BuildOne { cursor, closure, pair } => { // println!("BUILD_ONE {:?} => {:?}", closure, cursor.orig_block_id()); self.build_one(cursor, closure, pair); }, BuildJob::BuildSet { cursor, closure } => { // println!("BUILD_CLOSURE {:?} => {:?}", closure, cursor.orig_block_id()); self.build_set(cursor, closure); self.num_built += 1; }, } } // self.dump_info(); self.closure_builder.set_grammar(None); self.closure_map.clear(); self.grammar = None; self.iter_locals.clear(); self.num_built = 0; self.origin_matches.clear(); self.origin_stmts.borrow_mut().clear(); self.value_map.clear(); let mut model = self.output_model.borrow_mut(); let node_id = model.new_node(); let name = format!("opt_{}", node_id.idx()); //println!("BUILD LOCALS {:?}", self.locals.len()); model.complete_node(node_id, mir::ItemRule { name: name.clone(), rule_ty: self.rule_ty.take().unwrap(), params: Vec::new(), locals: replace(&mut self.locals, Vec::new()), blocks: replace(&mut self.blocks, Vec::new()), }); drop(model); // Mark persistent blocks mark_persistent_recursive(&*self.visitor, node_id); (name, node_id) } pub fn build_one(&mut self, mut cursor: Cursor, closure: Closure, pair: GroupPair) { let (group_k, group_v) = pair; let succ_seed = self.successors(closure); let succ_closure = self.closure(succ_seed); group_k.build_mir_dyn(&*group_v, self, &mut cursor); match (cursor.is_completed(), succ_closure.is_empty()) { (true, true) => { }, (true, false) => { panic!("completed group with successors"); }, (false, true) => { self.fuse_block(cursor); }, (false, false) => { let job = BuildJob::BuildSet { cursor, closure: succ_closure }; self.build_queue.push_back(job); }, } } crate fn build_origin_stmts(&mut self, cursor: &mut Cursor) { let stmts = self.origin_stmts.clone(); let stmts = stmts.borrow(); for (k, v) in stmts.iter() { k.build_mir_dyn(&**v, self, cursor); } } pub fn build_set(&mut self, mut cursor: Cursor, closure: Closure) { if 
closure.is_empty() { cursor.build_ctl(mir::CtlFail { }); return; } let group_set = self.group(&closure); group_set.build_mir(self, cursor); } pub fn closure(&self, seed: ClosureSeed) -> Closure { self.closure_builder.build(seed) } pub fn dump_info(&self) { let mut map = HashMap::new(); for (closure, _) in &self.closure_map { let mut closure_ids = closure.entries.clone(); closure_ids.sort(); map.insert(closure_ids, ()); } println!("CLOSURES {:?} vs {:?}", map.len(), self.closure_map.len()); } pub fn fail_block(&self) -> BlockRef { self.fail_block.clone().unwrap() } pub fn fuse_block(&self, mut cur: Cursor) { if !cur.is_completed() { let fail_block = self.fail_block(); cur.build_ctl(mir::CtlBr { block: fail_block }); } } pub fn group(&mut self, closure: &Closure) -> GroupSet { let mut group_buider = GroupBuilder::new(); let visitor = self.visitor.clone(); for entry in &closure.entries { let node_id = *entry; let core = VisitCtxCore::<BuildGroups> { aspect: &*visitor, node_id, imp_args: self }; let _ = visitor.visit(core, &mut group_buider); } group_buider.complete() } pub fn intern_closure(&self, closure: Closure) -> ClosureId { self.closure_interner.intern(closure) } pub fn is_origin_pure(&self) -> bool { let incorporate_reductions = self.options.incorporate_reductions; let is_pure = self.origin_stmts.borrow().is_empty(); incorporate_reductions && is_pure } pub fn is_reduce_external(&self, reduce_id: ReduceId) -> bool { self.reduce_ids.contains(&reduce_id) } pub fn map_local(&mut self, src: NodeId<mir::RuleLocal>) -> NodeId<mir::RuleLocal> { let ty = { let model = self.visitor.input_model.borrow(); let node = model.node(src); node.ty.clone() // XXX: BAD: type needs to be deep copied/transfered }; let mut model_out = self.output_model.borrow_mut(); let local_out = model_out.build_node(mir::RuleLocal { ty, index: self.locals.len() }); self.locals.push(local_out.into()); local_out } pub fn map_local_iter(&mut self, src: &LValLink, depth: usize) -> LValLink { if let 
Some(local) = self.iter_locals.get(depth).cloned() { self.add_value_map(src, &local); return local; } if depth != self.iter_locals.len() { panic!("invalid depth"); } let local = self.map_lval(src); self.iter_locals.push(local.clone()); local } pub fn map_lval(&mut self, src: &LValLink) -> LValLink { let node_id = src.to_top(); if let Some(result) = self.value_map.get(&node_id) { return result.cast().into(); } let new_local = self.map_local(node_id.cast()); self.value_map.insert(node_id, new_local.to_top()); new_local.cast().into() } pub fn map_lval_lookup(&mut self, src: &LValLink) -> LValLink { let node_id = src.to_top(); match self.value_map.get(&node_id) { Some(result) => result.cast().into(), None => panic!("unknown value: {:?}", node_id), } } crate fn origin_matches(&self, reduce_id: ReduceId) -> ClosureSeed { let mut result = ClosureSeed::default(); if let Some(grammar) = &self.grammar { let match_map = &grammar.match_map; for (match_id, prec_map) in &self.origin_matches { let prec = match match_map.get(*match_id, reduce_id) { Some(prec) => prec, None => continue, }; if let Some(closure) = prec_map.get(&prec) { result.merge(closure); } } } result } crate fn queue_build_one(&mut self, cursor: Cursor, closure: Closure, pair: GroupPair) { let job = BuildJob::BuildOne { cursor, closure, pair }; self.build_queue.push_back(job); } crate fn resolve<F>(&mut self, closure: Closure, job_ctor: F) -> BlockRef where F: FnOnce(Cursor, Closure) -> BuildJob { match self.closure_map.entry(closure) { Occupied(occupied) => { occupied.get().clone().unwrap() } Vacant(vacant) => { let mut model = self.output_model.borrow_mut(); let block_id = model.new_node::<mir::Block>(); self.blocks.push(block_id.into()); let cursor = Cursor::new(self.output_model.clone(), block_id); let job = job_ctor(cursor, vacant.key().clone()); self.build_queue.push_back(job); vacant.insert(Some(block_id.into())); block_id.into() } } } crate fn resolve_closure(&mut self, closure: Closure) -> BlockRef { 
self.resolve(closure, |cursor, closure| { BuildJob::BuildSet { cursor, closure } }) } crate fn resolve_closure_seed(&mut self, seed: ClosureSeed) -> BlockRef { let closure = self.closure(seed); self.resolve_closure(closure) } crate fn
(&mut self, seed: ClosureSeed, pair: GroupPair) -> BlockRef { let closure = seed.into_closure(); self.resolve(closure, |cursor, closure| { BuildJob::BuildOne { cursor, closure, pair } }) } crate fn successors(&self, seed: Closure) -> ClosureSeed { let mut builder = SuccBuilder::new(seed.entries); builder.build(&*self.visitor); builder.complete() } } ////////////////////////////////////////////////////////////////////////////////////////////////
resolve_one
identifier_name
rule_builder.rs
use { crate::{ mir::{self, BlockRef, LValLink}, mir_ext::mark_persistent_recursive, optimizer::{ BuildGroups, ClosureBuilder, GroupBuilder, GroupDyn, GroupSet, MatchMap, OptimizerAspect, SuccBuilder, closure::{Closure, ClosureId, ClosureSeed}, closure_interner::{ClosureInterner}, dfa_builder::{DFABuilder}, }, runtime::{ Grammar, ReduceId, options::Options, }, }, lang_mir::Cursor, north_core::{ compiler::Compiler, iter::ModelIterator, model::{Child, Link, ModelCell}, node_id::{NodeId, ToNodeId}, structure::ForestExt, visitor::{VisitorAspect, VisitCtxCore}, }, std::{ any::Any, cell::RefCell, collections::{HashMap, VecDeque, hash_map::Entry::*}, mem::replace, rc::Rc, } }; //////////////////////////////////////////////////////////////////////////////////////////////// crate type GroupPair = (Box<dyn GroupDyn>, Box<dyn Any>); //////////////////////////////////////////////////////////////////////////////////////////////// crate enum BuildJob { BuildOne { cursor: Cursor, closure: Closure, pair: GroupPair, }, BuildSet { cursor: Cursor, closure: Closure, }, } //////////////////////////////////////////////////////////////////////////////////////////////// // ClosureInterner // Closure -> TransitionTable pub struct RuleBuilder { crate _optimizer: Rc<OptimizerAspect>, crate visitor: Rc<VisitorAspect>, crate output_model: ModelCell, crate options: Options, crate closure_builder: Rc<ClosureBuilder>, crate closure_interner: Rc<ClosureInterner>, crate dfa_builder: DFABuilder, crate grammar: Option<Rc<Grammar>>, crate reduce_ids: Vec<ReduceId>, crate closure_map: HashMap<Closure, Option<BlockRef>>, // IndexMap crate value_map: HashMap<NodeId, NodeId>, crate build_queue: VecDeque<BuildJob>, crate fail_block: Option<BlockRef>, crate num_built: usize, crate origin_matches: MatchMap, crate origin_stmts: Rc<RefCell<Vec<GroupPair>>>, crate rule_ty: Option<Link<mir::TypeFn>>, crate iter_locals: Vec<LValLink>, crate locals: Vec<Child<mir::RuleLocal>>, crate blocks: Vec<Child<mir::Block>>, 
} impl RuleBuilder { pub fn new(comp: &Compiler, options: Options) -> Self { let visitor = comp.aspect_mut::<VisitorAspect>(); let closure_builder = Rc::new(ClosureBuilder::new(visitor.clone())); let closure_interner = ClosureInterner::new(); let dfa_builder = DFABuilder::new( closure_builder.clone(), closure_interner.clone(), visitor.clone(), ); Self { _optimizer: comp.aspect_mut::<OptimizerAspect>(), visitor: visitor.clone(), output_model: comp.model_cell.clone(), options, closure_builder, closure_interner, dfa_builder, grammar: None, reduce_ids: Vec::new(), closure_map: HashMap::new(), value_map: HashMap::new(), build_queue: VecDeque::new(), fail_block: None, num_built: 0, origin_matches: MatchMap::default(), origin_stmts: <_>::default(), rule_ty: None, iter_locals: Vec::new(), locals: Vec::new(), blocks: Vec::new(), } } pub fn add_rules(&mut self, rules: &Vec<NodeId>)
pub fn add_value_map<A: ToNodeId, B: ToNodeId>(&mut self, from: A, to: B) { self.value_map.insert(from.to_top(), to.to_top()); } pub fn at_origin(&self) -> bool { self.num_built <= 2 } pub fn build( &mut self, grammar: Rc<Grammar>, rules: &Vec<NodeId>, reduce_ids: Vec<ReduceId>, ) -> (String, NodeId<mir::ItemRule>) { assert!(rules.len() >= 1); self.closure_builder.set_grammar(Some(grammar.clone())); self.grammar = Some(grammar); self.reduce_ids = reduce_ids; self.add_rules(rules); while let Some(job) = self.build_queue.pop_front() { match job { BuildJob::BuildOne { cursor, closure, pair } => { // println!("BUILD_ONE {:?} => {:?}", closure, cursor.orig_block_id()); self.build_one(cursor, closure, pair); }, BuildJob::BuildSet { cursor, closure } => { // println!("BUILD_CLOSURE {:?} => {:?}", closure, cursor.orig_block_id()); self.build_set(cursor, closure); self.num_built += 1; }, } } // self.dump_info(); self.closure_builder.set_grammar(None); self.closure_map.clear(); self.grammar = None; self.iter_locals.clear(); self.num_built = 0; self.origin_matches.clear(); self.origin_stmts.borrow_mut().clear(); self.value_map.clear(); let mut model = self.output_model.borrow_mut(); let node_id = model.new_node(); let name = format!("opt_{}", node_id.idx()); //println!("BUILD LOCALS {:?}", self.locals.len()); model.complete_node(node_id, mir::ItemRule { name: name.clone(), rule_ty: self.rule_ty.take().unwrap(), params: Vec::new(), locals: replace(&mut self.locals, Vec::new()), blocks: replace(&mut self.blocks, Vec::new()), }); drop(model); // Mark persistent blocks mark_persistent_recursive(&*self.visitor, node_id); (name, node_id) } pub fn build_one(&mut self, mut cursor: Cursor, closure: Closure, pair: GroupPair) { let (group_k, group_v) = pair; let succ_seed = self.successors(closure); let succ_closure = self.closure(succ_seed); group_k.build_mir_dyn(&*group_v, self, &mut cursor); match (cursor.is_completed(), succ_closure.is_empty()) { (true, true) => { }, (true, false) 
=> { panic!("completed group with successors"); }, (false, true) => { self.fuse_block(cursor); }, (false, false) => { let job = BuildJob::BuildSet { cursor, closure: succ_closure }; self.build_queue.push_back(job); }, } } crate fn build_origin_stmts(&mut self, cursor: &mut Cursor) { let stmts = self.origin_stmts.clone(); let stmts = stmts.borrow(); for (k, v) in stmts.iter() { k.build_mir_dyn(&**v, self, cursor); } } pub fn build_set(&mut self, mut cursor: Cursor, closure: Closure) { if closure.is_empty() { cursor.build_ctl(mir::CtlFail { }); return; } let group_set = self.group(&closure); group_set.build_mir(self, cursor); } pub fn closure(&self, seed: ClosureSeed) -> Closure { self.closure_builder.build(seed) } pub fn dump_info(&self) { let mut map = HashMap::new(); for (closure, _) in &self.closure_map { let mut closure_ids = closure.entries.clone(); closure_ids.sort(); map.insert(closure_ids, ()); } println!("CLOSURES {:?} vs {:?}", map.len(), self.closure_map.len()); } pub fn fail_block(&self) -> BlockRef { self.fail_block.clone().unwrap() } pub fn fuse_block(&self, mut cur: Cursor) { if !cur.is_completed() { let fail_block = self.fail_block(); cur.build_ctl(mir::CtlBr { block: fail_block }); } } pub fn group(&mut self, closure: &Closure) -> GroupSet { let mut group_buider = GroupBuilder::new(); let visitor = self.visitor.clone(); for entry in &closure.entries { let node_id = *entry; let core = VisitCtxCore::<BuildGroups> { aspect: &*visitor, node_id, imp_args: self }; let _ = visitor.visit(core, &mut group_buider); } group_buider.complete() } pub fn intern_closure(&self, closure: Closure) -> ClosureId { self.closure_interner.intern(closure) } pub fn is_origin_pure(&self) -> bool { let incorporate_reductions = self.options.incorporate_reductions; let is_pure = self.origin_stmts.borrow().is_empty(); incorporate_reductions && is_pure } pub fn is_reduce_external(&self, reduce_id: ReduceId) -> bool { self.reduce_ids.contains(&reduce_id) } pub fn map_local(&mut 
self, src: NodeId<mir::RuleLocal>) -> NodeId<mir::RuleLocal> { let ty = { let model = self.visitor.input_model.borrow(); let node = model.node(src); node.ty.clone() // XXX: BAD: type needs to be deep copied/transfered }; let mut model_out = self.output_model.borrow_mut(); let local_out = model_out.build_node(mir::RuleLocal { ty, index: self.locals.len() }); self.locals.push(local_out.into()); local_out } pub fn map_local_iter(&mut self, src: &LValLink, depth: usize) -> LValLink { if let Some(local) = self.iter_locals.get(depth).cloned() { self.add_value_map(src, &local); return local; } if depth != self.iter_locals.len() { panic!("invalid depth"); } let local = self.map_lval(src); self.iter_locals.push(local.clone()); local } pub fn map_lval(&mut self, src: &LValLink) -> LValLink { let node_id = src.to_top(); if let Some(result) = self.value_map.get(&node_id) { return result.cast().into(); } let new_local = self.map_local(node_id.cast()); self.value_map.insert(node_id, new_local.to_top()); new_local.cast().into() } pub fn map_lval_lookup(&mut self, src: &LValLink) -> LValLink { let node_id = src.to_top(); match self.value_map.get(&node_id) { Some(result) => result.cast().into(), None => panic!("unknown value: {:?}", node_id), } } crate fn origin_matches(&self, reduce_id: ReduceId) -> ClosureSeed { let mut result = ClosureSeed::default(); if let Some(grammar) = &self.grammar { let match_map = &grammar.match_map; for (match_id, prec_map) in &self.origin_matches { let prec = match match_map.get(*match_id, reduce_id) { Some(prec) => prec, None => continue, }; if let Some(closure) = prec_map.get(&prec) { result.merge(closure); } } } result } crate fn queue_build_one(&mut self, cursor: Cursor, closure: Closure, pair: GroupPair) { let job = BuildJob::BuildOne { cursor, closure, pair }; self.build_queue.push_back(job); } crate fn resolve<F>(&mut self, closure: Closure, job_ctor: F) -> BlockRef where F: FnOnce(Cursor, Closure) -> BuildJob { match 
self.closure_map.entry(closure) { Occupied(occupied) => { occupied.get().clone().unwrap() } Vacant(vacant) => { let mut model = self.output_model.borrow_mut(); let block_id = model.new_node::<mir::Block>(); self.blocks.push(block_id.into()); let cursor = Cursor::new(self.output_model.clone(), block_id); let job = job_ctor(cursor, vacant.key().clone()); self.build_queue.push_back(job); vacant.insert(Some(block_id.into())); block_id.into() } } } crate fn resolve_closure(&mut self, closure: Closure) -> BlockRef { self.resolve(closure, |cursor, closure| { BuildJob::BuildSet { cursor, closure } }) } crate fn resolve_closure_seed(&mut self, seed: ClosureSeed) -> BlockRef { let closure = self.closure(seed); self.resolve_closure(closure) } crate fn resolve_one(&mut self, seed: ClosureSeed, pair: GroupPair) -> BlockRef { let closure = seed.into_closure(); self.resolve(closure, |cursor, closure| { BuildJob::BuildOne { cursor, closure, pair } }) } crate fn successors(&self, seed: Closure) -> ClosureSeed { let mut builder = SuccBuilder::new(seed.entries); builder.build(&*self.visitor); builder.complete() } } ////////////////////////////////////////////////////////////////////////////////////////////////
{ let seed = { let model = self.visitor.input_model.borrow(); let first_rule = model.get::<_, mir::ItemRule>(rules[0]).unwrap(); self.rule_ty = Some(first_rule.rule_ty.clone()); model.iter(rules) .borrow_cast_nodes_to::<mir::ItemRule>() .filter_map(|r| r.blocks.first().cloned()) .map(|b| model.node(b).first_node()) .collect::<Vec<_>>() }; let mut seed = ClosureSeed::new(seed); seed.follow_calls = true; let _main_block = self.resolve_closure_seed(seed); let fail_seed = ClosureSeed::default(); let fail_block = self.resolve_closure_seed(fail_seed); self.fail_block = Some(fail_block); }
identifier_body
rule_builder.rs
use { crate::{ mir::{self, BlockRef, LValLink}, mir_ext::mark_persistent_recursive, optimizer::{ BuildGroups, ClosureBuilder, GroupBuilder, GroupDyn, GroupSet, MatchMap, OptimizerAspect, SuccBuilder, closure::{Closure, ClosureId, ClosureSeed}, closure_interner::{ClosureInterner}, dfa_builder::{DFABuilder}, }, runtime::{ Grammar, ReduceId, options::Options, }, }, lang_mir::Cursor, north_core::{ compiler::Compiler, iter::ModelIterator, model::{Child, Link, ModelCell}, node_id::{NodeId, ToNodeId}, structure::ForestExt, visitor::{VisitorAspect, VisitCtxCore}, }, std::{ any::Any, cell::RefCell, collections::{HashMap, VecDeque, hash_map::Entry::*}, mem::replace, rc::Rc, } }; //////////////////////////////////////////////////////////////////////////////////////////////// crate type GroupPair = (Box<dyn GroupDyn>, Box<dyn Any>); //////////////////////////////////////////////////////////////////////////////////////////////// crate enum BuildJob { BuildOne { cursor: Cursor, closure: Closure, pair: GroupPair, }, BuildSet { cursor: Cursor, closure: Closure, }, } //////////////////////////////////////////////////////////////////////////////////////////////// // ClosureInterner // Closure -> TransitionTable pub struct RuleBuilder { crate _optimizer: Rc<OptimizerAspect>, crate visitor: Rc<VisitorAspect>, crate output_model: ModelCell, crate options: Options, crate closure_builder: Rc<ClosureBuilder>, crate closure_interner: Rc<ClosureInterner>, crate dfa_builder: DFABuilder, crate grammar: Option<Rc<Grammar>>, crate reduce_ids: Vec<ReduceId>, crate closure_map: HashMap<Closure, Option<BlockRef>>, // IndexMap crate value_map: HashMap<NodeId, NodeId>, crate build_queue: VecDeque<BuildJob>, crate fail_block: Option<BlockRef>, crate num_built: usize, crate origin_matches: MatchMap,
crate rule_ty: Option<Link<mir::TypeFn>>, crate iter_locals: Vec<LValLink>, crate locals: Vec<Child<mir::RuleLocal>>, crate blocks: Vec<Child<mir::Block>>, } impl RuleBuilder { pub fn new(comp: &Compiler, options: Options) -> Self { let visitor = comp.aspect_mut::<VisitorAspect>(); let closure_builder = Rc::new(ClosureBuilder::new(visitor.clone())); let closure_interner = ClosureInterner::new(); let dfa_builder = DFABuilder::new( closure_builder.clone(), closure_interner.clone(), visitor.clone(), ); Self { _optimizer: comp.aspect_mut::<OptimizerAspect>(), visitor: visitor.clone(), output_model: comp.model_cell.clone(), options, closure_builder, closure_interner, dfa_builder, grammar: None, reduce_ids: Vec::new(), closure_map: HashMap::new(), value_map: HashMap::new(), build_queue: VecDeque::new(), fail_block: None, num_built: 0, origin_matches: MatchMap::default(), origin_stmts: <_>::default(), rule_ty: None, iter_locals: Vec::new(), locals: Vec::new(), blocks: Vec::new(), } } pub fn add_rules(&mut self, rules: &Vec<NodeId>) { let seed = { let model = self.visitor.input_model.borrow(); let first_rule = model.get::<_, mir::ItemRule>(rules[0]).unwrap(); self.rule_ty = Some(first_rule.rule_ty.clone()); model.iter(rules) .borrow_cast_nodes_to::<mir::ItemRule>() .filter_map(|r| r.blocks.first().cloned()) .map(|b| model.node(b).first_node()) .collect::<Vec<_>>() }; let mut seed = ClosureSeed::new(seed); seed.follow_calls = true; let _main_block = self.resolve_closure_seed(seed); let fail_seed = ClosureSeed::default(); let fail_block = self.resolve_closure_seed(fail_seed); self.fail_block = Some(fail_block); } pub fn add_value_map<A: ToNodeId, B: ToNodeId>(&mut self, from: A, to: B) { self.value_map.insert(from.to_top(), to.to_top()); } pub fn at_origin(&self) -> bool { self.num_built <= 2 } pub fn build( &mut self, grammar: Rc<Grammar>, rules: &Vec<NodeId>, reduce_ids: Vec<ReduceId>, ) -> (String, NodeId<mir::ItemRule>) { assert!(rules.len() >= 1); 
self.closure_builder.set_grammar(Some(grammar.clone())); self.grammar = Some(grammar); self.reduce_ids = reduce_ids; self.add_rules(rules); while let Some(job) = self.build_queue.pop_front() { match job { BuildJob::BuildOne { cursor, closure, pair } => { // println!("BUILD_ONE {:?} => {:?}", closure, cursor.orig_block_id()); self.build_one(cursor, closure, pair); }, BuildJob::BuildSet { cursor, closure } => { // println!("BUILD_CLOSURE {:?} => {:?}", closure, cursor.orig_block_id()); self.build_set(cursor, closure); self.num_built += 1; }, } } // self.dump_info(); self.closure_builder.set_grammar(None); self.closure_map.clear(); self.grammar = None; self.iter_locals.clear(); self.num_built = 0; self.origin_matches.clear(); self.origin_stmts.borrow_mut().clear(); self.value_map.clear(); let mut model = self.output_model.borrow_mut(); let node_id = model.new_node(); let name = format!("opt_{}", node_id.idx()); //println!("BUILD LOCALS {:?}", self.locals.len()); model.complete_node(node_id, mir::ItemRule { name: name.clone(), rule_ty: self.rule_ty.take().unwrap(), params: Vec::new(), locals: replace(&mut self.locals, Vec::new()), blocks: replace(&mut self.blocks, Vec::new()), }); drop(model); // Mark persistent blocks mark_persistent_recursive(&*self.visitor, node_id); (name, node_id) } pub fn build_one(&mut self, mut cursor: Cursor, closure: Closure, pair: GroupPair) { let (group_k, group_v) = pair; let succ_seed = self.successors(closure); let succ_closure = self.closure(succ_seed); group_k.build_mir_dyn(&*group_v, self, &mut cursor); match (cursor.is_completed(), succ_closure.is_empty()) { (true, true) => { }, (true, false) => { panic!("completed group with successors"); }, (false, true) => { self.fuse_block(cursor); }, (false, false) => { let job = BuildJob::BuildSet { cursor, closure: succ_closure }; self.build_queue.push_back(job); }, } } crate fn build_origin_stmts(&mut self, cursor: &mut Cursor) { let stmts = self.origin_stmts.clone(); let stmts = 
stmts.borrow(); for (k, v) in stmts.iter() { k.build_mir_dyn(&**v, self, cursor); } } pub fn build_set(&mut self, mut cursor: Cursor, closure: Closure) { if closure.is_empty() { cursor.build_ctl(mir::CtlFail { }); return; } let group_set = self.group(&closure); group_set.build_mir(self, cursor); } pub fn closure(&self, seed: ClosureSeed) -> Closure { self.closure_builder.build(seed) } pub fn dump_info(&self) { let mut map = HashMap::new(); for (closure, _) in &self.closure_map { let mut closure_ids = closure.entries.clone(); closure_ids.sort(); map.insert(closure_ids, ()); } println!("CLOSURES {:?} vs {:?}", map.len(), self.closure_map.len()); } pub fn fail_block(&self) -> BlockRef { self.fail_block.clone().unwrap() } pub fn fuse_block(&self, mut cur: Cursor) { if !cur.is_completed() { let fail_block = self.fail_block(); cur.build_ctl(mir::CtlBr { block: fail_block }); } } pub fn group(&mut self, closure: &Closure) -> GroupSet { let mut group_buider = GroupBuilder::new(); let visitor = self.visitor.clone(); for entry in &closure.entries { let node_id = *entry; let core = VisitCtxCore::<BuildGroups> { aspect: &*visitor, node_id, imp_args: self }; let _ = visitor.visit(core, &mut group_buider); } group_buider.complete() } pub fn intern_closure(&self, closure: Closure) -> ClosureId { self.closure_interner.intern(closure) } pub fn is_origin_pure(&self) -> bool { let incorporate_reductions = self.options.incorporate_reductions; let is_pure = self.origin_stmts.borrow().is_empty(); incorporate_reductions && is_pure } pub fn is_reduce_external(&self, reduce_id: ReduceId) -> bool { self.reduce_ids.contains(&reduce_id) } pub fn map_local(&mut self, src: NodeId<mir::RuleLocal>) -> NodeId<mir::RuleLocal> { let ty = { let model = self.visitor.input_model.borrow(); let node = model.node(src); node.ty.clone() // XXX: BAD: type needs to be deep copied/transfered }; let mut model_out = self.output_model.borrow_mut(); let local_out = model_out.build_node(mir::RuleLocal { ty, index: 
self.locals.len() }); self.locals.push(local_out.into()); local_out } pub fn map_local_iter(&mut self, src: &LValLink, depth: usize) -> LValLink { if let Some(local) = self.iter_locals.get(depth).cloned() { self.add_value_map(src, &local); return local; } if depth != self.iter_locals.len() { panic!("invalid depth"); } let local = self.map_lval(src); self.iter_locals.push(local.clone()); local } pub fn map_lval(&mut self, src: &LValLink) -> LValLink { let node_id = src.to_top(); if let Some(result) = self.value_map.get(&node_id) { return result.cast().into(); } let new_local = self.map_local(node_id.cast()); self.value_map.insert(node_id, new_local.to_top()); new_local.cast().into() } pub fn map_lval_lookup(&mut self, src: &LValLink) -> LValLink { let node_id = src.to_top(); match self.value_map.get(&node_id) { Some(result) => result.cast().into(), None => panic!("unknown value: {:?}", node_id), } } crate fn origin_matches(&self, reduce_id: ReduceId) -> ClosureSeed { let mut result = ClosureSeed::default(); if let Some(grammar) = &self.grammar { let match_map = &grammar.match_map; for (match_id, prec_map) in &self.origin_matches { let prec = match match_map.get(*match_id, reduce_id) { Some(prec) => prec, None => continue, }; if let Some(closure) = prec_map.get(&prec) { result.merge(closure); } } } result } crate fn queue_build_one(&mut self, cursor: Cursor, closure: Closure, pair: GroupPair) { let job = BuildJob::BuildOne { cursor, closure, pair }; self.build_queue.push_back(job); } crate fn resolve<F>(&mut self, closure: Closure, job_ctor: F) -> BlockRef where F: FnOnce(Cursor, Closure) -> BuildJob { match self.closure_map.entry(closure) { Occupied(occupied) => { occupied.get().clone().unwrap() } Vacant(vacant) => { let mut model = self.output_model.borrow_mut(); let block_id = model.new_node::<mir::Block>(); self.blocks.push(block_id.into()); let cursor = Cursor::new(self.output_model.clone(), block_id); let job = job_ctor(cursor, vacant.key().clone()); 
self.build_queue.push_back(job); vacant.insert(Some(block_id.into())); block_id.into() } } } crate fn resolve_closure(&mut self, closure: Closure) -> BlockRef { self.resolve(closure, |cursor, closure| { BuildJob::BuildSet { cursor, closure } }) } crate fn resolve_closure_seed(&mut self, seed: ClosureSeed) -> BlockRef { let closure = self.closure(seed); self.resolve_closure(closure) } crate fn resolve_one(&mut self, seed: ClosureSeed, pair: GroupPair) -> BlockRef { let closure = seed.into_closure(); self.resolve(closure, |cursor, closure| { BuildJob::BuildOne { cursor, closure, pair } }) } crate fn successors(&self, seed: Closure) -> ClosureSeed { let mut builder = SuccBuilder::new(seed.entries); builder.build(&*self.visitor); builder.complete() } } ////////////////////////////////////////////////////////////////////////////////////////////////
crate origin_stmts: Rc<RefCell<Vec<GroupPair>>>,
random_line_split
genetic.go
// Code for genetic algorithms package genetic import ( "flag" "fmt" "log" "math/rand" "os" "os/exec" "path" "sort" "strconv" "strings" "sync" "time" "github.com/kevinburke/rct/Godeps/_workspace/src/github.com/pborman/uuid" "github.com/kevinburke/rct/tracks" ) var directory *string func init() { directory = flag.String("directory", "/usr/local/rct", "Path to the folder storing RCT experiment data") } // constants which may be altered to affect the ride runtime const PARENTS = 2 const FIT_PERCENTAGE = 0.2 const MUTATION_RATE = 0.05 // crossover with a probability of 0.6 (taken from the book & De Jong 1975) const CROSSOVER_PROBABILITY = 0.6 const POOL_SIZE = 500 const ITERATIONS = 1000 const PRINT_RESULTS_EVERY = 5 type ExperimentMetadata struct { Hash string Date time.Time Notes string Runtime time.Duration PoolSize int16 Iterations int32 CrossoverProbability float32 MutationRate float32 } func Run(packageRoot string) error { if directory == nil { return fmt.Errorf("invalid directory - need to specify it") } expDir := path.Join(*directory, "experiments") err := os.MkdirAll(expDir, 0755) if err != nil { return err } id := fmt.Sprintf("exp_%s", uuid.New()) expIdDir := path.Join(expDir, id) err = os.MkdirAll(expIdDir, 0755) if err != nil { return err } iterationsDir := path.Join(expIdDir, "iterations") err = os.MkdirAll(iterationsDir, 0755) if err != nil { return err } cmd := exec.Command("git", "rev-parse", "HEAD") cmd.Dir = packageRoot hashb, err := cmd.Output() if err != nil { return err } mtd := ExperimentMetadata{ Hash: strings.TrimSpace(string(hashb)), Date: time.Now().UTC(), Notes: "(none)", // XXX CrossoverProbability: CROSSOVER_PROBABILITY, PoolSize: POOL_SIZE, MutationRate: MUTATION_RATE, Iterations: ITERATIONS, } metadataPath := path.Join(expIdDir, "meta.json") err = encode(metadataPath, mtd) if err != nil { return err } fmt.Printf("Experiment %s\n======================================\n", id) pool := SeedPool(POOL_SIZE) pool.Id = id for i := 0; i < 
ITERATIONS; i++ { pool = pool.Crossover() pool.Mutate(MUTATION_RATE) pool.Evaluate() iterationDir := path.Join(iterationsDir, strconv.Itoa(i)) err = os.MkdirAll(iterationDir, 0755) if err != nil { return err } pool.Statistics(i, *directory) } return nil } type Pool struct { Id string Members []*Member } type Member struct { Id string Score int64 // Advantage or disadvantage in reproducing Fitness float64 Runtime time.Duration Track []tracks.Element ScoreData scoreData } type scoresArray [500]int64 func (a *scoresArray) Len() int { return len(a) } func (a *scoresArray) Swap(i int, j int) { a[i], a[j] = a[j], a[i] } func (a *scoresArray) Less(i, j int) bool { return a[i] < a[j] } func (p *Pool) Statistics(iteration int, outputDirectory string) { var scores scoresArray var highestScore int64 = -1 var worstScore int64 = 1000 * 1000 * 1000 bestMember := new(Member) worstMember := new(Member) for i := 0; i < len(p.Members); i++ { scores[i] = p.Members[i].Score if p.Members[i].Score > highestScore { highestScore = p.Members[i].Score bestMember = p.Members[i] } if p.Members[i].Score < worstScore { worstScore = p.Members[i].Score worstMember = p.Members[i] } } if iteration%PRINT_RESULTS_EVERY == 0 { middle := len(scores) / 2 median := (scores[middle] + scores[middle-1]) / 2 sort.Sort(&scores) bestScorer := fmt.Sprintf("\t(length: %d, collisions: %d, to completion: %d, negative speed points: %d)\n\n", bestMember.ScoreData.Length, bestMember.ScoreData.Collisions, bestMember.ScoreData.Distance, bestMember.ScoreData.NegativeSpeed) fmt.Printf("Iteration %d: %d members, best member %s has score %d, "+ "median %d, worst has score %d\n%s", iteration, len(p.Members), bestMember.Id, bestMember.Score, median, worstScore, bestScorer) if os.Getenv("DEBUG_BEST_TRACK") == "true" { if iteration%20 == 0 && iteration > 0 { for _, elem := range bestMember.Track { fmt.Printf("%s %t\n", elem.Segment.String(), elem.ChainLift) } fmt.Println("==================") } } } pth := 
path.Join(outputDirectory, "experiments", p.Id, "iterations", strconv.Itoa(iteration), fmt.Sprintf("%s.json", bestMember.Id)) err := encode(pth, bestMember) if err != nil { log.Print(err) } pth = path.Join(outputDirectory, "experiments", p.Id, "iterations", strconv.Itoa(iteration), fmt.Sprintf("%s.json", worstMember.Id)) err = encode(pth, worstMember) if err != nil { log.Print(err) } } // Create an initial pool func SeedPool(size int) *Pool { // 1. Create a station of length 10 // 2. For the start station piece, generate a list of possible pieces. // 3. Choose one at random. Advance a pointer one forward. // 4. Repeat for 50 pieces (Woodchip is length 108. Mischief is 123) members := make([]*Member, POOL_SIZE) for i := 0; i < POOL_SIZE; i++ { track := CreateStation() for j := STATION_LENGTH - 1; j < INITIAL_TRACK_LENGTH-STATION_LENGTH; j++ { poss := track[j].Possibilities() track = append(track, poss[rand.Intn(len(poss))]) } score, d := GetScore(track) members[i] = &Member{ Id: fmt.Sprintf("iter_%s", uuid.New()), Track: track, Score: score, ScoreData: d, } //if i%100 == 0 { //for _, elem := range track { //fmt.Println(elem.Segment.String()) //} //fmt.Printf("========\n") //} } return &Pool{Members: members} } func (p *Pool) Mutate(rate float64) { // for each ride: // for each position in the ride: // add in a possibility of mutation - addition // if addition: // find a piece that is compatible with both ends. // if no piece: // advance to the next track piece and try again } // Assign scores for every member of the pool func (p *Pool) Evaluate() { var wg sync.WaitGroup for i := 0; i < POOL_SIZE; i++ { wg.Add(1) go func(i int, track []tracks.Element) { p.Members[i].Score, p.Members[i].ScoreData = GetScore(track) wg.Done() }(i, p.Members[i].Track) } wg.Wait() // Assign fitness for every member. In the future, consider a smarter // algorithm higher score members a better chance of reproducing. 
for i := 0; i < POOL_SIZE; i++ { p.Members[i].Fitness = float64(p.Members[i].Score) } } // Select chooses a member of the population at random func (p *Pool) Select() (int, *Member) { // Stupid dumb version of this taken from here: // http://eli.thegreenplace.net/2010/01/22/weighted-random-generation-in-python // If it's a bottleneck, rewrite it. var weightedTotal float64 = 0 totals := make([]float64, len(p.Members)) for i := 0; i < len(p.Members); i++ { weightedTotal += max(p.Members[i].Fitness, 0) totals[i] = weightedTotal } rnd := rand.Float64() * weightedTotal for index, element := range totals { if rnd < element { return index, p.Members[index] } } return -1, &Member{} } func min(a int, b int) int { if a < b { return a } return b } func max(a float64, b float64) float64 { if a > b { return a } return b } // crossPoint1 = splice point in parent1. func crossoverAtPoint(parent1 *Member, parent2 *Member, crossPoint1 int) (*Member, *Member) { // crossPoint2 = splice point in parent2. crossPoint2 := crossPoint1 + 1 foundMatch := false for { // say cross point 1 = 3, track length = 6 // parent 1 == 0-3, parent 2 = 4-5 if tracks.Compatible(parent1.Track[crossPoint1], parent2.Track[crossPoint2]) { // Increment so we splice *after* the cross point in parent 1 crossPoint1++ foundMatch = true break } crossPoint1++ if crossPoint1 >= len(parent1.Track) { break } if tracks.Compatible(parent1.Track[crossPoint1], parent2.Track[crossPoint2]) { foundMatch = true // Increment so we splice *after* the cross point in parent 1 crossPoint1++ break } crossPoint2++ if crossPoint2 >= len(parent2.Track) { break } } // swap the track pieces at the chosen point on track A and track B if foundMatch { c1, c2 := Swap(parent1, parent2, crossPoint1, crossPoint2) if os.Getenv("DEBUG_SWAPS") == "true" && crossPoint1 > 4 { fmt.Println("swapped at", crossPoint1, crossPoint2) fmt.Println("parent1") printTrack(parent1.Track) fmt.Println("parent2") printTrack(parent2.Track) fmt.Println("child1") 
printTrack(c1.Track) fmt.Println("child2") printTrack(c2.Track) } return c1, c2 } return parent1, parent2 } // crossoverOne crosses over the parent tracks. if point is -1, a random point // is chosen. (point is used for testing) func crossoverOne(parent1 *Member, parent2 *Member, point int) (*Member, *Member) { // choose a random point between the beginning and the end if point == -1 { minval := min(len(parent1.Track)-1, len(parent2.Track)-1) point = rand.Intn(minval) } return crossoverAtPoint(parent1, parent2, point) } func printTrack(t []tracks.Element) { for i, elem := range t { fmt.Printf("%d %s\n", i, elem.Segment.String()) } } // Crossover chooses two members of a pool and joins them at random. func (p *Pool)
() *Pool { halfLen := len(p.Members) / 2 for i := 0; i < halfLen; i++ { // select 2 parents at random idx1, parent1 := p.Select() idx2, parent2 := p.Select() if idx1 == -1 || idx2 == -1 { continue } if idx1 == idx2 { // No point in crossing over with ourself continue } if rand.Float64() < CROSSOVER_PROBABILITY { // XXX delete parents child1, child2 := crossoverOne(parent1, parent2, -1) p.Members[idx1] = child1 p.Members[idx2] = child2 } } return p } // Swap creates two children out of the parents, by crossing over the tracks at // the given cross points. The sum of the two track lengths may be the same, // but the tracks themselves will change. func Swap(parent1 *Member, parent2 *Member, crossPoint1 int, crossPoint2 int) (*Member, *Member) { child1len := crossPoint1 + (len(parent2.Track) - crossPoint2) child2len := crossPoint2 + (len(parent1.Track) - crossPoint1) child1track := make([]tracks.Element, child1len) child2track := make([]tracks.Element, child2len) copy(child1track[:crossPoint1], parent1.Track[:crossPoint1]) // if we filled in to child1 from [crossPoint2:] we might have gaps in the // track. fill from the end of cross point 1, with the contents of cross // point 2. copy(child1track[crossPoint1:], parent2.Track[crossPoint2:]) copy(child2track[:crossPoint2], parent2.Track[:crossPoint2]) copy(child2track[crossPoint2:], parent1.Track[crossPoint1:]) child1 := &Member{ Id: fmt.Sprintf("iter_%s", uuid.New()), Track: child1track, } child2 := &Member{ Id: fmt.Sprintf("iter_%s", uuid.New()), Track: child2track, } return child1, child2 } func (p *Pool) Spawn(numParents int) *Pool { return nil }
Crossover
identifier_name
genetic.go
// Code for genetic algorithms package genetic import ( "flag" "fmt" "log" "math/rand" "os" "os/exec" "path" "sort" "strconv" "strings" "sync" "time" "github.com/kevinburke/rct/Godeps/_workspace/src/github.com/pborman/uuid" "github.com/kevinburke/rct/tracks" ) var directory *string func init() { directory = flag.String("directory", "/usr/local/rct", "Path to the folder storing RCT experiment data") } // constants which may be altered to affect the ride runtime const PARENTS = 2 const FIT_PERCENTAGE = 0.2 const MUTATION_RATE = 0.05 // crossover with a probability of 0.6 (taken from the book & De Jong 1975) const CROSSOVER_PROBABILITY = 0.6 const POOL_SIZE = 500 const ITERATIONS = 1000 const PRINT_RESULTS_EVERY = 5 type ExperimentMetadata struct { Hash string Date time.Time Notes string Runtime time.Duration PoolSize int16 Iterations int32 CrossoverProbability float32 MutationRate float32 } func Run(packageRoot string) error { if directory == nil { return fmt.Errorf("invalid directory - need to specify it") } expDir := path.Join(*directory, "experiments") err := os.MkdirAll(expDir, 0755) if err != nil { return err } id := fmt.Sprintf("exp_%s", uuid.New()) expIdDir := path.Join(expDir, id) err = os.MkdirAll(expIdDir, 0755) if err != nil { return err
} iterationsDir := path.Join(expIdDir, "iterations") err = os.MkdirAll(iterationsDir, 0755) if err != nil { return err } cmd := exec.Command("git", "rev-parse", "HEAD") cmd.Dir = packageRoot hashb, err := cmd.Output() if err != nil { return err } mtd := ExperimentMetadata{ Hash: strings.TrimSpace(string(hashb)), Date: time.Now().UTC(), Notes: "(none)", // XXX CrossoverProbability: CROSSOVER_PROBABILITY, PoolSize: POOL_SIZE, MutationRate: MUTATION_RATE, Iterations: ITERATIONS, } metadataPath := path.Join(expIdDir, "meta.json") err = encode(metadataPath, mtd) if err != nil { return err } fmt.Printf("Experiment %s\n======================================\n", id) pool := SeedPool(POOL_SIZE) pool.Id = id for i := 0; i < ITERATIONS; i++ { pool = pool.Crossover() pool.Mutate(MUTATION_RATE) pool.Evaluate() iterationDir := path.Join(iterationsDir, strconv.Itoa(i)) err = os.MkdirAll(iterationDir, 0755) if err != nil { return err } pool.Statistics(i, *directory) } return nil } type Pool struct { Id string Members []*Member } type Member struct { Id string Score int64 // Advantage or disadvantage in reproducing Fitness float64 Runtime time.Duration Track []tracks.Element ScoreData scoreData } type scoresArray [500]int64 func (a *scoresArray) Len() int { return len(a) } func (a *scoresArray) Swap(i int, j int) { a[i], a[j] = a[j], a[i] } func (a *scoresArray) Less(i, j int) bool { return a[i] < a[j] } func (p *Pool) Statistics(iteration int, outputDirectory string) { var scores scoresArray var highestScore int64 = -1 var worstScore int64 = 1000 * 1000 * 1000 bestMember := new(Member) worstMember := new(Member) for i := 0; i < len(p.Members); i++ { scores[i] = p.Members[i].Score if p.Members[i].Score > highestScore { highestScore = p.Members[i].Score bestMember = p.Members[i] } if p.Members[i].Score < worstScore { worstScore = p.Members[i].Score worstMember = p.Members[i] } } if iteration%PRINT_RESULTS_EVERY == 0 { middle := len(scores) / 2 median := (scores[middle] + 
scores[middle-1]) / 2 sort.Sort(&scores) bestScorer := fmt.Sprintf("\t(length: %d, collisions: %d, to completion: %d, negative speed points: %d)\n\n", bestMember.ScoreData.Length, bestMember.ScoreData.Collisions, bestMember.ScoreData.Distance, bestMember.ScoreData.NegativeSpeed) fmt.Printf("Iteration %d: %d members, best member %s has score %d, "+ "median %d, worst has score %d\n%s", iteration, len(p.Members), bestMember.Id, bestMember.Score, median, worstScore, bestScorer) if os.Getenv("DEBUG_BEST_TRACK") == "true" { if iteration%20 == 0 && iteration > 0 { for _, elem := range bestMember.Track { fmt.Printf("%s %t\n", elem.Segment.String(), elem.ChainLift) } fmt.Println("==================") } } } pth := path.Join(outputDirectory, "experiments", p.Id, "iterations", strconv.Itoa(iteration), fmt.Sprintf("%s.json", bestMember.Id)) err := encode(pth, bestMember) if err != nil { log.Print(err) } pth = path.Join(outputDirectory, "experiments", p.Id, "iterations", strconv.Itoa(iteration), fmt.Sprintf("%s.json", worstMember.Id)) err = encode(pth, worstMember) if err != nil { log.Print(err) } } // Create an initial pool func SeedPool(size int) *Pool { // 1. Create a station of length 10 // 2. For the start station piece, generate a list of possible pieces. // 3. Choose one at random. Advance a pointer one forward. // 4. Repeat for 50 pieces (Woodchip is length 108. 
Mischief is 123) members := make([]*Member, POOL_SIZE) for i := 0; i < POOL_SIZE; i++ { track := CreateStation() for j := STATION_LENGTH - 1; j < INITIAL_TRACK_LENGTH-STATION_LENGTH; j++ { poss := track[j].Possibilities() track = append(track, poss[rand.Intn(len(poss))]) } score, d := GetScore(track) members[i] = &Member{ Id: fmt.Sprintf("iter_%s", uuid.New()), Track: track, Score: score, ScoreData: d, } //if i%100 == 0 { //for _, elem := range track { //fmt.Println(elem.Segment.String()) //} //fmt.Printf("========\n") //} } return &Pool{Members: members} } func (p *Pool) Mutate(rate float64) { // for each ride: // for each position in the ride: // add in a possibility of mutation - addition // if addition: // find a piece that is compatible with both ends. // if no piece: // advance to the next track piece and try again } // Assign scores for every member of the pool func (p *Pool) Evaluate() { var wg sync.WaitGroup for i := 0; i < POOL_SIZE; i++ { wg.Add(1) go func(i int, track []tracks.Element) { p.Members[i].Score, p.Members[i].ScoreData = GetScore(track) wg.Done() }(i, p.Members[i].Track) } wg.Wait() // Assign fitness for every member. In the future, consider a smarter // algorithm higher score members a better chance of reproducing. for i := 0; i < POOL_SIZE; i++ { p.Members[i].Fitness = float64(p.Members[i].Score) } } // Select chooses a member of the population at random func (p *Pool) Select() (int, *Member) { // Stupid dumb version of this taken from here: // http://eli.thegreenplace.net/2010/01/22/weighted-random-generation-in-python // If it's a bottleneck, rewrite it. 
var weightedTotal float64 = 0 totals := make([]float64, len(p.Members)) for i := 0; i < len(p.Members); i++ { weightedTotal += max(p.Members[i].Fitness, 0) totals[i] = weightedTotal } rnd := rand.Float64() * weightedTotal for index, element := range totals { if rnd < element { return index, p.Members[index] } } return -1, &Member{} } func min(a int, b int) int { if a < b { return a } return b } func max(a float64, b float64) float64 { if a > b { return a } return b } // crossPoint1 = splice point in parent1. func crossoverAtPoint(parent1 *Member, parent2 *Member, crossPoint1 int) (*Member, *Member) { // crossPoint2 = splice point in parent2. crossPoint2 := crossPoint1 + 1 foundMatch := false for { // say cross point 1 = 3, track length = 6 // parent 1 == 0-3, parent 2 = 4-5 if tracks.Compatible(parent1.Track[crossPoint1], parent2.Track[crossPoint2]) { // Increment so we splice *after* the cross point in parent 1 crossPoint1++ foundMatch = true break } crossPoint1++ if crossPoint1 >= len(parent1.Track) { break } if tracks.Compatible(parent1.Track[crossPoint1], parent2.Track[crossPoint2]) { foundMatch = true // Increment so we splice *after* the cross point in parent 1 crossPoint1++ break } crossPoint2++ if crossPoint2 >= len(parent2.Track) { break } } // swap the track pieces at the chosen point on track A and track B if foundMatch { c1, c2 := Swap(parent1, parent2, crossPoint1, crossPoint2) if os.Getenv("DEBUG_SWAPS") == "true" && crossPoint1 > 4 { fmt.Println("swapped at", crossPoint1, crossPoint2) fmt.Println("parent1") printTrack(parent1.Track) fmt.Println("parent2") printTrack(parent2.Track) fmt.Println("child1") printTrack(c1.Track) fmt.Println("child2") printTrack(c2.Track) } return c1, c2 } return parent1, parent2 } // crossoverOne crosses over the parent tracks. if point is -1, a random point // is chosen. 
(point is used for testing) func crossoverOne(parent1 *Member, parent2 *Member, point int) (*Member, *Member) { // choose a random point between the beginning and the end if point == -1 { minval := min(len(parent1.Track)-1, len(parent2.Track)-1) point = rand.Intn(minval) } return crossoverAtPoint(parent1, parent2, point) } func printTrack(t []tracks.Element) { for i, elem := range t { fmt.Printf("%d %s\n", i, elem.Segment.String()) } } // Crossover chooses two members of a pool and joins them at random. func (p *Pool) Crossover() *Pool { halfLen := len(p.Members) / 2 for i := 0; i < halfLen; i++ { // select 2 parents at random idx1, parent1 := p.Select() idx2, parent2 := p.Select() if idx1 == -1 || idx2 == -1 { continue } if idx1 == idx2 { // No point in crossing over with ourself continue } if rand.Float64() < CROSSOVER_PROBABILITY { // XXX delete parents child1, child2 := crossoverOne(parent1, parent2, -1) p.Members[idx1] = child1 p.Members[idx2] = child2 } } return p } // Swap creates two children out of the parents, by crossing over the tracks at // the given cross points. The sum of the two track lengths may be the same, // but the tracks themselves will change. func Swap(parent1 *Member, parent2 *Member, crossPoint1 int, crossPoint2 int) (*Member, *Member) { child1len := crossPoint1 + (len(parent2.Track) - crossPoint2) child2len := crossPoint2 + (len(parent1.Track) - crossPoint1) child1track := make([]tracks.Element, child1len) child2track := make([]tracks.Element, child2len) copy(child1track[:crossPoint1], parent1.Track[:crossPoint1]) // if we filled in to child1 from [crossPoint2:] we might have gaps in the // track. fill from the end of cross point 1, with the contents of cross // point 2. 
copy(child1track[crossPoint1:], parent2.Track[crossPoint2:]) copy(child2track[:crossPoint2], parent2.Track[:crossPoint2]) copy(child2track[crossPoint2:], parent1.Track[crossPoint1:]) child1 := &Member{ Id: fmt.Sprintf("iter_%s", uuid.New()), Track: child1track, } child2 := &Member{ Id: fmt.Sprintf("iter_%s", uuid.New()), Track: child2track, } return child1, child2 } func (p *Pool) Spawn(numParents int) *Pool { return nil }
random_line_split
genetic.go
// Code for genetic algorithms package genetic import ( "flag" "fmt" "log" "math/rand" "os" "os/exec" "path" "sort" "strconv" "strings" "sync" "time" "github.com/kevinburke/rct/Godeps/_workspace/src/github.com/pborman/uuid" "github.com/kevinburke/rct/tracks" ) var directory *string func init()
// constants which may be altered to affect the ride runtime const PARENTS = 2 const FIT_PERCENTAGE = 0.2 const MUTATION_RATE = 0.05 // crossover with a probability of 0.6 (taken from the book & De Jong 1975) const CROSSOVER_PROBABILITY = 0.6 const POOL_SIZE = 500 const ITERATIONS = 1000 const PRINT_RESULTS_EVERY = 5 type ExperimentMetadata struct { Hash string Date time.Time Notes string Runtime time.Duration PoolSize int16 Iterations int32 CrossoverProbability float32 MutationRate float32 } func Run(packageRoot string) error { if directory == nil { return fmt.Errorf("invalid directory - need to specify it") } expDir := path.Join(*directory, "experiments") err := os.MkdirAll(expDir, 0755) if err != nil { return err } id := fmt.Sprintf("exp_%s", uuid.New()) expIdDir := path.Join(expDir, id) err = os.MkdirAll(expIdDir, 0755) if err != nil { return err } iterationsDir := path.Join(expIdDir, "iterations") err = os.MkdirAll(iterationsDir, 0755) if err != nil { return err } cmd := exec.Command("git", "rev-parse", "HEAD") cmd.Dir = packageRoot hashb, err := cmd.Output() if err != nil { return err } mtd := ExperimentMetadata{ Hash: strings.TrimSpace(string(hashb)), Date: time.Now().UTC(), Notes: "(none)", // XXX CrossoverProbability: CROSSOVER_PROBABILITY, PoolSize: POOL_SIZE, MutationRate: MUTATION_RATE, Iterations: ITERATIONS, } metadataPath := path.Join(expIdDir, "meta.json") err = encode(metadataPath, mtd) if err != nil { return err } fmt.Printf("Experiment %s\n======================================\n", id) pool := SeedPool(POOL_SIZE) pool.Id = id for i := 0; i < ITERATIONS; i++ { pool = pool.Crossover() pool.Mutate(MUTATION_RATE) pool.Evaluate() iterationDir := path.Join(iterationsDir, strconv.Itoa(i)) err = os.MkdirAll(iterationDir, 0755) if err != nil { return err } pool.Statistics(i, *directory) } return nil } type Pool struct { Id string Members []*Member } type Member struct { Id string Score int64 // Advantage or disadvantage in reproducing Fitness float64 
Runtime time.Duration Track []tracks.Element ScoreData scoreData } type scoresArray [500]int64 func (a *scoresArray) Len() int { return len(a) } func (a *scoresArray) Swap(i int, j int) { a[i], a[j] = a[j], a[i] } func (a *scoresArray) Less(i, j int) bool { return a[i] < a[j] } func (p *Pool) Statistics(iteration int, outputDirectory string) { var scores scoresArray var highestScore int64 = -1 var worstScore int64 = 1000 * 1000 * 1000 bestMember := new(Member) worstMember := new(Member) for i := 0; i < len(p.Members); i++ { scores[i] = p.Members[i].Score if p.Members[i].Score > highestScore { highestScore = p.Members[i].Score bestMember = p.Members[i] } if p.Members[i].Score < worstScore { worstScore = p.Members[i].Score worstMember = p.Members[i] } } if iteration%PRINT_RESULTS_EVERY == 0 { middle := len(scores) / 2 median := (scores[middle] + scores[middle-1]) / 2 sort.Sort(&scores) bestScorer := fmt.Sprintf("\t(length: %d, collisions: %d, to completion: %d, negative speed points: %d)\n\n", bestMember.ScoreData.Length, bestMember.ScoreData.Collisions, bestMember.ScoreData.Distance, bestMember.ScoreData.NegativeSpeed) fmt.Printf("Iteration %d: %d members, best member %s has score %d, "+ "median %d, worst has score %d\n%s", iteration, len(p.Members), bestMember.Id, bestMember.Score, median, worstScore, bestScorer) if os.Getenv("DEBUG_BEST_TRACK") == "true" { if iteration%20 == 0 && iteration > 0 { for _, elem := range bestMember.Track { fmt.Printf("%s %t\n", elem.Segment.String(), elem.ChainLift) } fmt.Println("==================") } } } pth := path.Join(outputDirectory, "experiments", p.Id, "iterations", strconv.Itoa(iteration), fmt.Sprintf("%s.json", bestMember.Id)) err := encode(pth, bestMember) if err != nil { log.Print(err) } pth = path.Join(outputDirectory, "experiments", p.Id, "iterations", strconv.Itoa(iteration), fmt.Sprintf("%s.json", worstMember.Id)) err = encode(pth, worstMember) if err != nil { log.Print(err) } } // Create an initial pool func 
SeedPool(size int) *Pool { // 1. Create a station of length 10 // 2. For the start station piece, generate a list of possible pieces. // 3. Choose one at random. Advance a pointer one forward. // 4. Repeat for 50 pieces (Woodchip is length 108. Mischief is 123) members := make([]*Member, POOL_SIZE) for i := 0; i < POOL_SIZE; i++ { track := CreateStation() for j := STATION_LENGTH - 1; j < INITIAL_TRACK_LENGTH-STATION_LENGTH; j++ { poss := track[j].Possibilities() track = append(track, poss[rand.Intn(len(poss))]) } score, d := GetScore(track) members[i] = &Member{ Id: fmt.Sprintf("iter_%s", uuid.New()), Track: track, Score: score, ScoreData: d, } //if i%100 == 0 { //for _, elem := range track { //fmt.Println(elem.Segment.String()) //} //fmt.Printf("========\n") //} } return &Pool{Members: members} } func (p *Pool) Mutate(rate float64) { // for each ride: // for each position in the ride: // add in a possibility of mutation - addition // if addition: // find a piece that is compatible with both ends. // if no piece: // advance to the next track piece and try again } // Assign scores for every member of the pool func (p *Pool) Evaluate() { var wg sync.WaitGroup for i := 0; i < POOL_SIZE; i++ { wg.Add(1) go func(i int, track []tracks.Element) { p.Members[i].Score, p.Members[i].ScoreData = GetScore(track) wg.Done() }(i, p.Members[i].Track) } wg.Wait() // Assign fitness for every member. In the future, consider a smarter // algorithm higher score members a better chance of reproducing. for i := 0; i < POOL_SIZE; i++ { p.Members[i].Fitness = float64(p.Members[i].Score) } } // Select chooses a member of the population at random func (p *Pool) Select() (int, *Member) { // Stupid dumb version of this taken from here: // http://eli.thegreenplace.net/2010/01/22/weighted-random-generation-in-python // If it's a bottleneck, rewrite it. 
var weightedTotal float64 = 0 totals := make([]float64, len(p.Members)) for i := 0; i < len(p.Members); i++ { weightedTotal += max(p.Members[i].Fitness, 0) totals[i] = weightedTotal } rnd := rand.Float64() * weightedTotal for index, element := range totals { if rnd < element { return index, p.Members[index] } } return -1, &Member{} } func min(a int, b int) int { if a < b { return a } return b } func max(a float64, b float64) float64 { if a > b { return a } return b } // crossPoint1 = splice point in parent1. func crossoverAtPoint(parent1 *Member, parent2 *Member, crossPoint1 int) (*Member, *Member) { // crossPoint2 = splice point in parent2. crossPoint2 := crossPoint1 + 1 foundMatch := false for { // say cross point 1 = 3, track length = 6 // parent 1 == 0-3, parent 2 = 4-5 if tracks.Compatible(parent1.Track[crossPoint1], parent2.Track[crossPoint2]) { // Increment so we splice *after* the cross point in parent 1 crossPoint1++ foundMatch = true break } crossPoint1++ if crossPoint1 >= len(parent1.Track) { break } if tracks.Compatible(parent1.Track[crossPoint1], parent2.Track[crossPoint2]) { foundMatch = true // Increment so we splice *after* the cross point in parent 1 crossPoint1++ break } crossPoint2++ if crossPoint2 >= len(parent2.Track) { break } } // swap the track pieces at the chosen point on track A and track B if foundMatch { c1, c2 := Swap(parent1, parent2, crossPoint1, crossPoint2) if os.Getenv("DEBUG_SWAPS") == "true" && crossPoint1 > 4 { fmt.Println("swapped at", crossPoint1, crossPoint2) fmt.Println("parent1") printTrack(parent1.Track) fmt.Println("parent2") printTrack(parent2.Track) fmt.Println("child1") printTrack(c1.Track) fmt.Println("child2") printTrack(c2.Track) } return c1, c2 } return parent1, parent2 } // crossoverOne crosses over the parent tracks. if point is -1, a random point // is chosen. 
(point is used for testing) func crossoverOne(parent1 *Member, parent2 *Member, point int) (*Member, *Member) { // choose a random point between the beginning and the end if point == -1 { minval := min(len(parent1.Track)-1, len(parent2.Track)-1) point = rand.Intn(minval) } return crossoverAtPoint(parent1, parent2, point) } func printTrack(t []tracks.Element) { for i, elem := range t { fmt.Printf("%d %s\n", i, elem.Segment.String()) } } // Crossover chooses two members of a pool and joins them at random. func (p *Pool) Crossover() *Pool { halfLen := len(p.Members) / 2 for i := 0; i < halfLen; i++ { // select 2 parents at random idx1, parent1 := p.Select() idx2, parent2 := p.Select() if idx1 == -1 || idx2 == -1 { continue } if idx1 == idx2 { // No point in crossing over with ourself continue } if rand.Float64() < CROSSOVER_PROBABILITY { // XXX delete parents child1, child2 := crossoverOne(parent1, parent2, -1) p.Members[idx1] = child1 p.Members[idx2] = child2 } } return p } // Swap creates two children out of the parents, by crossing over the tracks at // the given cross points. The sum of the two track lengths may be the same, // but the tracks themselves will change. func Swap(parent1 *Member, parent2 *Member, crossPoint1 int, crossPoint2 int) (*Member, *Member) { child1len := crossPoint1 + (len(parent2.Track) - crossPoint2) child2len := crossPoint2 + (len(parent1.Track) - crossPoint1) child1track := make([]tracks.Element, child1len) child2track := make([]tracks.Element, child2len) copy(child1track[:crossPoint1], parent1.Track[:crossPoint1]) // if we filled in to child1 from [crossPoint2:] we might have gaps in the // track. fill from the end of cross point 1, with the contents of cross // point 2. 
copy(child1track[crossPoint1:], parent2.Track[crossPoint2:]) copy(child2track[:crossPoint2], parent2.Track[:crossPoint2]) copy(child2track[crossPoint2:], parent1.Track[crossPoint1:]) child1 := &Member{ Id: fmt.Sprintf("iter_%s", uuid.New()), Track: child1track, } child2 := &Member{ Id: fmt.Sprintf("iter_%s", uuid.New()), Track: child2track, } return child1, child2 } func (p *Pool) Spawn(numParents int) *Pool { return nil }
{ directory = flag.String("directory", "/usr/local/rct", "Path to the folder storing RCT experiment data") }
identifier_body
genetic.go
// Code for genetic algorithms package genetic import ( "flag" "fmt" "log" "math/rand" "os" "os/exec" "path" "sort" "strconv" "strings" "sync" "time" "github.com/kevinburke/rct/Godeps/_workspace/src/github.com/pborman/uuid" "github.com/kevinburke/rct/tracks" ) var directory *string func init() { directory = flag.String("directory", "/usr/local/rct", "Path to the folder storing RCT experiment data") } // constants which may be altered to affect the ride runtime const PARENTS = 2 const FIT_PERCENTAGE = 0.2 const MUTATION_RATE = 0.05 // crossover with a probability of 0.6 (taken from the book & De Jong 1975) const CROSSOVER_PROBABILITY = 0.6 const POOL_SIZE = 500 const ITERATIONS = 1000 const PRINT_RESULTS_EVERY = 5 type ExperimentMetadata struct { Hash string Date time.Time Notes string Runtime time.Duration PoolSize int16 Iterations int32 CrossoverProbability float32 MutationRate float32 } func Run(packageRoot string) error { if directory == nil { return fmt.Errorf("invalid directory - need to specify it") } expDir := path.Join(*directory, "experiments") err := os.MkdirAll(expDir, 0755) if err != nil { return err } id := fmt.Sprintf("exp_%s", uuid.New()) expIdDir := path.Join(expDir, id) err = os.MkdirAll(expIdDir, 0755) if err != nil { return err } iterationsDir := path.Join(expIdDir, "iterations") err = os.MkdirAll(iterationsDir, 0755) if err != nil { return err } cmd := exec.Command("git", "rev-parse", "HEAD") cmd.Dir = packageRoot hashb, err := cmd.Output() if err != nil { return err } mtd := ExperimentMetadata{ Hash: strings.TrimSpace(string(hashb)), Date: time.Now().UTC(), Notes: "(none)", // XXX CrossoverProbability: CROSSOVER_PROBABILITY, PoolSize: POOL_SIZE, MutationRate: MUTATION_RATE, Iterations: ITERATIONS, } metadataPath := path.Join(expIdDir, "meta.json") err = encode(metadataPath, mtd) if err != nil { return err } fmt.Printf("Experiment %s\n======================================\n", id) pool := SeedPool(POOL_SIZE) pool.Id = id for i := 0; i < 
ITERATIONS; i++ { pool = pool.Crossover() pool.Mutate(MUTATION_RATE) pool.Evaluate() iterationDir := path.Join(iterationsDir, strconv.Itoa(i)) err = os.MkdirAll(iterationDir, 0755) if err != nil { return err } pool.Statistics(i, *directory) } return nil } type Pool struct { Id string Members []*Member } type Member struct { Id string Score int64 // Advantage or disadvantage in reproducing Fitness float64 Runtime time.Duration Track []tracks.Element ScoreData scoreData } type scoresArray [500]int64 func (a *scoresArray) Len() int { return len(a) } func (a *scoresArray) Swap(i int, j int) { a[i], a[j] = a[j], a[i] } func (a *scoresArray) Less(i, j int) bool { return a[i] < a[j] } func (p *Pool) Statistics(iteration int, outputDirectory string) { var scores scoresArray var highestScore int64 = -1 var worstScore int64 = 1000 * 1000 * 1000 bestMember := new(Member) worstMember := new(Member) for i := 0; i < len(p.Members); i++ { scores[i] = p.Members[i].Score if p.Members[i].Score > highestScore { highestScore = p.Members[i].Score bestMember = p.Members[i] } if p.Members[i].Score < worstScore { worstScore = p.Members[i].Score worstMember = p.Members[i] } } if iteration%PRINT_RESULTS_EVERY == 0 { middle := len(scores) / 2 median := (scores[middle] + scores[middle-1]) / 2 sort.Sort(&scores) bestScorer := fmt.Sprintf("\t(length: %d, collisions: %d, to completion: %d, negative speed points: %d)\n\n", bestMember.ScoreData.Length, bestMember.ScoreData.Collisions, bestMember.ScoreData.Distance, bestMember.ScoreData.NegativeSpeed) fmt.Printf("Iteration %d: %d members, best member %s has score %d, "+ "median %d, worst has score %d\n%s", iteration, len(p.Members), bestMember.Id, bestMember.Score, median, worstScore, bestScorer) if os.Getenv("DEBUG_BEST_TRACK") == "true" { if iteration%20 == 0 && iteration > 0 { for _, elem := range bestMember.Track { fmt.Printf("%s %t\n", elem.Segment.String(), elem.ChainLift) } fmt.Println("==================") } } } pth := 
path.Join(outputDirectory, "experiments", p.Id, "iterations", strconv.Itoa(iteration), fmt.Sprintf("%s.json", bestMember.Id)) err := encode(pth, bestMember) if err != nil { log.Print(err) } pth = path.Join(outputDirectory, "experiments", p.Id, "iterations", strconv.Itoa(iteration), fmt.Sprintf("%s.json", worstMember.Id)) err = encode(pth, worstMember) if err != nil { log.Print(err) } } // Create an initial pool func SeedPool(size int) *Pool { // 1. Create a station of length 10 // 2. For the start station piece, generate a list of possible pieces. // 3. Choose one at random. Advance a pointer one forward. // 4. Repeat for 50 pieces (Woodchip is length 108. Mischief is 123) members := make([]*Member, POOL_SIZE) for i := 0; i < POOL_SIZE; i++ { track := CreateStation() for j := STATION_LENGTH - 1; j < INITIAL_TRACK_LENGTH-STATION_LENGTH; j++ { poss := track[j].Possibilities() track = append(track, poss[rand.Intn(len(poss))]) } score, d := GetScore(track) members[i] = &Member{ Id: fmt.Sprintf("iter_%s", uuid.New()), Track: track, Score: score, ScoreData: d, } //if i%100 == 0 { //for _, elem := range track { //fmt.Println(elem.Segment.String()) //} //fmt.Printf("========\n") //} } return &Pool{Members: members} } func (p *Pool) Mutate(rate float64) { // for each ride: // for each position in the ride: // add in a possibility of mutation - addition // if addition: // find a piece that is compatible with both ends. // if no piece: // advance to the next track piece and try again } // Assign scores for every member of the pool func (p *Pool) Evaluate() { var wg sync.WaitGroup for i := 0; i < POOL_SIZE; i++ { wg.Add(1) go func(i int, track []tracks.Element) { p.Members[i].Score, p.Members[i].ScoreData = GetScore(track) wg.Done() }(i, p.Members[i].Track) } wg.Wait() // Assign fitness for every member. In the future, consider a smarter // algorithm higher score members a better chance of reproducing. 
for i := 0; i < POOL_SIZE; i++ { p.Members[i].Fitness = float64(p.Members[i].Score) } } // Select chooses a member of the population at random func (p *Pool) Select() (int, *Member) { // Stupid dumb version of this taken from here: // http://eli.thegreenplace.net/2010/01/22/weighted-random-generation-in-python // If it's a bottleneck, rewrite it. var weightedTotal float64 = 0 totals := make([]float64, len(p.Members)) for i := 0; i < len(p.Members); i++ { weightedTotal += max(p.Members[i].Fitness, 0) totals[i] = weightedTotal } rnd := rand.Float64() * weightedTotal for index, element := range totals { if rnd < element { return index, p.Members[index] } } return -1, &Member{} } func min(a int, b int) int { if a < b
return b } func max(a float64, b float64) float64 { if a > b { return a } return b } // crossPoint1 = splice point in parent1. func crossoverAtPoint(parent1 *Member, parent2 *Member, crossPoint1 int) (*Member, *Member) { // crossPoint2 = splice point in parent2. crossPoint2 := crossPoint1 + 1 foundMatch := false for { // say cross point 1 = 3, track length = 6 // parent 1 == 0-3, parent 2 = 4-5 if tracks.Compatible(parent1.Track[crossPoint1], parent2.Track[crossPoint2]) { // Increment so we splice *after* the cross point in parent 1 crossPoint1++ foundMatch = true break } crossPoint1++ if crossPoint1 >= len(parent1.Track) { break } if tracks.Compatible(parent1.Track[crossPoint1], parent2.Track[crossPoint2]) { foundMatch = true // Increment so we splice *after* the cross point in parent 1 crossPoint1++ break } crossPoint2++ if crossPoint2 >= len(parent2.Track) { break } } // swap the track pieces at the chosen point on track A and track B if foundMatch { c1, c2 := Swap(parent1, parent2, crossPoint1, crossPoint2) if os.Getenv("DEBUG_SWAPS") == "true" && crossPoint1 > 4 { fmt.Println("swapped at", crossPoint1, crossPoint2) fmt.Println("parent1") printTrack(parent1.Track) fmt.Println("parent2") printTrack(parent2.Track) fmt.Println("child1") printTrack(c1.Track) fmt.Println("child2") printTrack(c2.Track) } return c1, c2 } return parent1, parent2 } // crossoverOne crosses over the parent tracks. if point is -1, a random point // is chosen. (point is used for testing) func crossoverOne(parent1 *Member, parent2 *Member, point int) (*Member, *Member) { // choose a random point between the beginning and the end if point == -1 { minval := min(len(parent1.Track)-1, len(parent2.Track)-1) point = rand.Intn(minval) } return crossoverAtPoint(parent1, parent2, point) } func printTrack(t []tracks.Element) { for i, elem := range t { fmt.Printf("%d %s\n", i, elem.Segment.String()) } } // Crossover chooses two members of a pool and joins them at random. 
func (p *Pool) Crossover() *Pool { halfLen := len(p.Members) / 2 for i := 0; i < halfLen; i++ { // select 2 parents at random idx1, parent1 := p.Select() idx2, parent2 := p.Select() if idx1 == -1 || idx2 == -1 { continue } if idx1 == idx2 { // No point in crossing over with ourself continue } if rand.Float64() < CROSSOVER_PROBABILITY { // XXX delete parents child1, child2 := crossoverOne(parent1, parent2, -1) p.Members[idx1] = child1 p.Members[idx2] = child2 } } return p } // Swap creates two children out of the parents, by crossing over the tracks at // the given cross points. The sum of the two track lengths may be the same, // but the tracks themselves will change. func Swap(parent1 *Member, parent2 *Member, crossPoint1 int, crossPoint2 int) (*Member, *Member) { child1len := crossPoint1 + (len(parent2.Track) - crossPoint2) child2len := crossPoint2 + (len(parent1.Track) - crossPoint1) child1track := make([]tracks.Element, child1len) child2track := make([]tracks.Element, child2len) copy(child1track[:crossPoint1], parent1.Track[:crossPoint1]) // if we filled in to child1 from [crossPoint2:] we might have gaps in the // track. fill from the end of cross point 1, with the contents of cross // point 2. copy(child1track[crossPoint1:], parent2.Track[crossPoint2:]) copy(child2track[:crossPoint2], parent2.Track[:crossPoint2]) copy(child2track[crossPoint2:], parent1.Track[crossPoint1:]) child1 := &Member{ Id: fmt.Sprintf("iter_%s", uuid.New()), Track: child1track, } child2 := &Member{ Id: fmt.Sprintf("iter_%s", uuid.New()), Track: child2track, } return child1, child2 } func (p *Pool) Spawn(numParents int) *Pool { return nil }
{ return a }
conditional_block
msg-send.ts
/** * 发送消息组件 * 使用此组件需传入pciId * 流程5,9,20,25显示预约时间组件 * 流程 3,24,30 打回修改弹窗,发送消息给应聘者 * by wzb * */ import {Component, OnInit} from '@angular/core'; import {PcsNameGroup} from '../../../shared/pcsNameGroup'; import {LoggerService} from '../../../services/logger.service'; import {MeditorService} from '../../../services/meditor.service'; import {AlertMsg} from '../../../shared/alert/alert.component'; import {HttpService} from '../../../services/http.service'; import {errorCodes} from '../../../../constant/errorCodes'; import {urls} from '../../../../constant/urls'; import {config} from '../../../../constant/config'; import {isUndefined} from 'util'; import {AlcMsgInfo, CustomerMsgData, PcsDataModel, ResnData} from '../../../services/http.interface'; import {ReviewData} from '../../../models/pcsdata.model'; import {SIDEMSG_ID} from "../../../shared/left-side/left-side.component"; import {MODAL_ID} from "../../../shared/modal-dialog/modal-dialog.component"; import {ProgressSpinner} from "../../../shared/progress-spinner/progress-spinner"; @Component({ selector: 'msg-send', templateUrl: './msg-send.html', styleUrls: ['./msg-send.css'] }) export class MsgSend implements OnInit { public selectDate: string; // 日期 public startTime: string ; // 开始时间点 public endTime: string; // 结束时间点 public pcsName: string; // 流程名称 public msgSend: string; // 存放发送的消息 public alcData: any; // 存放应聘者信息 public normal = false;//发送普通消息为真 public pcsid: number; // 获取流程id public alcoid:string[] = []; // 存放openid public sentences: string[] = []; public commonMsg:boolean; //为false不显示常用语(打回修改不显示),true显示 public changeback = false; //是否是打回修改的消息 public alcMsgInfoArray: AlcMsgInfo[] = []; //存放oid,name数组 // 是否批量操作 public sendAll = false; /** * MSG_SysMsgType: 0, // 系统消息类型 * MSG_AppointmentType: 1, // 预约消息类型 * MSG_BusinessLetterType: 2, // 商调函消息类型 * MSG_ConfirmHealthCheck: 3, // 体检确认消息类型 * MSG_MgrSimpleMsgType: 4, // 管理员普通消息类型 * */ ngOnInit(): void { if (this.alcData.length === null || this.alcData.length <= 0 ){ 
this.showAlert('提示', '获取应聘者信息失败,请重试!'); return; } if(this.sendAll === false){ console.log(this.alcData[0]); this.pcsid = this.alcData[0].curpcsid; this.alcoid[0] = this.alcData[0].alcoid; this.alcMsgInfoArray[0]= { alcoid:this.alcData[0].alcoid, alcname:this.alcData[0].name }; this.pcsName = PcsNameGroup.getPcsName(this.pcsid); //判断是否打回修改 if(this.changeback){ // 打回修改,完成之后通知应聘者 //打回修改不显示常用语 this.commonMsg = false; }else{ this.commonMsg = true; this.getContent(this.pcsid); } } if(this.sendAll === true){ // 批量发送的情况,不显示当前流程 for (let i = 0 ; i < this.alcData.length ; i++){ this.alcoid[i] = this.alcData[i].alcoid; this.alcMsgInfoArray[i]= { alcoid:this.alcData[i].alcoid, alcname:this.alcData[i].name }; } this.commonMsg = true; this.getContent(errorCodes.custom.MSG_MgrSimpleMsgType); } } constructor(private writeLog: LoggerService, private m: MeditorService, private http: HttpService ) {} // 设置常用语 public setContent(content: string) { this.msgSend = content; } // 获取常用语 public getContent(pcsid: number){ if (!pcsid || pcsid < 0) { this.showAlert('注意', '流程id错误,获取常用语失败!'); return; } let reqtype: number; //根据流程id获取常用语类型 switch (pcsid){ //预约 pcsid 5,9,20,25 case 5: case 9: case 20: case 25: reqtype = errorCodes.custom.MSG_AppointmentType; this.startTime = '09:00'; this.endTime = '16:00'; this.selectDate = ''; //this.normal = false; break; default: reqtype = errorCodes.custom.MSG_MgrSimpleMsgType; break; } this.http.getJson<any>(config.common.getApiPrefix() + urls.api.getcomword, {reqtype: reqtype}).subscribe((resp) => { if(resp.errCode !== errorCodes.custom.MSG_SUCCESS){ this.showAlert('提示' , '获取常用语失败!错误码:' + resp.errCode); return; } this.sentences = resp.data; }, (err) => { console.log(err['status'], err['statusText']); this.showAlert(`${err['status']}`, `${err['statusText']}`); this.writeLog.E('获取常用语出错(getContent)' , '获取常用语失败!' 
+ JSON.stringify(err)); this.sentences = SENTENCES; return; }); } //发送通知 public onSubmit(){ if(typeof this.msgSend === 'undefined' || this.msgSend === ''){ this.showAlert('提示','发送消息不能为空!'); return; } if(this.normal === false && this.sendAll === false && (this.pcsid ===5 || this.pcsid ===9 || this.pcsid ===20 || this.pcsid ===25) && this.changeback === false){ //发送预约消息数据 console.log('发送预约消息'); this.sendAppointmentMsg(); return; } if(this.changeback && this.sendAll === false){ console.log('发送打回修改数据'); this.sendReviewMsg(); return; } if(this.normal === true && this.changeback === false) { //单独发送消息/批量发送消息 console.log('单独或批量发送自定义消息'); this.sendCustomerMsg(); } } //发送普通消息 public sendCustomerMsg(){ const cstMsg: CustomerMsgData = { alcInfo:this.alcMsgInfoArray , Info: this.msgSend }; this.m.push({id:MODAL_ID, body:{view:ProgressSpinner,outsideEvn:()=> {}, params:{color:'primary'}}}); this.http.postJson<any>(config.common.getApiPrefix()+ urls.api.sendSimpleMsgUrl , JSON.stringify(cstMsg)).subscribe( (resp)=>{ this.m.push({id:MODAL_ID, body:{hidden:true}}); if(resp.errCode === errorCodes.custom.MSG_SUCCESS){ this.showAlert('提示','消息发送成功!'); } else if(resp.errCode === errorCodes.custom.MSG_FAILD){ if(!resp.data || resp.data === null || resp.data === 'null'){ this.showAlert('提示','消息发送失败!'); return; } let nameArray = ''; for(let i = 0 ; i < resp.data.length ; i ++){ nameArray = nameArray + resp.data[i] + '、'; } this.showAlert('提示',`${nameArray}消息发送失败!`); return; }else{ this.m.push({id:MODAL_ID, body:{hidden:true}}); this.showAlert('提示','消息发送失败!错误码:' + resp.errCode); return; } },(err) =>{ this.m.push({id:MODAL_ID, body:{hidden:true}}); console.log(err['status'], err['statusText']); this.showAlert(`${err['status']}`, `${err['statusText']}`); this.writeLog.E('单独发送普通消息失败(sendAloneMsg)' , '单独发送普通消息失败!' 
+JSON.stringify(err)); return; } ); } // 发送预约消息 public sendAppointmentMsg(){ console.log(this.selectDate); if(isUndefined(this.selectDate) || this.selectDate === ''){ this.showAlert('提示' , '请选择日期!'); return; } if(isUndefined(this.startTime) || isUndefined(this.endTime)){ this.showAlert('提示','所选时间非法!'); return; } const appointmentMsg: ResnData = { StartTime:this.getUnix( new Date(this.selectDate).toLocaleDateString() , this.startTime) , EndTime:this.getUnix( new Date(this.selectDate).toLocaleDateString() , this.endTime) , Comment:this.msgSend }; const submitAppointmentMsg: PcsDataModel = { PcsId: this.pcsid , OperCode: errorCodes.custom.OPER_BOOK , AlcOpenid: this.alcoid[0] }; submitAppointmentMsg.ResnData = appointmentMsg; //显示缓冲 this.m.push({id:MODAL_ID, body:{view:ProgressSpinner,outsideEvn:()=> {}, params:{color:'primary'}}}); this.http.postJson<any>(config.common.getApiPrefix()+ urls.api.submitPcsDataUrl , JSON.stringify(submitAppointmentMsg)).subscribe( (resp)=>{ this.m.push({id:MODAL_ID, body:{hidden:true}}); if(resp.errCode !== errorCodes.custom.PCS_SUCCESS){ this.showAlert('提示','消息发送失败!错误码:' + resp.errCode); return; } this.m.push({id: SIDEMSG_ID,body:{hidden:true}}); this.showAlert('提示','消息发送成功!'); console.log(resp); },(err)=>{ this.m.push({id:MODAL_ID, body:{hidden:true}}); console.log(err['status'], err['statusText']); this.showAlert(`${err['status']}`, `${err['statusText']}`); this.writeLog.E('发送预约消息失败(sendAloneMsg)' , '发送预约消息失败!' 
+ JSON.stringify(err)); return; } ); } // 打回修改 有关应聘者流程(简历/商调函/计划生育证明)要通知应聘者。 public sendReviewMsg(){ console.log(this.alcoid); //let reviewMsg = new PcsDataModel(this.alcoid , this.msgSend); const reviewMsg: PcsDataModel = { PcsId: this.pcsid , OperCode: errorCodes.custom.OPER_NEED_TO_MODIFY , AlcOpenid: this.alcoid[0] }; reviewMsg.AnnexData = null; reviewMsg.ReviewData = new ReviewData(errorCodes.custom.OPER_NEED_TO_MODIFY); reviewMsg.Mark = this.msgSend; //提交打回修改流程数据 this.http.postJson<any>(config.common.getApiPrefix()+ urls.api.submitPcsDataUrl ,JSON.stringify(reviewMsg)).subscribe( (resp)=>{ if(resp.errCode !== errorCodes.custom.PCS_SUCCESS){ this.showAlert('提示','打回修改失败!错误码:' + resp.errCode); console.log('打回修改失败!错误码:' + resp.errCode); return; } console.log('打回修改流程数据提交成功!' + resp); this.m.push({id:MODAL_ID, body:{view:ProgressSpinner,outsideEvn:()=> {}, params:{color:'primary'}}}); //通知应聘者 this.http.getJson<any>(config.common.getApiPrefix()+ urls.api.sendSysMsg+'?alcid=' + this.alcoid[0] + '&pcsid=' + this.pcsid + '&opercode=' + errorCodes.custom.OPER_NEED_TO_MODIFY).subscribe( (resp)=>{ this.m.push({id:MODAL_ID, body:{hidden:true}}); if(resp.errCode === errorCodes.custom.MSG_SUCCESS){ this.showAlert('提示','发送成功!'); }else{ this.showAlert('提示','发送通知消息失败!错误码:' + resp.errCode); return; } },(err)=>{ this.m.push({id:MODAL_ID, body:{hidden:true}}); console.log(err['status'], err['statusText']); this.showAlert(`${err['status']}`, `${err['statusText']}`); this.writeLog.E('打回修改发送系统消息给应聘者失败(sendReviewMsg)' , '打回修改发送系统消息给应聘者失败!' 
+JSON.stringify(err)); return; } ); // 简历和商调函和计划生育打回修改,发给应聘者信息 if(this.pcsid === 3 || this.pcsid === 24 || this.pcsid === 30){ const cstMsg: CustomerMsgData = { alcInfo: this.alcMsgInfoArray , Info: this.msgSend}; this.m.push({id:MODAL_ID, body:{view:ProgressSpinner,outsideEvn:()=> {}, params:{color:'primary'}}}); this.http.postJson<any>(config.common.getApiPrefix()+ urls.api.sendSimpleMsgUrl , JSON.stringify(cstMsg)).subscribe( (resp)=>{ if(resp.errCode === errorCodes.custom.MSG_SUCCESS){ this.m.push({id:MODAL_ID, body:{hidden:true}}); this.showAlert('提示','消息发送成功!'); }else{ this.showAlert('提示','消息发送失败!错误码:' + resp.errCode ); return; } },(err) =>{ this.m.push({id:MODAL_ID, body:{hidden:true}});
console.log(err['status'], err['statusText']); this.showAlert(`${err['status']}`, `${err['statusText']}`); this.writeLog.E('单独发送普通消息打回修改失败(sendReviewMsg)' , '单独发送普通消息打回修改失败!' +JSON.stringify(err)); return; } ); } },(err)=>{ console.log(err['status'], err['statusText']); this.showAlert(`${err['status']}`, `${err['statusText']}`); this.writeLog.E('发送打回修改消息失败(sendReviewMsg)' , '发送预约消息失败!' + JSON.stringify(err)); return; }); } //将时间转化为时间戳 private getUnix(data: string, time: string): number { const timeArr = time.split(/:/); const tData = new Date(data); tData.setHours(parseInt(timeArr[0], 10)); tData.setMinutes(parseInt(timeArr[1], 10)); return tData.valueOf(); } public showAlert(title, content){ const alertMsg: AlertMsg = { title: title, content: content, confirmEvn: () => { console.log('click ok'); }, }; this.m.push({id: 'alert', body: alertMsg}); } } const SENTENCES: string[] = [ '你好,你已经通过我校面试,请准备上岗。', '抱歉,你的条件不符合我们的要求。', '对不起,您的话费不足,请充值。', 'hi,明天见。', '这个测试用例,我是想找一条非常非常长的句子,这样的话也许这个例子可以占两行!占两行真是太棒了!虽然一整条文本看起来有点丑...', '你好,你已经通过我校面试,请准备上岗。', '抱歉,你的条件不符合我们的要求。', '对不起,您的话费不足,请充值。', 'hi,明天见。' ];
random_line_split
msg-send.ts
/** * 发送消息组件 * 使用此组件需传入pciId * 流程5,9,20,25显示预约时间组件 * 流程 3,24,30 打回修改弹窗,发送消息给应聘者 * by wzb * */ import {Component, OnInit} from '@angular/core'; import {PcsNameGroup} from '../../../shared/pcsNameGroup'; import {LoggerService} from '../../../services/logger.service'; import {MeditorService} from '../../../services/meditor.service'; import {AlertMsg} from '../../../shared/alert/alert.component'; import {HttpService} from '../../../services/http.service'; import {errorCodes} from '../../../../constant/errorCodes'; import {urls} from '../../../../constant/urls'; import {config} from '../../../../constant/config'; import {isUndefined} from 'util'; import {AlcMsgInfo, CustomerMsgData, PcsDataModel, ResnData} from '../../../services/http.interface'; import {ReviewData} from '../../../models/pcsdata.model'; import {SIDEMSG_ID} from "../../../shared/left-side/left-side.component"; import {MODAL_ID} from "../../../shared/modal-dialog/modal-dialog.component"; import {ProgressSpinner} from "../../../shared/progress-spinner/progress-spinner"; @Component({ selector: 'msg-send', templateUrl: './msg-send.html', styleUrls: ['./msg-send.css'] }) export class MsgSend implements OnInit { public selectDate: string; // 日期 public startTime: string ; // 开始时间点 public endTime: string; // 结束时间点 public pcsName: string; // 流程名称 public msgSend: string; // 存放发送的消息 public alcData: any; // 存放应聘者信息 public normal = false;//发送普通消息为真 public pcsid: number; // 获取流程id public alcoid:string[] = []; // 存放openid public sentences: string[] = []; public commonMsg:boolean; //为false不显示常用语(打回修改不显示),true显示 public changeback = false; //是否是打回修改的消息 public alcMsgInfoArray: AlcMsgInfo[] = []; //存放oid,name数组 // 是否批量操作 public sendAll = false; /** * MSG_SysMsgType: 0, // 系统消息类型 * MSG_AppointmentType: 1, // 预约消息类型 * MSG_BusinessLetterType: 2, // 商调函消息类型 * MSG_ConfirmHealthCheck: 3, // 体检确认消息类型 * MSG_MgrSimpleMsgType: 4, // 管理员普通消息类型 * */ ngOnInit(): void { if (this.alcData.length === null || this.alcData.length <= 0 ){ 
this.showAlert('提示', '获取应聘者信息失败,请重试!'); return; } if(this.sendAll === false){ console.log(this.alcData[0]); this.pcsid = this.alcData[0].curpcsid; this.alcoid[0] = this.alcData[0].alcoid; this.alcMsgInfoArray[0]= { alcoid:this.alcData[0].alcoid, alcname:this.alcData[0].name }; this.pcsName = PcsNameGroup.getPcsName(this.pcsid); //判断是否打回修改 if(this.changeback){ // 打回修改,完成之后通知应聘者 //打回修改不显示常用语 this.commonMsg = false; }else{ this.commonMsg = true; this.getContent(this.pcsid); } } if(this.sendAll === true){ // 批量发送的情况,不显示当前流程 for (let i = 0 ; i < this.alcData.length ; i++){ this.alcoid[i] = this.alcData[i].alcoid; this.alcMsgInfoArray[i]= { alcoid:this.alcData[i].alcoid, alcname:this.alcData[i].name }; } this.commonMsg = true; this.getContent(errorCodes.custom.MSG_MgrSimpleMsgType); } } constructor(private writeLog: LoggerService, private m: MeditorService, private http: HttpService ) {} // 设置常用语 public setContent(content: string) { this.msgSend = content; } // 获取常用语 public getContent(pcsid: number){ if (!pcsid || pcsid < 0) { this.showAlert('注意', '流程id错误,获取常用语失败!'); return; } let reqtype: number; //根据流程id获取常用语类型 switch (pcsid){ //预约 pcsid 5,9,20,25 case 5: case 9: case 20: case 25: reqtype = errorCodes.custom.MSG_AppointmentType; this.startTime = '09:00'; this.endTime = '16:00'; this.selectDate = ''; //this.normal = false; break; default: reqtype = errorCodes.custom.MSG_MgrSimpleMsgType; break; } this.http.getJson<any>(config.common.getApiPrefix() + urls.api.getcomword, {reqtype: reqtype}).subscribe((resp) => { if(resp.errCode !== errorCodes.custom.MSG_SUCCESS){ this.showAlert('提示' , '获取常用语失败!错误码:' + resp.errCode); return; } this.sentences = resp.data; }, (err) => { console.log(err['status'], err['statusText']); this.showAlert(`${err['status']}`, `${err['statusText']}`); this.writeLog.E('获取常用语出错(getContent)' , '获取常用语失败!' 
+ JSON.stringify(err)); this.sentences = SENTENCES; return; }); } //发送通知 public onSubmit(){ if(typeof this.msgSend === 'undefined' || this.msgSend === ''){ this.showAlert('提示','发送消息不能为空!'); return; } if(this.normal === false && this.sendAll === false && (this.pcsid ===5 || this.pcsid ===9 || this.pcsid ===20 || this.pcsid ===25) && this.changeback === false){ //发送预约消息数据 console.log('发送预约消息'); this.sendAppointmentMsg(); return; } if(this.changeback && this.sendAll === false){ console.log('发送打回修改数据'); this.sendReviewMsg(); return; } if(this.normal === true && this.changeback === false) { //单独发送消息/批量发送消息 console.log('单独或批量发送自定义消息'); this.sendCustomerMsg(); } } //发送普通消息 public sendCustomerMsg(){ const cstMsg: CustomerMsgData = { alcInfo:this.alcMsgInfoArray , Info: this.msgSend }; this.m.push({id:MODAL_ID, body:{view:ProgressSpinner,outsideEvn:()=> {}, params:{color:'primary'}}}); this.http.postJson<any>(config.common.getApiPrefix()+ urls.api.sendSimpleMsgUrl , JSON.stringify(cstMsg)).subscribe( (resp)=>{ this.m.push({id:MODAL_ID, body:{hidden:true}}); if(resp.errCode === errorCodes.custom.MSG_SUCCESS){ this.showAlert('提示','消息发送成功!'); } else if(resp.errCode === errorCodes.custom.MSG_FAILD){ if(!resp.data || resp.data === null || resp.data === 'null'){ this.showAlert('提示','消息发送失败!'); return; } let nameArray = ''; for(let i = 0 ; i < resp.data.length ; i ++){ nameArray = nameArray + resp.data[i] + '、'; } this.showAlert('提示',`${nameArray}消息发送失败!`); return; }else{ this.m.push({id:MODAL_ID, body:{hidden:true}}); this.showAlert('提示','消息发送失败!错误码:' + resp.errCode); return; } },(err) =>{ this.m.push({id:MODAL_ID, body:{hidden:true}}); console.log(err['status'], err['statusText']); this.showAlert(`${err['status']}`, `${err['statusText']}`); this.writeLog.E('单独发送普通消息失败(sendAloneMsg)' , '单独发送普通消息失败!' 
+JSON.stringify(err)); return; } ); } // 发送预约消息 public sendAppointmentMsg(){ console.log(this.selectDate); if(isUndefined(this.selectDate) || this.selectDate === ''){ this.showAlert('提示' , '请选择日期!'); return; } if(isUndefined(this.startTime) || isUndefined(this.endTime)){ this.showAlert('提示','所选时间非法!'); return; } const appointmentMsg: ResnData = { StartTime:this.getUnix( new Date(this.selectDate).toLocaleDateString() , this.startTime) , EndTime:this.getUnix( new Date(this.selectDate).toLocaleDateString() , this.endTime) , Comment:this.msgSend }; const submitAppointmentMsg: PcsDataModel = { PcsId: this.pcsid , OperCode: errorCodes.custom.OPER_BOOK , AlcOpenid: this.alcoid[0] }; submitAppointmentMsg.ResnData = appointmentMsg; //显示缓冲 this.m.push({id:MODAL_ID, body:{view:ProgressSpinner,outsideEvn:()=> {}, params:{color:'primary'}}}); this.http.postJson<any>(config.common.getApiPrefix()+ urls.api.submitPcsDataUrl , JSON.stringify(submitAppointmentMsg)).subscribe( (resp)=>{ this.m.push({id:MODAL_ID, body:{hidden:true}}); if(resp.errCode !== errorCodes.custom.PCS_SUCCESS){ this.showAlert('提示','消息发送失败!错误码:' + resp.errCode); return; } this.m.push({id: SIDEMSG_ID,body:{hidden:true}}); this.showAlert('提示','消息发送成功!'); console.log(resp); },(err)=>{ this.m.push({id:MODAL_ID, body:{hidden:true}}); console.log(err['status'], err['statusText']); this.showAlert(`${err['status']}`, `${err['statusText']}`); this.writeLog.E('发送预约消息失败(sendAloneMsg)' , '发送预约消息失败!' 
+ JSON.stringify(err)); return; } ); } // 打回修改 有关应聘者流程(简历/商调函/计划生育证明)要通知应聘者。 public sendReviewMsg(){ console.log(this.alcoid); //let reviewMsg = new PcsDataModel(this.alcoid , this.msgSend); const reviewMsg: PcsDataModel = { PcsId: this.pcsid , OperCode: errorCodes.custom.OPER_NEED_TO_MODIFY , AlcOpenid: this.alcoid[0] }; reviewMsg.AnnexData = null; reviewMsg.ReviewData = new ReviewData(errorCodes.custom.OPER_NEED_TO_MODIFY); reviewMsg.Mark = this.msgSend; //提交打回修改流程数据 this.http.postJson<any>(config.common.getApiPrefix()+ urls.api.submitPcsDataUrl ,JSON.stringify(reviewMsg)).subscribe( (resp)=>{ if(resp.errCode !== errorCodes.custom.PCS_SUCCESS){ this.showAlert('提示','打回修改失败!错误码:' + resp.errCode); console.log('打回修改失败!错误码:' + resp.errCode); return; } console.log('打回修改流程数据提交成功!' + resp); this.m.push({id:MODAL_ID, body:{view:ProgressSpinner,outsideEvn:()=> {}, params:{color:'primary'}}}); //通知应聘者 this.http.getJson<any>(config.common.getApiPrefix()+ urls.api.sendSysMsg+'?alcid=' + this.alcoid[0] + '&pcsid=' + this.pcsid + '&opercode=' + errorCodes.custom.OPER_NEED_TO_MODIFY).subscribe( (resp)=>{ this.m.push({id:MODAL_ID, body:{hidden:true}}); if(resp.errCode === errorCodes.custom.MSG_SUCCESS){ this.showAlert('提示','发送成功!'); }else{ this.showAlert('提示','发送通知消息失败!错误码:' + resp.errCode); return; } },(err)=>{ this.m.push({id:MODAL_ID, body:{hidden:true}}); console.log(err['status'], err['statusText']); this.showAlert(`${err['status']}`, `${err['statusText']}`); this.writeLog.E('打回修改发送系统消息给应聘者失败(sendReviewMsg)' , '打回修改发送系统消息给应聘者失败!' 
+JSON.stringify(err)); return; } ); // 简历和商调函和计划生育打回修改,发给应聘者信息 if(this.pcsid === 3 || this.pcsid === 24 || this.pcsid === 30){ const cstMsg: CustomerMsgData = { alcInfo: this.alcMsgInfoArray , Info: this.msgSend}; this.m.push({id:MODAL_ID, body:{view:ProgressSpinner,outsideEvn:()=> {}, params:{color:'primary'}}}); this.http.postJson<any>(config.common.getApiPrefix()+ urls.api.sendSimpleMsgUrl , JSON.stringify(cstMsg)).subscribe( (resp)=>{ if(resp.errCode === errorCodes.custom.MSG_SUCCESS){ this.m.push({id:MODAL_ID, body:{hidden:true}}); this.showAlert('提示','消息发送成功!'); }else{
this.m.push({id:MODAL_ID, body:{hidden:true}}); console.log(err['status'], err['statusText']); this.showAlert(`${err['status']}`, `${err['statusText']}`); this.writeLog.E('单独发送普通消息打回修改失败(sendReviewMsg)' , '单独发送普通消息打回修改失败!' +JSON.stringify(err)); return; } ); } },(err)=>{ console.log(err['status'], err['statusText']); this.showAlert(`${err['status']}`, `${err['statusText']}`); this.writeLog.E('发送打回修改消息失败(sendReviewMsg)' , '发送预约消息失败!' + JSON.stringify(err)); return; }); } //将时间转化为时间戳 private getUnix(data: string, time: string): number { const timeArr = time.split(/:/); const tData = new Date(data); tData.setHours(parseInt(timeArr[0], 10)); tData.setMinutes(parseInt(timeArr[1], 10)); return tData.valueOf(); } public showAlert(title, content){ const alertMsg: AlertMsg = { title: title, content: content, confirmEvn: () => { console.log('click ok'); }, }; this.m.push({id: 'alert', body: alertMsg}); } } const SENTENCES: string[] = [ '你好,你已经通过我校面试,请准备上岗。', '抱歉,你的条件不符合我们的要求。', '对不起,您的话费不足,请充值。', 'hi,明天见。', '这个测试用例,我是想找一条非常非常长的句子,这样的话也许这个例子可以占两行!占两行真是太棒了!虽然一整条文本看起来有点丑...', '你好,你已经通过我校面试,请准备上岗。', '抱歉,你的条件不符合我们的要求。', '对不起,您的话费不足,请充值。', 'hi,明天见。' ];
this.showAlert('提示','消息发送失败!错误码:' + resp.errCode ); return; } },(err) =>{
conditional_block
msg-send.ts
/** * 发送消息组件 * 使用此组件需传入pciId * 流程5,9,20,25显示预约时间组件 * 流程 3,24,30 打回修改弹窗,发送消息给应聘者 * by wzb * */ import {Component, OnInit} from '@angular/core'; import {PcsNameGroup} from '../../../shared/pcsNameGroup'; import {LoggerService} from '../../../services/logger.service'; import {MeditorService} from '../../../services/meditor.service'; import {AlertMsg} from '../../../shared/alert/alert.component'; import {HttpService} from '../../../services/http.service'; import {errorCodes} from '../../../../constant/errorCodes'; import {urls} from '../../../../constant/urls'; import {config} from '../../../../constant/config'; import {isUndefined} from 'util'; import {AlcMsgInfo, CustomerMsgData, PcsDataModel, ResnData} from '../../../services/http.interface'; import {ReviewData} from '../../../models/pcsdata.model'; import {SIDEMSG_ID} from "../../../shared/left-side/left-side.component"; import {MODAL_ID} from "../../../shared/modal-dialog/modal-dialog.component"; import {ProgressSpinner} from "../../../shared/progress-spinner/progress-spinner"; @Component({ selector: 'msg-send', templateUrl: './msg-send.html', styleUrls: ['./msg-send.css'] }) export class MsgSend implements OnInit { public selectDate: string; // 日期 public startTime: string ; // 开始时间点 public endTime: string; // 结束时间点 public pcsName: string; // 流程名称 public msgSend: string; // 存放发送的消息 public alcData: any; // 存放应聘者信息 public normal = false;//发送普通消息为真 public pcsid: number; // 获取流程id public alcoid:string[] = []; // 存放openid public sentences: string[] = []; public commonMsg:boolean; //为false不显示常用语(打回修改不显示),true显示 public changeback = false; //是否是打回修改的消息 public alcMsgInfoArray: AlcMsgInfo[] = []; //存放oid,name数组 // 是否批量操作 public sendAll = false; /** * MSG_SysMsgType: 0, // 系统消息类型 * MSG_AppointmentType: 1, // 预约消息类型 * MSG_BusinessLetterType: 2, // 商调函消息类型 * MSG_ConfirmHealthCheck: 3, // 体检确认消息类型 * MSG_MgrSimpleMsgType: 4, // 管理员普通消息类型 * */ ngOnInit(): void { if (this.alcData.length === null || this.alcData.length <= 0 ){ 
this.showAlert('提示', '获取应聘者信息失败,请重试!'); return; } if(this.sendAll === false){ console.log(this.alcData[0]); this.pcsid = this.alcData[0].curpcsid; this.alcoid[0] = this.alcData[0].alcoid; this.alcMsgInfoArray[0]= { alcoid:this.alcData[0].alcoid, alcname:this.alcData[0].name }; this.pcsName = PcsNameGroup.getPcsName(this.pcsid); //判断是否打回修改 if(this.changeback){ // 打回修改,完成之后通知应聘者 //打回修改不显示常用语 this.commonMsg = false; }else{ this.commonMsg = true; this.getContent(this.pcsid); } } if(this.sendAll === true){ // 批量发送的情况,不显示当前流程 for (let i = 0 ; i < this.alcData.length ; i++){ this.alcoid[i] = this.alcData[i].alcoid; this.alcMsgInfoArray[i]= { alcoid:this.alcData[i].alcoid, alcname:this.alcData[i].name }; } this.commonMsg = true; this.getContent(errorCodes.custom.MSG_MgrSimpleMsgType); } } constructor(private writeLog: LoggerService, private m: MeditorService, private http: HttpService ) {} // 设置常用语 public setContent(content: string) { this.msgSend = content; } // 获取常用语 public getContent(pcsid: number){ if (!pcsid || pcsid < 0) { this.showAlert('注意', '流程id错误,获取常用语失败!'); return; } let reqtype: number; //根据流程id获取常用语类型 switch (pcsid){ //预约 pcsid 5,9,20,25 case 5: case 9: case 20: case 25: reqtype = errorCodes.custom.MSG_AppointmentType; this.startTime = '09:00'; this.endTime = '16:00'; this.selectDate = ''; //this.normal = false; break; default: reqtype = errorCodes.custom.MSG_MgrSimpleMsgType; break; } this.http.getJson<any>(config.common.getApiPrefix() + urls.api.getcomword, {reqtype: reqtype}).subscribe((resp) => { if(resp.errCode !== errorCodes.custom.MSG_SUCCESS){ this.showAlert('提示' , '获取常用语失败!错误码:' + resp.errCode); return; } this.sentences = resp.data; }, (err) => { console.log(err['status'], err['statusText']); this.showAlert(`${err['status']}`, `${err['statusText']}`); this.writeLog.E('获取常用语出错(getContent)' , '获取常用语失败!' 
+ JSON.stringify(err)); this.sentences = SENTENCES; return; }); } //发送通知 public onSubmit(){ if(typeof this.msgSend === 'undefined' || this.msgSend === ''){ this.showAlert('提示','发送消息不能为空!'); return; } if(this.normal === false && this.sendAll === false && (this.pcsid ===5 || this.pcsid ===9 || this.pcsid ===20 || this.pcsid ===25) && this.changeback === false){ //发送预约消息数据 console.log('发送预约消息'); this.sendAppointmentMsg(); return; } if(this.changeback && this.sendAll === false){ console.log('发送打回修改数据'); this.sendReviewMsg(); return; } if(this.normal === true && this.changeback === false) { //单独发送消息/批量发送消息 console.log('单独或批量发送自定义消息'); this.sendCustomerMsg(); } } //发送普通消息 public sendCustomerMsg(){ const cstMsg: CustomerMsgData = { alcInfo:this.alcMsgInfoArray , Info: this.msgSend }; this.m.push({id:MODAL_ID, body:{view:ProgressSpinner,outsideEvn:()=> {}, params:{color:'primary'}}}); this.http.postJson<any>(config.common.getApiPrefix()+ urls.api.sendSimpleMsgUrl , JSON.stringify(cstMsg)).subscribe( (resp)=>{ this.m.push({id:MODAL_ID, body:{hidden:true}}); if(resp.errCode === errorCodes.custom.MSG_SUCCESS){ this.showAlert('提示','消息发送成功!'); } else if(resp.errCode === errorCodes.custom.MSG_FAILD){ if(!resp.data || resp.data === null || resp.data === 'null'){ this.showAlert('提示','消息发送失败!'); return; } let nameArray = ''; for(let i = 0 ; i < resp.data.length ; i ++){ nameArray = nameArray + resp.data[i] + '、'; } this.showAlert('提示',`${nameArray}消息发送失败!`); return; }else{ this.m.push({id:MODAL_ID, body:{hidden:true}}); this.showAlert('提示','消息发送失败!错误码:' + resp.errCode); return; } },(err) =>{ this.m.push({id:MODAL_ID, body:{hidden:true}}); console.log(err['status'], err['statusText']); this.showAlert(`${err['status']}`, `${err['statusText']}`); this.writeLog.E('单独发送普通消息失败(sendAloneMsg)' , '单独发送普通消息失败!' 
+JSON.stringify(err)); return; } ); } // 发送预约消息 public sendAppointmentMsg(){ console.log(this.selectDate); if(isUndefined(this.selectDate) || this.selectDate === ''){ this.showAlert('提示' , '请选择日期!'); return; } if(isUndefined(this.startTime) || isUndefined(this.endTime)){ this.showAlert('提示','所选时间非法!'); return; } const appointmentMsg: ResnData = { StartTime:this.getUnix( new Date(this.selectDate).toLocaleDateString() , this.startTime) , EndTime:this.getUnix( new Date(this.selectDate).toLocaleDateString() , this.endTime) , Comment:this.msgSend }; const submitAppointmentMsg: PcsDataModel = { PcsId: this.pcsid , OperCode: errorCodes.custom.OPER_BOOK , AlcOpenid: this.alcoid[0] }; submitAppointmentMsg.ResnData = appointmentMsg; //显示缓冲 this.m.push({id:MODAL_ID, body:{view:ProgressSpinner,outsideEvn:()=>
'primary'}}}); this.http.postJson<any>(config.common.getApiPrefix()+ urls.api.submitPcsDataUrl , JSON.stringify(submitAppointmentMsg)).subscribe( (resp)=>{ this.m.push({id:MODAL_ID, body:{hidden:true}}); if(resp.errCode !== errorCodes.custom.PCS_SUCCESS){ this.showAlert('提示','消息发送失败!错误码:' + resp.errCode); return; } this.m.push({id: SIDEMSG_ID,body:{hidden:true}}); this.showAlert('提示','消息发送成功!'); console.log(resp); },(err)=>{ this.m.push({id:MODAL_ID, body:{hidden:true}}); console.log(err['status'], err['statusText']); this.showAlert(`${err['status']}`, `${err['statusText']}`); this.writeLog.E('发送预约消息失败(sendAloneMsg)' , '发送预约消息失败!' + JSON.stringify(err)); return; } ); } // 打回修改 有关应聘者流程(简历/商调函/计划生育证明)要通知应聘者。 public sendReviewMsg(){ console.log(this.alcoid); //let reviewMsg = new PcsDataModel(this.alcoid , this.msgSend); const reviewMsg: PcsDataModel = { PcsId: this.pcsid , OperCode: errorCodes.custom.OPER_NEED_TO_MODIFY , AlcOpenid: this.alcoid[0] }; reviewMsg.AnnexData = null; reviewMsg.ReviewData = new ReviewData(errorCodes.custom.OPER_NEED_TO_MODIFY); reviewMsg.Mark = this.msgSend; //提交打回修改流程数据 this.http.postJson<any>(config.common.getApiPrefix()+ urls.api.submitPcsDataUrl ,JSON.stringify(reviewMsg)).subscribe( (resp)=>{ if(resp.errCode !== errorCodes.custom.PCS_SUCCESS){ this.showAlert('提示','打回修改失败!错误码:' + resp.errCode); console.log('打回修改失败!错误码:' + resp.errCode); return; } console.log('打回修改流程数据提交成功!' 
+ resp); this.m.push({id:MODAL_ID, body:{view:ProgressSpinner,outsideEvn:()=> {}, params:{color:'primary'}}}); //通知应聘者 this.http.getJson<any>(config.common.getApiPrefix()+ urls.api.sendSysMsg+'?alcid=' + this.alcoid[0] + '&pcsid=' + this.pcsid + '&opercode=' + errorCodes.custom.OPER_NEED_TO_MODIFY).subscribe( (resp)=>{ this.m.push({id:MODAL_ID, body:{hidden:true}}); if(resp.errCode === errorCodes.custom.MSG_SUCCESS){ this.showAlert('提示','发送成功!'); }else{ this.showAlert('提示','发送通知消息失败!错误码:' + resp.errCode); return; } },(err)=>{ this.m.push({id:MODAL_ID, body:{hidden:true}}); console.log(err['status'], err['statusText']); this.showAlert(`${err['status']}`, `${err['statusText']}`); this.writeLog.E('打回修改发送系统消息给应聘者失败(sendReviewMsg)' , '打回修改发送系统消息给应聘者失败!' +JSON.stringify(err)); return; } ); // 简历和商调函和计划生育打回修改,发给应聘者信息 if(this.pcsid === 3 || this.pcsid === 24 || this.pcsid === 30){ const cstMsg: CustomerMsgData = { alcInfo: this.alcMsgInfoArray , Info: this.msgSend}; this.m.push({id:MODAL_ID, body:{view:ProgressSpinner,outsideEvn:()=> {}, params:{color:'primary'}}}); this.http.postJson<any>(config.common.getApiPrefix()+ urls.api.sendSimpleMsgUrl , JSON.stringify(cstMsg)).subscribe( (resp)=>{ if(resp.errCode === errorCodes.custom.MSG_SUCCESS){ this.m.push({id:MODAL_ID, body:{hidden:true}}); this.showAlert('提示','消息发送成功!'); }else{ this.showAlert('提示','消息发送失败!错误码:' + resp.errCode ); return; } },(err) =>{ this.m.push({id:MODAL_ID, body:{hidden:true}}); console.log(err['status'], err['statusText']); this.showAlert(`${err['status']}`, `${err['statusText']}`); this.writeLog.E('单独发送普通消息打回修改失败(sendReviewMsg)' , '单独发送普通消息打回修改失败!' +JSON.stringify(err)); return; } ); } },(err)=>{ console.log(err['status'], err['statusText']); this.showAlert(`${err['status']}`, `${err['statusText']}`); this.writeLog.E('发送打回修改消息失败(sendReviewMsg)' , '发送预约消息失败!' 
+ JSON.stringify(err)); return; }); } //将时间转化为时间戳 private getUnix(data: string, time: string): number { const timeArr = time.split(/:/); const tData = new Date(data); tData.setHours(parseInt(timeArr[0], 10)); tData.setMinutes(parseInt(timeArr[1], 10)); return tData.valueOf(); } public showAlert(title, content){ const alertMsg: AlertMsg = { title: title, content: content, confirmEvn: () => { console.log('click ok'); }, }; this.m.push({id: 'alert', body: alertMsg}); } } const SENTENCES: string[] = [ '你好,你已经通过我校面试,请准备上岗。', '抱歉,你的条件不符合我们的要求。', '对不起,您的话费不足,请充值。', 'hi,明天见。', '这个测试用例,我是想找一条非常非常长的句子,这样的话也许这个例子可以占两行!占两行真是太棒了!虽然一整条文本看起来有点丑...', '你好,你已经通过我校面试,请准备上岗。', '抱歉,你的条件不符合我们的要求。', '对不起,您的话费不足,请充值。', 'hi,明天见。' ];
{}, params:{color:
identifier_name
msg-send.ts
/** * 发送消息组件 * 使用此组件需传入pciId * 流程5,9,20,25显示预约时间组件 * 流程 3,24,30 打回修改弹窗,发送消息给应聘者 * by wzb * */ import {Component, OnInit} from '@angular/core'; import {PcsNameGroup} from '../../../shared/pcsNameGroup'; import {LoggerService} from '../../../services/logger.service'; import {MeditorService} from '../../../services/meditor.service'; import {AlertMsg} from '../../../shared/alert/alert.component'; import {HttpService} from '../../../services/http.service'; import {errorCodes} from '../../../../constant/errorCodes'; import {urls} from '../../../../constant/urls'; import {config} from '../../../../constant/config'; import {isUndefined} from 'util'; import {AlcMsgInfo, CustomerMsgData, PcsDataModel, ResnData} from '../../../services/http.interface'; import {ReviewData} from '../../../models/pcsdata.model'; import {SIDEMSG_ID} from "../../../shared/left-side/left-side.component"; import {MODAL_ID} from "../../../shared/modal-dialog/modal-dialog.component"; import {ProgressSpinner} from "../../../shared/progress-spinner/progress-spinner"; @Component({ selector: 'msg-send', templateUrl: './msg-send.html', styleUrls: ['./msg-send.css'] }) export class MsgSend implements OnInit { public selectDate: string; // 日期 public startTime: string ; // 开始时间点 public endTime: string; // 结束时间点 public pcsName: string; // 流程名称 public msgSend: string; // 存放发送的消息 public alcData: any; // 存放应聘者信息 public normal = false;//发送普通消息为真 public pcsid: number; // 获取流程id public alcoid:string[] = []; // 存放openid public sentences: string[] = []; public commonMsg:boolean; //为false不显示常用语(打回修改不显示),true显示 public changeback = false; //是否是打回修改的消息 public alcMsgInfoArray: AlcMsgInfo[] = []; //存放oid,name数组 // 是否批量操作 public sendAll = false; /** * MSG_SysMsgType: 0, // 系统消息类型 * MSG_AppointmentType: 1, // 预约消息类型 * MSG_BusinessLetterType: 2, // 商调函消息类型 * MSG_ConfirmHealthCheck: 3, // 体检确认消息类型 * MSG_MgrSimpleMsgType: 4, // 管理员普通消息类型 * */ ngOnInit(): void { if (this.alcData.length === null || this.alcData.length <= 0 ){ 
this.showAlert('提示', '获取应聘者信息失败,请重试!'); return; } if(this.sendAll === false){ console.log(this.alcData[0]); this.pcsid = this.alcData[0].curpcsid; this.alcoid[0] = this.alcData[0].alcoid; this.alcMsgInfoArray[0]= { alcoid:this.alcData[0].alcoid, alcname:this.alcData[0].name }; this.pcsName = PcsNameGroup.getPcsName(this.pcsid); //判断是否打回修改 if(this.changeback){ // 打回修改,完成之后通知应聘者 //打回修改不显示常用语 this.commonMsg = false; }else{ this.commonMsg = true; this.getContent(this.pcsid); } } if(this.sendAll === true){ // 批量发送的情况,不显示当前流程 for (let i = 0 ; i < this.alcData.length ; i++){ this.alcoid[i] = this.alcData[i].alcoid; this.alcMsgInfoArray[i]= { alcoid:this.alcData[i].alcoid, alcname:this.alcData[i].name }; } this.commonMsg = true; this.getContent(errorCodes.custom.MSG_MgrSimpleMsgType); } } constructor(private writeLog: LoggerService, private m: MeditorService, private http: HttpService ) {} // 设置常用语 public setContent(content: string) { this.msgSend = content; } // 获取常用语 public getContent(pcsid: number){ if (!pcsid || pcsid < 0) { this.showAlert('注意', '流程id错误,获取常用语失败!'); return; } let reqtype: number; //根据流程id获取常用语类型 switch (pcsid){ //预约 pcsid 5,9,20,25 case 5: case 9: case 20: case 25: reqtype = errorCodes.custom.MSG_AppointmentType; this.startTime = '09:00'; this.endTime = '16:00'; this.selectDate = ''; //this.normal = false; break; default: reqtype = errorCodes.custom.MSG_MgrSimpleMsgType; break; } this.http.getJson<any>(config.common.getApiPrefix() + urls.api.getcomword, {reqtype: reqtype}).subscribe((resp) => { if(resp.errCode !== errorCodes.custom.MSG_SUCCESS){ this.showAlert('提示' , '获取常用语失败!错误码:' + resp.errCode); return; } this.sentences = resp.data; }, (err) => { console.log(err['status'], err['statusText']); this.showAlert(`${err['status']}`, `${err['statusText']}`); this.writeLog.E('获取常用语出错(getContent)' , '获取常用语失败!' 
+ JSON.stringify(err)); this.sentences = SENTENCES; return; }); } //发送通知 public onSubmit(){ if(typeof this.msgSend === 'undefined' || this.msgSend === ''){ this.showAlert('提示','发送消息不能为空!'); return; } if(this.normal === false && this.sendAll === false && (this.pcsid ===5 || this.pcsid ===9 || this.pcsid ===20 || this.pcsid ===25) && this.changeback === false){ //发送预约消息数据 console.log('发送预约消息'); this.sendAppointmentMsg(); return; } if(this.changeback && this.sendAll === false){ console.log('发送打回修改数据'); this.sendReviewMsg(); return; } if(this.normal === true && this.changeback === false) { //单独发送消息/批量发送消息 console.log('单独或批量发送自定义消息'); this.sendCustomerMsg(); } } //发送普通消息 public sendCustomerMsg(){ const cstMsg: CustomerMsgData = { alcInfo:this.alcMsgInfoArray , Info: this.msgSend }; this.m.push({id:MODAL_ID, body:{view:ProgressSpinner,outsideEvn:()=> {}, params:{color:'primary'}}}); this.http.postJson<any>(config.common.getApiPrefix()+ urls.api.sendSimpleMsgUrl , JSON.stringify(cstMsg)).subscribe( (resp)=>{ this.m.push({id:MODAL_ID, body:{hidden:true}}); if(resp.errCode === errorCodes.custom.MSG_SUCCESS){ this.showAlert('提示','消息发送成功!'); } else if(resp.errCode === errorCodes.custom.MSG_FAILD){ if(!resp.data || resp.data === null || resp.data === 'null'){ this.showAlert('提示','消息发送失败!'); return; } let nameArray = ''; for(let i = 0 ; i < resp.data.length ; i ++){ nameArray = nameArray + resp.data[i] + '、'; } this.showAlert('提示',`${nameArray}消息发送失败!`); return; }else{ this.m.push({id:MODAL_ID, body:{hidden:true}}); this.showAlert('提示','消息发送失败!错误码:' + resp.errCode); return; } },(err) =>{ this.m.push({id:MODAL_ID, body:{hidden:true}}); console.log(err['status'], err['statusText']); this.showAlert(`${err['status']}`, `${err['statusText']}`); this.writeLog.E('单独发送普通消息失败(sendAloneMsg)' , '单独发送普通消息失败!' 
+JSON.stringify(err)); return; } ); } // 发送预约消息 public sendAppointmentMsg(){ console.log(this.selectDate); if(isUndefined(this.selectDate) || this.selectDate === ''){ this.showAlert('提示' , '请选择日期!'); return; } if(isUndefined(this.startTime) || isUndefined(this.endTime)){ this.showAlert('提示','所选时间非法!'); return; } const appointmentMsg: ResnData = { StartTime:this.getUnix( new Date(this.selectDate).toLocaleDateString() , this.startTime) , EndTime:this.getUnix( new Date(this.selectDate).toLocaleDateString() , this.endTime) , Comment:this.msgSend }; const submitAppointmentMsg: PcsDataModel = { PcsId: this.pcsid , OperCode: errorCodes.custom.OPER_BOOK , AlcOpenid: this.alcoid[0] }; submitAppointmentMsg.ResnData = appointmentMsg; //显示缓冲 this.m.push({id:MODAL_ID, body:{view:ProgressSpinner,outsideEvn:()=> {}, params:{color:'primary'}}}); this.http.postJson<any>(config.common.getApiPrefix()+ urls.api.submitPcsDataUrl , JSON.stringify(submitAppointmentMsg)).subscribe( (resp)=>{ this.m.push({id:MODAL_ID, body:{hidden:true}}); if(resp.errCode !== errorCodes.custom.PCS_SUCCESS){ this.showAlert('提示','消息发送失败!错误码:' + resp.errCode); return; } this.m.push({id: SIDEMSG_ID,body:{hidden:true}}); this.showAlert('提示','消息发送成功!'); console.log(resp); },(err)=>{ this.m.push({id:MODAL_ID, body:{hidden:true}}); console.log(err['status'], err['statusText']); this.showAlert(`${err['status']}`, `${err['statusText']}`); this.writeLog.E('发送预约消息失败(sendAloneMsg)' , '发送预约消息失败!' 
+ JSON.stringify(err)); return; } ); } // 打回修改 有关应聘者流程(简历/商调函/计划生育证明)要通知应聘者。 public sendReviewMsg(){ console.log(this.alcoid); //let reviewMsg = new PcsDataModel(this.alcoid , this.msgSend); const reviewMsg: PcsDataModel = { PcsId: this.pcsid , OperCode: errorCodes.custom.OPER_NEED_TO_MODIFY , AlcOpenid: this.alcoid[0] }; reviewMsg.AnnexData = null; reviewMsg.ReviewData = new ReviewData(errorCodes.custom.OPER_NEED_TO_MODIFY); reviewMsg.Mark = this.msgSend; //提交打回修改流程数据 this.http.postJson<any>(config.common.getApiPrefix()+ urls.api.submitPcsDataUrl ,JSON.stringify(reviewMsg)).subscribe( (resp)=>{ if(resp.errCode !== errorCodes.custom.PCS_SUCCESS){ this.showAlert('提示','打回修改失败!错误码:' + resp.errCode); console.log('打回修改失败!错误码:' + resp.errCode); return; } console.log('打回修改流程数据提交成功!' + resp); this.m.push({id:MODAL_ID, body:{view:ProgressSpinner,outsideEvn:()=> {}, params:{color:'primary'}}}); //通知应聘者 this.http.getJson<any>(config.common.getApiPrefix()+ urls.api.sendSysMsg+'?alcid=' + this.alcoid[0] + '&pcsid=' + this.pcsid + '&opercode=' + errorCodes.custom.OPER_NEED_TO_MODIFY).subscribe( (resp)=>{ this.m.push({id:MODAL_ID, body:{hidden:true}}); if(resp.errCode === errorCodes.custom.MSG_SUCCESS){ this.showAlert('提示','发送成功!'); }else{ this.showAlert('提示','发送通知消息失败!错误码:' + resp.errCode); return; } },(err)=>{ this.m.push({id:MODAL_ID, body:{hidden:true}}); console.log(err['status'], err['statusText']); this.showAlert(`${err['status']}`, `${err['statusText']}`); this.writeLog.E('打回修改发送系统消息给应聘者失败(sendReviewMsg)' , '打回修改发送系统消息给应聘者失败!' 
+JSON.stringify(err)); return; } ); // 简历和商调函和计划生育打回修改,发给应聘者信息 if(this.pcsid === 3 || this.pcsid === 24 || this.pcsid === 30){ const cstMsg: CustomerMsgData = { alcInfo: this.alcMsgInfoArray , Info: this.msgSend}; this.m.push({id:MODAL_ID, body:{view:ProgressSpinner,outsideEvn:()=> {}, params:{color:'primary'}}}); this.http.postJson<any>(config.common.getApiPrefix()+ urls.api.sendSimpleMsgUrl , JSON.stringify(cstMsg)).subscribe( (resp)=>{ if(resp.errCode === errorCodes.custom.MSG_SUCCESS){ this.m.push({id:MODAL_ID, body:{hidden:true}}); this.showAlert('提示','消息发送成功!'); }else{ this.showAlert('提示','消息发送失败!错误码:' + resp.errCode ); return; } },(err) =>{ this.m.push({id:MODAL_ID, body:{hidden:true}}); console.log(err['status'], err['statusText']); this.showAlert(`${err['status']}`, `${err['statusText']}`); this.writeLog.E('单独发送普通消息打回修改失败(sendReviewMsg)' , '单独发送普通消息打回修改失败!' +JSON.stringify(err)); return; } ); } },(err)=>{ console.log(err['status'], err['statusText']); this.showAlert(`${err['status']}`, `${err['statusText']}`); this.writeLog.E('发送打回修改消息失败(sendReviewMsg)' , '发送预约消息失败!' + JSON.stringify(err)); return; }); } //将时间转化为时间戳 private getUnix(data: string, time: string): number { const timeArr = time.split(/:/); const tData = new Date(data); tData.setHours(parseInt(timeArr[0], 10)); tData.setMinutes(parseInt(timeArr[1], 10)); return tData.valueOf(); } public showAlert(title, content){ const alertMsg: AlertMsg = { title: title, content: content, confirmEvn: () => { console.log('click ok'); }, }; this.m.push({id: 'alert', body: alertMsg}); } } const SENTENCES: string[] = [ '你好,你已经通过我校面试,请准备上岗。', '抱歉,你的条件不符合我们的要求。', '对不起,您的话费不足,请充值。', 'hi,明天见。', '这个测试用例,我是想找一条非常非常长的句子,这样的话也许这个例子可以占两行!占两行真是太棒了!虽然一整条文本看起来有点丑...', '你好,你已经通过我校面试,请准备上岗。', '抱歉,你的条件不符合我们的要求。', '对不起,您的话费不足,请充值。', 'hi,明天见。' ];
identifier_body
trainer.rs
use crate::models::unigram::{lattice::Lattice, model::Unigram}; use crate::tokenizer::{AddedToken, Result, Trainer}; use crate::utils::parallelism::*; use crate::utils::progress::{ProgressBar, ProgressStyle}; use log::debug; use serde::{Deserialize, Serialize}; use std::cmp::Reverse; use std::collections::{HashMap, HashSet}; use std::convert::TryInto; // A token and a score type SentencePiece = (String, f64); // A full sentence or word + it's count within the dataset type Sentence = (String, u32); fn digamma(mut x: f64) -> f64 { let mut result = 0.0; while x < 7.0 { result -= 1.0 / x; x += 1.0; } x -= 1.0 / 2.0; let xx = 1.0 / x; let xx2 = xx * xx; let xx4 = xx2 * xx2; result += x.ln() + (1.0 / 24.0) * xx2 - 7.0 / 960.0 * xx4 + (31.0 / 8064.0) * xx4 * xx2 - (127.0 / 30720.0) * xx4 * xx4; result } #[derive(thiserror::Error, Debug)] pub enum UnigramTrainerError { #[error("The vocabulary is not large enough to contain all chars")] VocabularyTooSmall, } fn to_log_prob(pieces: &mut [SentencePiece]) { let sum: f64 = pieces.iter().map(|(_, score)| score).sum(); let logsum = sum.ln(); for (_, score) in pieces.iter_mut() { *score = score.ln() - logsum; } } /// A `UnigramTrainer` can train a `Unigram` model from `word_counts`. 
#[non_exhaustive] #[derive(Builder, Debug, Clone, Serialize, Deserialize)] pub struct UnigramTrainer { #[builder(default = "true")] pub show_progress: bool, #[builder(default = "8000")] pub vocab_size: u32, #[builder(default = "2")] pub n_sub_iterations: u32, #[builder(default = "0.75")] pub shrinking_factor: f64, #[builder(default = "vec![]")] pub special_tokens: Vec<AddedToken>, #[builder(default = "HashSet::new()")] pub initial_alphabet: HashSet<char>, #[builder(default = "None")] pub unk_token: Option<String>, #[builder(default = "16")] pub max_piece_length: usize, #[builder(default = "1_000_000")] seed_size: usize, #[builder(default = "HashMap::new()")] words: HashMap<String, u32>, } impl Default for UnigramTrainer { fn default() -> Self { Self::builder().build().unwrap() } } impl UnigramTrainer { pub fn builder() -> UnigramTrainerBuilder { UnigramTrainerBuilder::default() } /// Setup a progress bar if asked to show progress fn setup_progress(&self) -> Option<ProgressBar> { if self.show_progress { let p = ProgressBar::new(0); p.set_style( ProgressStyle::default_bar() .template("[{elapsed_precise}] {msg:<40!} {wide_bar} {pos:<9!}/{len:>9!}"), ); Some(p) } else { None } } fn is_valid_sentencepiece(&self, char_string: &[char]) -> bool { // Checks string length // Space not in the substring, numbers, hiragana and more should be taken // care of within pre_tokenizers. 
// https://github.com/google/sentencepiece/blob/26be9516cd81d5315ee31c48d2438018e0eab879/src/trainer_interface.cc#L203 let n = char_string.len(); if char_string.is_empty() || n > self.max_piece_length { return false; } true } fn finalize(&self, model: Unigram, required_chars: HashSet<String>) -> Result<Unigram> { let mut min_score_penalty = 0.0; let min_score_penalty_delta = 0.0001; let mut pieces: Vec<(String, f64)> = vec![]; let mut inserted: HashSet<String> = HashSet::new(); // We don't want to include the <UNK> that was used to train inserted.insert("<UNK>".into()); let existing_pieces: HashMap<String, f64> = model.iter().cloned().collect(); for c in required_chars { if let Some(t) = existing_pieces.get(&c) { inserted.insert(c.clone()); pieces.push((c, *t)); } else { let score = model.min_score + min_score_penalty; inserted.insert(c.clone()); pieces.push((c, score)); min_score_penalty += min_score_penalty_delta; } } let (unk_id, need_add_unk) = if let Some(ref unk) = self.unk_token { let unk_id = self.special_tokens.iter().enumerate().find_map(|(i, t)| { if t.content == *unk { Some(i) } else { None } }); match unk_id { Some(id) => (Some(id), false), None => (Some(0), true), } } else { (None, false) }; let vocab_size_without_special_tokens = if need_add_unk { self.vocab_size as usize - self.special_tokens.len() - 1 } else { self.vocab_size as usize - self.special_tokens.len() }; for (token, score) in model.iter() { if inserted.contains::<str>(token) { continue; } inserted.insert(token.to_string()); pieces.push((token.to_string(), if score.is_nan() { 0.0 } else { *score })); if pieces.len() == vocab_size_without_special_tokens { break; } } pieces.sort_by(|(_, a), (_, b)| b.partial_cmp(a).unwrap()); // Insert the necessary tokens let mut special_tokens = self .special_tokens .iter() .map(|t| (t.content.clone(), 0.0)) .collect::<Vec<_>>(); if need_add_unk { special_tokens.insert(0, (self.unk_token.clone().unwrap(), 0.0)); } Unigram::from( 
special_tokens.into_iter().chain(pieces).collect(), unk_id, model.byte_fallback(), ) } fn required_chars(&self, word_counts: &[Sentence]) -> HashSet<String> { word_counts .iter() .flat_map(|(s, _count)| s.chars()) .chain(self.initial_alphabet.iter().copied()) .map(|c| c.to_string()) .collect() } fn make_seed_sentence_pieces( &self, sentences: &[Sentence], _progress: &Option<ProgressBar>, ) -> Vec<SentencePiece> { // Put all sentences in a string, separated by \0 let total: usize = sentences .iter() .map(|(s, _)| s.chars().count()) .sum::<usize>() + sentences.len(); let mut flat_string = String::with_capacity(total); let mut all_chars: HashMap<char, u32> = HashMap::new(); let c_sentence_boundary = '\0'; let k_sentence_boundary = '\0'.to_string(); for (string, n) in sentences { if string.is_empty() { continue; } flat_string.push_str(string); // XXX // Comment suggests we add sentence boundary, but it seems to be missing from actual // code in spm. flat_string.push_str(&k_sentence_boundary); for c in string.chars() { if c != c_sentence_boundary { *all_chars.entry(c).or_insert(0) += n; } } } flat_string.shrink_to_fit(); #[cfg(feature = "esaxx_fast")] let suffix = esaxx_rs::suffix(&flat_string).unwrap(); #[cfg(not(feature = "esaxx_fast"))] let suffix = esaxx_rs::suffix_rs(&flat_string).unwrap(); // Basic chars need to be in sentence pieces. 
let mut seed_sentencepieces: Vec<SentencePiece> = vec![]; let mut sall_chars: Vec<_> = all_chars.into_iter().map(|(a, b)| (b, a)).collect(); // Reversed order sall_chars.sort_by_key(|&a| Reverse(a)); let mut substr_index: Vec<_> = suffix .iter() .filter_map(|(string, freq)| { if string.len() <= 1 { return None; } if string.contains(&c_sentence_boundary) { return None; } if !self.is_valid_sentencepiece(string) { return None; } let score = freq * string.len() as u32; // if let Some(p) = &progress { // p.inc(1); // } Some((score, string)) }) .collect(); // Fill seed_sentencepieces for (count, character) in sall_chars { seed_sentencepieces.push((character.to_string(), count.into())); } // sort by decreasing score substr_index.sort_by_key(|&a| Reverse(a)); for (score, char_string) in substr_index { // Just in case assert!(self.is_valid_sentencepiece(char_string)); let string: String = char_string.iter().collect(); seed_sentencepieces.push((string, score.into())); if seed_sentencepieces.len() >= self.seed_size { break; } } to_log_prob(&mut seed_sentencepieces); seed_sentencepieces } fn prune_sentence_pieces( &self, model: &Unigram, pieces: &[SentencePiece], sentences: &[Sentence], ) -> Vec<SentencePiece> { let mut always_keep = vec![true; pieces.len()]; let mut alternatives: Vec<Vec<usize>> = vec![Vec::new(); pieces.len()]; let bos_id = pieces.len() + 1; let eos_id = pieces.len() + 2; // First, segments the current sentencepieces to know // how each sentencepiece is resegmented if this sentencepiece is removed // from the vocabulary. // To do so, we take the second best segmentation of sentencepiece[i]. // alternatives[i] stores the sequence of second best sentencepieces. for (id, (token, _score)) in pieces.iter().enumerate() { // Always keep unk. 
if id == 0 { always_keep[id] = false; continue; } let mut lattice = Lattice::from(token, bos_id, eos_id); model.populate_nodes(&mut lattice); let nbests = lattice.nbest(2); if nbests.len() == 1 { always_keep[id] = true; } else if nbests[0].len() >= 2 { always_keep[id] = false; } else if nbests[0].len() == 1 { always_keep[id] = true; for node in &nbests[1] { let alt_id = node.borrow().id; alternatives[id].push(alt_id); } } } // Second, segments all sentences to compute likelihood // with a unigram language model. inverted[i] stores // the set of sentence index where the sentencepieces[i] appears. let chunk_size = std::cmp::max(sentences.len() / current_num_threads(), 1); let indexed_sentences: Vec<(usize, &Sentence)> = sentences.iter().enumerate().collect(); let collected: (f64, Vec<f64>, Vec<Vec<usize>>) = indexed_sentences .maybe_par_chunks(chunk_size) .map(|enumerated_sentence_count_chunk| { let mut vsum = 0.0; let mut freq: Vec<f64> = vec![0.0; pieces.len()]; let mut inverted: Vec<Vec<usize>> = vec![Vec::new(); pieces.len()]; for (i, (sentence, count)) in enumerated_sentence_count_chunk { let mut lattice = Lattice::from(sentence, bos_id, eos_id); model.populate_nodes(&mut lattice); vsum += *count as f64; for node_ref in lattice.viterbi() { let id = node_ref.borrow().id; freq[id] += *count as f64; inverted[id].push(*i); } } (vsum, freq, inverted) }) .reduce( || (0.0, vec![0.0; pieces.len()], vec![Vec::new(); pieces.len()]), |(vsum, freq, inverted), (lvsum, lfreq, linverted)| { ( vsum + lvsum, freq.iter() .zip(lfreq) .map(|(global_el, local_el)| global_el + local_el) .collect(), inverted .iter() .zip(linverted) .map(|(global_el, local_el)| [&global_el[..], &local_el[..]].concat()) .collect(), ) }, ); let (vsum, freq, inverted) = collected; let sum: f64 = freq.iter().sum(); let logsum = sum.ln(); let mut candidates: Vec<(usize, f64)> = vec![]; let mut new_pieces: Vec<SentencePiece> = Vec::with_capacity(self.vocab_size as usize); new_pieces.push(pieces[0].clone()); 
// Finally, computes how likely the LM likelihood is reduced if // the sentencepiece[i] is removed from the vocabulary. // Since the exact computation of loss is difficult, we compute the // loss approximately by assuming that all sentencepiece[i] in the sentences // are replaced with alternatives[i] when sentencepiece[i] is removed. for (id, (token, score)) in pieces.iter().enumerate() { if id == 0 { continue; } if freq[id] == 0.0 && !always_keep[id] { // not found in Viterbi path. Can remove this entry safely. continue; } else if alternatives[id].is_empty() { // no alternatives. Keeps this entry. new_pieces.push((token.to_string(), *score)); } else { let mut f = 0.0; // the frequency of pieces[i]; for n in &inverted[id] { let score = sentences[*n].1 as f64; f += score; } // TODO: Temporary hack to avoid Nans. if f == 0.0 || f.is_nan() { // new_pieces.push((token.to_string(), *score)); continue; } f /= vsum; // normalizes by all sentence frequency. let logprob_sp = freq[id].ln() - logsum; // After removing the sentencepiece[i], its frequency freq[i] is // re-assigned to alternatives. // new_sum = current_sum - freq[i] + freq[i] * alternatives.size() // = current_sum + freq[i] (alternatives - 1) let logsum_alt = (sum + freq[id] * (alternatives.len() - 1) as f64).ln(); // The frequencies of altenatives are increased by freq[i]. let mut logprob_alt = 0.0; for n in &alternatives[id] { logprob_alt += (freq[*n] + freq[id]).ln() - logsum_alt; } // loss: the diff of likelihood after removing the sentencepieces[i]. 
                let loss = f * (logprob_sp - logprob_alt);
                if loss.is_nan() {
                    // NOTE(review): a NaN loss aborts with an empty panic
                    // message; a descriptive message would ease debugging.
                    panic!("");
                }

                candidates.push((id, loss));
            }
        }
        // Keep 10% more pieces than the target vocabulary and never prune below
        // that, so the final `finalize` step still has room to trim precisely.
        let desired_vocab_size: usize = (self.vocab_size as usize * 11) / 10; // * 1.1
        let pruned_size: usize = ((pieces.len() as f64) * self.shrinking_factor) as usize;
        let pruned_size = desired_vocab_size.max(pruned_size);
        // Keep the pieces whose removal would hurt the likelihood the most
        // (largest loss first).
        candidates.sort_by(|(_, a), (_, b)| b.partial_cmp(a).unwrap());
        for (id, _score) in candidates {
            if new_pieces.len() == pruned_size {
                break;
            }
            new_pieces.push(pieces[id].clone());
        }

        new_pieces.to_vec()
    }

    /// Update the progress bar with the new provided length and message
    fn update_progress(&self, p: &Option<ProgressBar>, len: usize, message: &str) {
        if let Some(p) = p {
            p.set_message(message);
            p.set_length(len as u64);
            // Redraw at most ~100 times over the whole run to limit overhead.
            p.set_draw_delta(len as u64 / 100);
            p.reset();
        }
    }

    /// Set the progress bar in the finish state
    fn finalize_progress(&self, p: &Option<ProgressBar>, final_len: usize) {
        if let Some(p) = p {
            p.set_length(final_len as u64);
            p.finish();
            println!();
        }
    }

    /// E step of the EM loop: segments every sentence with the current model
    /// and accumulates per-piece expected usage counts.
    ///
    /// Returns `(objective, num_tokens, expected)` where `objective` is the
    /// frequency-normalized negative log-likelihood, `num_tokens` the total
    /// number of Viterbi tokens produced, and `expected[i]` the marginal
    /// expected count of piece `i`.
    fn run_e_step(&self, model: &Unigram, sentences: &[Sentence]) -> (f64, u32, Vec<f64>) {
        let all_sentence_freq: u32 = sentences.iter().map(|(_a, b)| *b).sum();

        // Process chunks of sentences in parallel (when enabled) and merge the
        // partial accumulators in the `reduce` below.
        let chunk_size = std::cmp::max(sentences.len() / current_num_threads(), 1);
        let collected: (f64, u32, Vec<f64>) = sentences
            .maybe_par_chunks(chunk_size)
            .map(|sentences_chunk| {
                let mut expected: Vec<f64> = vec![0.0; model.len()];
                let mut objs: f64 = 0.0;
                let mut ntokens: u32 = 0;

                for (string, freq) in sentences_chunk {
                    let mut lattice = Lattice::from(string, model.bos_id, model.eos_id);
                    model.populate_nodes(&mut lattice);

                    // `populate_marginal` adds this sentence's expected counts
                    // into `expected` and returns its log-partition value.
                    let z: f64 = lattice.populate_marginal(*freq as f64, &mut expected);
                    if z.is_nan() {
                        panic!("likelihood is NAN. Input sentence may be too long.");
                    }
                    ntokens += lattice.viterbi().len() as u32;
                    objs -= z / (all_sentence_freq as f64);
                }
                (objs, ntokens, expected)
            })
            .reduce(
                || (0.0, 0, vec![0.0; model.len()]),
                |(objs, ntokens, expected), (lobjs, lntokens, lexpected)| {
                    (
                        objs + lobjs,
                        ntokens + lntokens,
                        expected
                            .iter()
                            .zip(lexpected)
                            .map(|(global_el, local_el)| global_el + local_el)
                            .collect(),
                    )
                },
            );
        collected
    }

    /// M step of the EM loop: re-estimates piece scores from the expected
    /// counts produced by `run_e_step`, dropping pieces whose expected count
    /// falls below a fixed threshold, then normalizing with a digamma-based
    /// (Bayesianified/DPified) scheme that acts as a sparse prior.
    fn run_m_step(&self, pieces: &[SentencePiece], expected: &[f64]) -> Vec<SentencePiece> {
        if pieces.len() != expected.len() {
            panic!(
                "Those two iterators are supposed to be the same length ({} vs {})",
                pieces.len(),
                expected.len()
            );
        }
        let mut new_pieces: Vec<SentencePiece> =
            Vec::with_capacity(self.vocab_size.try_into().unwrap());
        let mut sum = 0.0;
        let expected_frequency_threshold = 0.5;
        for (i, (freq, (piece, _score))) in expected.iter().zip(pieces).enumerate() {
            // Always keep unk.
            if i == 0 {
                new_pieces.push((piece.clone(), f64::NAN));
                continue;
            }
            if *freq < expected_frequency_threshold {
                continue;
            }
            new_pieces.push((piece.clone(), *freq));
            sum += freq;
        }
        // // Here we do not use the original EM, but use the
        // // Bayesianified/DPified EM algorithm.
        // // https://cs.stanford.edu/~pliang/papers/tutorial-acl2007-talk.pdf
        // // This modification will act as a sparse prior.
        let logsum = digamma(sum);
        let new_pieces: Vec<_> = new_pieces
            .into_iter()
            .map(|(s, c)| (s, digamma(c) - logsum))
            .collect();
        new_pieces
    }

    /// Full Unigram training pipeline on the given pre-counted sentences:
    /// seed pieces from frequent substrings, refine by EM, prune toward
    /// `vocab_size`, then finalize into `model` in place. Returns the
    /// configured special tokens.
    pub fn do_train(
        &self,
        sentences: Vec<Sentence>,
        model: &mut Unigram,
    ) -> Result<Vec<AddedToken>> {
        let progress = self.setup_progress();
        //
        // 1.
        // Compute frequent substrings
        // TODO Should be able to upgrade to u64 when needed
        self.update_progress(&progress, sentences.len(), "Suffix array seeds");
        let mut pieces: Vec<SentencePiece> =
            Vec::with_capacity(self.vocab_size.try_into().unwrap());

        // We use a UNK token when training, whatever the `self.unk_token`
        pieces.push(("<UNK>".into(), f64::NAN));
        pieces.extend(self.make_seed_sentence_pieces(&sentences, &progress));
        self.finalize_progress(&progress, sentences.len());

        // Useful to check compatibility with spm.
        debug!(
            "Using {} pieces on {} sentences for EM training",
            pieces.len(),
            sentences.len()
        );

        let desired_vocab_size: usize = (self.vocab_size as usize * 11) / 10; // * 1.1

        // 2. Run E-M Loops to fine grain the pieces.
        // We will shrink the vocab by shrinking_factor every loop on average
        // Some other pieces are dropped if logprob is too small
        // V = N * (f)**k
        // k = log(V / N) / log(f)
        let expected_loops = (((desired_vocab_size as f64).ln() - (pieces.len() as f64).ln())
            / self.shrinking_factor.ln()) as usize
            + 1;
        let expected_updates = expected_loops * self.n_sub_iterations as usize;
        self.update_progress(&progress, expected_updates, "EM training");
        let required_chars = self.required_chars(&sentences);
        // Every required character must fit in the vocabulary, or training
        // cannot possibly succeed.
        if required_chars.len() as u32 > self.vocab_size {
            return Err(Box::new(UnigramTrainerError::VocabularyTooSmall));
        }
        let mut new_model = Unigram::from(pieces.clone(), Some(0), false)?;
        loop {
            // Sub-EM iteration.
            for _iter in 0..self.n_sub_iterations {
                // Executes E step
                let (_objective, _num_tokens, expected) = self.run_e_step(&new_model, &sentences);

                // Executes M step.
                pieces = self.run_m_step(&pieces, &expected);
                new_model = Unigram::from(pieces.clone(), Some(0), false)?;

                // Useful comment for checking compatibility with spm
                // NOTE(review): the tokens-per-piece ratio divides by
                // `model.len()` (the caller-supplied output model), not
                // `new_model.len()` — presumably `new_model` was intended.
                // Debug-log only, so harmless; TODO confirm.
                debug!(
                    "Em iter={} size={} obj={} num_tokens={} num_tokens/piece={}",
                    _iter,
                    new_model.len(),
                    _objective,
                    _num_tokens,
                    _num_tokens as f64 / model.len() as f64
                );
                if let Some(p) = &progress {
                    p.inc(1);
                }
            } // end of Sub EM iteration

            // Stops the iteration when the size of sentences reaches to the
            // desired symbol size.
            if pieces.len() <= desired_vocab_size {
                break;
            }

            // Prunes pieces.
            pieces = self.prune_sentence_pieces(&new_model, &pieces, &sentences);
            new_model = Unigram::from(pieces.clone(), Some(0), false)?;
        }
        self.finalize_progress(&progress, expected_updates);

        // Finally, adjusts the size of sentencepices to be |vocab_size|.
        *model = self.finalize(new_model, required_chars)?;

        Ok(self.special_tokens.clone())
    }
}

impl Trainer for UnigramTrainer {
    type Model = Unigram;

    /// Train a Unigram model
    fn train(&self, model: &mut Unigram) -> Result<Vec<AddedToken>> {
        let sentences: Vec<_> = self.words.iter().map(|(s, i)| (s.to_owned(), *i)).collect();
        self.do_train(sentences, model)
    }

    /// Whether we should show progress
    fn should_show_progress(&self) -> bool {
        self.show_progress
    }

    /// Count words from the input sequences (in parallel when enabled) and
    /// store the aggregated word -> frequency map in `self.words`.
    fn feed<I, S, F>(&mut self, iterator: I, process: F) -> Result<()>
    where
        I: Iterator<Item = S> + Send,
        S: AsRef<str> + Send,
        F: Fn(&str) -> Result<Vec<String>> + Sync,
    {
        let words: Result<HashMap<String, u32>> = iterator
            .maybe_par_bridge()
            .map(|sequence| {
                // Pre-tokenize each sequence and count its words locally.
                let words = process(sequence.as_ref())?;
                let mut map = HashMap::new();
                for word in words {
                    map.entry(word).and_modify(|c| *c += 1).or_insert(1);
                }
                Ok(map)
            })
            .reduce(
                || Ok(HashMap::new()),
                // Merge the per-sequence maps, propagating the first error.
                |acc, ws| {
                    let mut acc = acc?;
                    for (k, v) in ws? {
                        acc.entry(k).and_modify(|c| *c += v).or_insert(v);
                    }
                    Ok(acc)
                },
            );
        self.words = words?;
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use assert_approx_eq::assert_approx_eq;
    use std::iter::FromIterator;

    #[test]
    fn test_unigram_chars() {
        let trainer = UnigramTrainerBuilder::default()
            .show_progress(false)
            .build()
            .unwrap();

        let sentences = vec![
            ("This is a".to_string(), 1),
            ("こんにちは友達".to_string(), 1),
        ];

        // 13 distinct characters across both sentences.
        let required_chars = trainer.required_chars(&sentences);
        assert_eq!(required_chars.len(), 13);

        let progress = None;
        let table = trainer.make_seed_sentence_pieces(&sentences, &progress);

        let target_strings = vec![
            "s", "i", " ", "達", "友", "ん", "は", "に", "ち", "こ", "h", "a", "T", "is ", "s ",
        ];
        let strings: Vec<_> = table.iter().map(|(string, _)| string).collect();
        assert_eq!(strings, target_strings);

        let scores = table.iter().map(|(_, score)| score);
        // Seed scores are log-probabilities; the inline comment on each entry
        // is the raw count it was derived from.
        let target_scores = vec![
            -2.5649493574615367, // 2.0
            -2.5649493574615367, // 2.0
            -2.5649493574615367, // 2.0
            -3.258096538021482,  // 1.0
            -3.258096538021482,  // 1.0
            -3.258096538021482,  // 1.0
            -3.258096538021482,  // 1.0
            -3.258096538021482,  // 1.0
            -3.258096538021482,  // 1.0
            -3.258096538021482,  // 1.0
            -3.258096538021482,  // 1.0
            -3.258096538021482,  // 1.0
            -3.258096538021482,  // 1.0
            -1.4663370687934272, // 6.0
            -1.8718021769015916, // 4.0
        ];

        for (score, target_score) in scores.zip(target_scores) {
            assert_approx_eq!(*score, target_score, 0.01);
        }
    }

    #[test]
    fn test_initial_alphabet() {
        // Characters in `initial_alphabet` are required even when they never
        // occur in the training sentences.
        let trainer = UnigramTrainerBuilder::default()
            .show_progress(false)
            .initial_alphabet(HashSet::from_iter(vec!['a', 'b', 'c', 'd', 'e', 'f']))
            .build()
            .unwrap();

        let sentences = vec![("こんにちは友達".to_string(), 1)];
        let required_chars = trainer.required_chars(&sentences);
        assert_eq!(
            required_chars,
            vec!["こ", "ん", "に", "ち", "は", "友", "達", "a", "b", "c", "d", "e", "f"]
                .into_iter()
                .map(|s| s.to_owned())
                .collect::<HashSet<_>>()
        );
    }

    #[test]
    fn test_unk_token() {
        // 1. Should add `unk_token` as first special token
        let trainer = UnigramTrainerBuilder::default()
            .show_progress(false)
            .special_tokens(vec![
                AddedToken::from("[SEP]", true),
                AddedToken::from("[CLS]", true),
            ])
            .unk_token(Some("[UNK]".into()))
            .build()
            .unwrap();

        let mut unigram = Unigram::default();
        trainer
            .do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram)
            .unwrap();

        let mut pieces = unigram.iter();
        assert_eq!(pieces.next(), Some(&("[UNK]".into(), 0.0)));
        assert_eq!(pieces.next(), Some(&("[SEP]".into(), 0.0)));
        assert_eq!(pieces.next(), Some(&("[CLS]".into(), 0.0)));

        // 2. Let it where it is
        // (the unk token already appears in `special_tokens`, so its position
        // among them is preserved)
        let trainer = UnigramTrainerBuilder::default()
            .show_progress(false)
            .special_tokens(vec![
                AddedToken::from("[SEP]", true),
                AddedToken::from("[CLS]", true),
                AddedToken::from("[UNK]", true),
            ])
            .unk_token(Some("[UNK]".into()))
            .build()
            .unwrap();

        let mut unigram = Unigram::default();
        trainer
            .do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram)
            .unwrap();

        let mut pieces = unigram.iter();
        assert_eq!(pieces.next(), Some(&("[SEP]".into(), 0.0)));
        assert_eq!(pieces.next(), Some(&("[CLS]".into(), 0.0)));
        assert_eq!(pieces.next(), Some(&("[UNK]".into(), 0.0)));

        // 3. Don't put it there if not needed
        // (no unk token configured: the first piece is a learned one)
        let trainer = UnigramTrainerBuilder::default()
            .show_progress(false)
            .build()
            .unwrap();

        let mut unigram = Unigram::default();
        trainer
            .do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram)
            .unwrap();

        let mut pieces = unigram.iter();
        assert_eq!(pieces.next().unwrap().0, "e".to_string());
    }

    #[test]
    fn test_special_tokens() {
        // Special tokens are emitted first, in their configured order.
        let trainer = UnigramTrainerBuilder::default()
            .show_progress(false)
            .special_tokens(vec![
                AddedToken::from("[SEP]", true),
                AddedToken::from("[CLS]", true),
            ])
            .build()
            .unwrap();

        let mut unigram = Unigram::default();
        trainer
            .do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram)
            .unwrap();

        let mut pieces = unigram.iter();
        assert_eq!(pieces.next(), Some(&("[SEP]".into(), 0.0)));
        assert_eq!(pieces.next(), Some(&("[CLS]".into(), 0.0)));
    }

    #[test]
    fn test_to_log_prob() {
        let mut a = vec![("".to_string(), 1.0), ("".to_string(), 2.0)];
        to_log_prob(&mut a);
        let scores = a.iter().map(|(_, score)| *score).collect::<Vec<_>>();
        // ln(1) - ln(3)
        assert_approx_eq!(scores[0], -1.098, 0.01);
        // ln(2) - ln(3)
        assert_approx_eq!(scores[1], -0.405, 0.01);
    }
}
random_line_split