file_name large_stringlengths 4 140 | prefix large_stringlengths 0 12.1k | suffix large_stringlengths 0 12k | middle large_stringlengths 0 7.51k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
http.rs | VmType, WithVM, IO};
use vm::gc::{Gc, Traverseable};
use gluon::import::add_extern_module;
use vm::internal::Value;
use gluon::{new_vm, Compiler};
// `Handler` is a type defined in http.glu but since we need to refer to it in the signature of
// listen we define a phantom type which we can use with `OpaqueValue` to store a `Handler` in Rust
struct Handler<T>(PhantomData<T>);
impl<T: VmType + 'static> VmType for Handler<T> {
type Type = Self;
fn make_type(vm: &Thread) -> ArcType {
let typ = (*vm.global_env()
.get_env()
.find_type_info("examples.http_types.Handler")
.unwrap())
.clone()
.into_type();
Type::app(typ, collect![T::make_type(vm)])
}
}
// Rust does not let us define traits on types defined in a different crate such as `hyper`. We can
// however work around this by defining a wrapper type which we are then able to define the traits
// on.
struct Wrap<T>(T);
macro_rules! define_vmtype {
($name: ident) => {
impl VmType for Wrap<$name> {
type Type = $name;
fn make_type(vm: &Thread) -> ArcType {
let typ = concat!("examples.http_types.", stringify!($name));
(*vm.global_env().get_env().find_type_info(typ).unwrap())
.clone()
.into_type()
}
}
}
}
define_vmtype! { Method }
impl<'vm> Pushable<'vm> for Wrap<Method> {
fn push(self, _: &'vm Thread, context: &mut Context) -> VmResult<()> {
use hyper::Method::*;
context.stack.push(Value::tag(match self.0 {
Get => 0,
Post => 1,
Delete => 2,
_ => {
return Err(VmError::Message(format!(
"Method `{:?}` does not exist in gluon",
self.0
)).into())
}
}));
Ok(())
}
}
define_vmtype! { StatusCode }
impl<'vm> Getable<'vm> for Wrap<StatusCode> {
fn from_value(_: &'vm Thread, value: Variants) -> Self {
use hyper::StatusCode::*;
match value.as_ref() {
ValueRef::Data(data) => Wrap(match data.tag() {
0 => Ok,
1 => NotFound,
2 => InternalServerError,
_ => panic!("Unexpected tag"),
}),
_ => panic!(),
}
}
}
// Representation of a http body that is in the prograss of being read
pub struct Body(Arc<Mutex<Box<Stream<Item = PushAsRef<Chunk, [u8]>, Error = VmError> + Send>>>);
// By implementing `Userdata` on `Body` it can be automatically pushed and retrieved from gluon
// threads
impl Userdata for Body {}
// Types implementing `Userdata` requires a `std::fmt::Debug` implementation so it can be displayed
impl fmt::Debug for Body {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "hyper::Body")
}
}
// `Traverseable` is required by `Userdata` so that the garbage collector knows how to scan the
// value for garbage collected references. Normally objects do not contain any references so this
// can be empty
impl Traverseable for Body {
fn traverse(&self, _: &mut Gc) {}
}
// `VmType` is the last trait required for a type to implement `Userdata` and defines the type used
// in gluon for this Rust type. For opaque `Userdata` values this minimal implementation is enough
// as the default implementation of `make_type` will lookup `VmType::Type` from the virtual machine
// which should have been registered earlier with `Thread::register_type`
impl VmType for Body {
type Type = Self;
}
// Since `Body` implements `Userdata` gluon will automatically marshal the gluon representation
// into `&Body` argument
fn read_chunk(
body: &Body,
) -> FutureResult<
Box<Future<Item = IO<Option<PushAsRef<Chunk, [u8]>>>, Error = VmError> + Send + 'static>,
> {
use futures::future::poll_fn;
let body = body.0.clone();
// `FutureResult` is a wrapper type around `Future` which when returned to the interpreter is | stream.poll().map(|async| async.map(IO::Value))
})))
}
// A http body that is being written
pub struct ResponseBody(Arc<Mutex<Option<Sender<Result<Chunk, hyper::Error>>>>>);
impl fmt::Debug for ResponseBody {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "hyper::Response")
}
}
impl Userdata for ResponseBody {}
impl Traverseable for ResponseBody {
fn traverse(&self, _: &mut Gc) {}
}
impl VmType for ResponseBody {
type Type = Self;
}
fn write_response(
response: &ResponseBody,
bytes: &[u8],
) -> FutureResult<Box<Future<Item = IO<()>, Error = VmError> + Send + 'static>> {
use futures::future::poll_fn;
use futures::AsyncSink;
// Turn `bytes´ into a `Chunk` which can be sent to the http body
let mut unsent_chunk = Some(Ok(bytes.to_owned().into()));
let response = response.0.clone();
FutureResult(Box::new(poll_fn(move || {
info!("Starting response send");
let mut sender = response.lock().unwrap();
let sender = sender
.as_mut()
.expect("Sender has been dropped while still in use");
if let Some(chunk) = unsent_chunk.take() {
match sender.start_send(chunk) {
Ok(AsyncSink::NotReady(chunk)) => {
unsent_chunk = Some(chunk);
return Ok(Async::NotReady);
}
Ok(AsyncSink::Ready) => (),
Err(_) => {
info!("Could not send http response");
return Ok(Async::Ready(IO::Value(())));
}
}
}
match sender.poll_complete() {
Ok(async) => Ok(async.map(IO::Value)),
Err(_) => {
info!("Could not send http response");
Ok(Async::Ready(IO::Value(())))
}
}
})))
}
// Next we define some record types which are marshalled to and from gluon. These have equivalent
// definitions in http_types.glu
field_decl! { method, uri, status, body, request, response }
type Request = record_type!{
method => Wrap<Method>,
uri => String,
body => Body
};
type Response = record_type!{
status => Wrap<StatusCode>
};
type HttpState = record_type!{
request => Request,
response => ResponseBody
};
fn listen(port: i32, value: WithVM<OpaqueValue<RootedThread, Handler<Response>>>) -> IO<()> {
let WithVM {
value: handler,
vm: thread,
} = value;
use hyper::server::{Http, Request as HyperRequest, Response as HyperResponse};
// Retrieve the `handle` function from the http module which we use to evaluate values of type
// `Handler Response`
type ListenFn = fn(OpaqueValue<RootedThread, Handler<Response>>, HttpState) -> IO<Response>;
let handle: Function<RootedThread, ListenFn> = thread
.get_global("examples.http.handle")
.unwrap_or_else(|err| panic!("{}", err));
struct Listen {
handle: Function<RootedThread, ListenFn>,
handler: OpaqueValue<RootedThread, Handler<Response>>,
}
impl Service for Listen {
type Request = HyperRequest;
type Response = HyperResponse;
type Error = hyper::Error;
type Future = Box<Future<Item = HyperResponse, Error = hyper::Error> + Send + 'static>;
fn call(&self, request: HyperRequest) -> Self::Future {
let gluon_request = record_no_decl! {
// Here we use to `Wrap` type to make `hyper::Request` into a type that can be
// pushed to gluon
method => Wrap(request.method().clone()),
uri => request.uri().to_string(),
// Since `Body` implements `Userdata` it can be directly pushed to gluon
body => Body(Arc::new(Mutex::new(Box::new(request.body()
.map_err(|err| VmError::Message(format!("{}", err)))
// `PushAsRef` makes the `body` parameter act as a `&[u8]` which means it is
// marshalled to `Array Byte` in gluon
.map(PushAsRef::<_, [u8]>::new)))))
};
let (response_sender, response_body) = hyper::Body::pair();
let response_sender = Arc::new(Mutex | // polled until completion. After `poll` returns `Ready` the value is then returned to the
// gluon function which called `read_chunk`
FutureResult(Box::new(poll_fn(move || {
let mut stream = body.lock().unwrap(); | random_line_split |
http.rs | mType, WithVM, IO};
use vm::gc::{Gc, Traverseable};
use gluon::import::add_extern_module;
use vm::internal::Value;
use gluon::{new_vm, Compiler};
// `Handler` is a type defined in http.glu but since we need to refer to it in the signature of
// listen we define a phantom type which we can use with `OpaqueValue` to store a `Handler` in Rust
struct Handler<T>(PhantomData<T>);
impl<T: VmType + 'static> VmType for Handler<T> {
type Type = Self;
fn make_type(vm: &Thread) -> ArcType {
let typ = (*vm.global_env()
.get_env()
.find_type_info("examples.http_types.Handler")
.unwrap())
.clone()
.into_type();
Type::app(typ, collect![T::make_type(vm)])
}
}
// Rust does not let us define traits on types defined in a different crate such as `hyper`. We can
// however work around this by defining a wrapper type which we are then able to define the traits
// on.
struct Wrap<T>(T);
macro_rules! define_vmtype {
($name: ident) => {
impl VmType for Wrap<$name> {
type Type = $name;
fn make_type(vm: &Thread) -> ArcType {
let typ = concat!("examples.http_types.", stringify!($name));
(*vm.global_env().get_env().find_type_info(typ).unwrap())
.clone()
.into_type()
}
}
}
}
define_vmtype! { Method }
impl<'vm> Pushable<'vm> for Wrap<Method> {
fn push(self, _: &'vm Thread, context: &mut Context) -> VmResult<()> |
}
define_vmtype! { StatusCode }
impl<'vm> Getable<'vm> for Wrap<StatusCode> {
fn from_value(_: &'vm Thread, value: Variants) -> Self {
use hyper::StatusCode::*;
match value.as_ref() {
ValueRef::Data(data) => Wrap(match data.tag() {
0 => Ok,
1 => NotFound,
2 => InternalServerError,
_ => panic!("Unexpected tag"),
}),
_ => panic!(),
}
}
}
// Representation of a http body that is in the prograss of being read
pub struct Body(Arc<Mutex<Box<Stream<Item = PushAsRef<Chunk, [u8]>, Error = VmError> + Send>>>);
// By implementing `Userdata` on `Body` it can be automatically pushed and retrieved from gluon
// threads
impl Userdata for Body {}
// Types implementing `Userdata` requires a `std::fmt::Debug` implementation so it can be displayed
impl fmt::Debug for Body {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "hyper::Body")
}
}
// `Traverseable` is required by `Userdata` so that the garbage collector knows how to scan the
// value for garbage collected references. Normally objects do not contain any references so this
// can be empty
impl Traverseable for Body {
fn traverse(&self, _: &mut Gc) {}
}
// `VmType` is the last trait required for a type to implement `Userdata` and defines the type used
// in gluon for this Rust type. For opaque `Userdata` values this minimal implementation is enough
// as the default implementation of `make_type` will lookup `VmType::Type` from the virtual machine
// which should have been registered earlier with `Thread::register_type`
impl VmType for Body {
type Type = Self;
}
// Since `Body` implements `Userdata` gluon will automatically marshal the gluon representation
// into `&Body` argument
fn read_chunk(
body: &Body,
) -> FutureResult<
Box<Future<Item = IO<Option<PushAsRef<Chunk, [u8]>>>, Error = VmError> + Send + 'static>,
> {
use futures::future::poll_fn;
let body = body.0.clone();
// `FutureResult` is a wrapper type around `Future` which when returned to the interpreter is
// polled until completion. After `poll` returns `Ready` the value is then returned to the
// gluon function which called `read_chunk`
FutureResult(Box::new(poll_fn(move || {
let mut stream = body.lock().unwrap();
stream.poll().map(|async| async.map(IO::Value))
})))
}
// A http body that is being written
pub struct ResponseBody(Arc<Mutex<Option<Sender<Result<Chunk, hyper::Error>>>>>);
impl fmt::Debug for ResponseBody {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "hyper::Response")
}
}
impl Userdata for ResponseBody {}
impl Traverseable for ResponseBody {
fn traverse(&self, _: &mut Gc) {}
}
impl VmType for ResponseBody {
type Type = Self;
}
fn write_response(
response: &ResponseBody,
bytes: &[u8],
) -> FutureResult<Box<Future<Item = IO<()>, Error = VmError> + Send + 'static>> {
use futures::future::poll_fn;
use futures::AsyncSink;
// Turn `bytes´ into a `Chunk` which can be sent to the http body
let mut unsent_chunk = Some(Ok(bytes.to_owned().into()));
let response = response.0.clone();
FutureResult(Box::new(poll_fn(move || {
info!("Starting response send");
let mut sender = response.lock().unwrap();
let sender = sender
.as_mut()
.expect("Sender has been dropped while still in use");
if let Some(chunk) = unsent_chunk.take() {
match sender.start_send(chunk) {
Ok(AsyncSink::NotReady(chunk)) => {
unsent_chunk = Some(chunk);
return Ok(Async::NotReady);
}
Ok(AsyncSink::Ready) => (),
Err(_) => {
info!("Could not send http response");
return Ok(Async::Ready(IO::Value(())));
}
}
}
match sender.poll_complete() {
Ok(async) => Ok(async.map(IO::Value)),
Err(_) => {
info!("Could not send http response");
Ok(Async::Ready(IO::Value(())))
}
}
})))
}
// Next we define some record types which are marshalled to and from gluon. These have equivalent
// definitions in http_types.glu
field_decl! { method, uri, status, body, request, response }
type Request = record_type!{
method => Wrap<Method>,
uri => String,
body => Body
};
type Response = record_type!{
status => Wrap<StatusCode>
};
type HttpState = record_type!{
request => Request,
response => ResponseBody
};
fn listen(port: i32, value: WithVM<OpaqueValue<RootedThread, Handler<Response>>>) -> IO<()> {
let WithVM {
value: handler,
vm: thread,
} = value;
use hyper::server::{Http, Request as HyperRequest, Response as HyperResponse};
// Retrieve the `handle` function from the http module which we use to evaluate values of type
// `Handler Response`
type ListenFn = fn(OpaqueValue<RootedThread, Handler<Response>>, HttpState) -> IO<Response>;
let handle: Function<RootedThread, ListenFn> = thread
.get_global("examples.http.handle")
.unwrap_or_else(|err| panic!("{}", err));
struct Listen {
handle: Function<RootedThread, ListenFn>,
handler: OpaqueValue<RootedThread, Handler<Response>>,
}
impl Service for Listen {
type Request = HyperRequest;
type Response = HyperResponse;
type Error = hyper::Error;
type Future = Box<Future<Item = HyperResponse, Error = hyper::Error> + Send + 'static>;
fn call(&self, request: HyperRequest) -> Self::Future {
let gluon_request = record_no_decl! {
// Here we use to `Wrap` type to make `hyper::Request` into a type that can be
// pushed to gluon
method => Wrap(request.method().clone()),
uri => request.uri().to_string(),
// Since `Body` implements `Userdata` it can be directly pushed to gluon
body => Body(Arc::new(Mutex::new(Box::new(request.body()
.map_err(|err| VmError::Message(format!("{}", err)))
// `PushAsRef` makes the `body` parameter act as a `&[u8]` which means it is
// marshalled to `Array Byte` in gluon
.map(PushAsRef::<_, [u8]>::new)))))
};
let (response_sender, response_body) = hyper::Body::pair();
let response_sender = Arc::new | {
use hyper::Method::*;
context.stack.push(Value::tag(match self.0 {
Get => 0,
Post => 1,
Delete => 2,
_ => {
return Err(VmError::Message(format!(
"Method `{:?}` does not exist in gluon",
self.0
)).into())
}
}));
Ok(())
} | identifier_body |
http.rs | make_type(vm)])
}
}
// Rust does not let us define traits on types defined in a different crate such as `hyper`. We can
// however work around this by defining a wrapper type which we are then able to define the traits
// on.
struct Wrap<T>(T);
macro_rules! define_vmtype {
($name: ident) => {
impl VmType for Wrap<$name> {
type Type = $name;
fn make_type(vm: &Thread) -> ArcType {
let typ = concat!("examples.http_types.", stringify!($name));
(*vm.global_env().get_env().find_type_info(typ).unwrap())
.clone()
.into_type()
}
}
}
}
define_vmtype! { Method }
impl<'vm> Pushable<'vm> for Wrap<Method> {
fn push(self, _: &'vm Thread, context: &mut Context) -> VmResult<()> {
use hyper::Method::*;
context.stack.push(Value::tag(match self.0 {
Get => 0,
Post => 1,
Delete => 2,
_ => {
return Err(VmError::Message(format!(
"Method `{:?}` does not exist in gluon",
self.0
)).into())
}
}));
Ok(())
}
}
define_vmtype! { StatusCode }
impl<'vm> Getable<'vm> for Wrap<StatusCode> {
fn from_value(_: &'vm Thread, value: Variants) -> Self {
use hyper::StatusCode::*;
match value.as_ref() {
ValueRef::Data(data) => Wrap(match data.tag() {
0 => Ok,
1 => NotFound,
2 => InternalServerError,
_ => panic!("Unexpected tag"),
}),
_ => panic!(),
}
}
}
// Representation of a http body that is in the prograss of being read
pub struct Body(Arc<Mutex<Box<Stream<Item = PushAsRef<Chunk, [u8]>, Error = VmError> + Send>>>);
// By implementing `Userdata` on `Body` it can be automatically pushed and retrieved from gluon
// threads
impl Userdata for Body {}
// Types implementing `Userdata` requires a `std::fmt::Debug` implementation so it can be displayed
impl fmt::Debug for Body {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "hyper::Body")
}
}
// `Traverseable` is required by `Userdata` so that the garbage collector knows how to scan the
// value for garbage collected references. Normally objects do not contain any references so this
// can be empty
impl Traverseable for Body {
fn traverse(&self, _: &mut Gc) {}
}
// `VmType` is the last trait required for a type to implement `Userdata` and defines the type used
// in gluon for this Rust type. For opaque `Userdata` values this minimal implementation is enough
// as the default implementation of `make_type` will lookup `VmType::Type` from the virtual machine
// which should have been registered earlier with `Thread::register_type`
impl VmType for Body {
type Type = Self;
}
// Since `Body` implements `Userdata` gluon will automatically marshal the gluon representation
// into `&Body` argument
fn read_chunk(
body: &Body,
) -> FutureResult<
Box<Future<Item = IO<Option<PushAsRef<Chunk, [u8]>>>, Error = VmError> + Send + 'static>,
> {
use futures::future::poll_fn;
let body = body.0.clone();
// `FutureResult` is a wrapper type around `Future` which when returned to the interpreter is
// polled until completion. After `poll` returns `Ready` the value is then returned to the
// gluon function which called `read_chunk`
FutureResult(Box::new(poll_fn(move || {
let mut stream = body.lock().unwrap();
stream.poll().map(|async| async.map(IO::Value))
})))
}
// A http body that is being written
pub struct ResponseBody(Arc<Mutex<Option<Sender<Result<Chunk, hyper::Error>>>>>);
impl fmt::Debug for ResponseBody {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "hyper::Response")
}
}
impl Userdata for ResponseBody {}
impl Traverseable for ResponseBody {
fn traverse(&self, _: &mut Gc) {}
}
impl VmType for ResponseBody {
type Type = Self;
}
fn write_response(
response: &ResponseBody,
bytes: &[u8],
) -> FutureResult<Box<Future<Item = IO<()>, Error = VmError> + Send + 'static>> {
use futures::future::poll_fn;
use futures::AsyncSink;
// Turn `bytes´ into a `Chunk` which can be sent to the http body
let mut unsent_chunk = Some(Ok(bytes.to_owned().into()));
let response = response.0.clone();
FutureResult(Box::new(poll_fn(move || {
info!("Starting response send");
let mut sender = response.lock().unwrap();
let sender = sender
.as_mut()
.expect("Sender has been dropped while still in use");
if let Some(chunk) = unsent_chunk.take() {
match sender.start_send(chunk) {
Ok(AsyncSink::NotReady(chunk)) => {
unsent_chunk = Some(chunk);
return Ok(Async::NotReady);
}
Ok(AsyncSink::Ready) => (),
Err(_) => {
info!("Could not send http response");
return Ok(Async::Ready(IO::Value(())));
}
}
}
match sender.poll_complete() {
Ok(async) => Ok(async.map(IO::Value)),
Err(_) => {
info!("Could not send http response");
Ok(Async::Ready(IO::Value(())))
}
}
})))
}
// Next we define some record types which are marshalled to and from gluon. These have equivalent
// definitions in http_types.glu
field_decl! { method, uri, status, body, request, response }
type Request = record_type!{
method => Wrap<Method>,
uri => String,
body => Body
};
type Response = record_type!{
status => Wrap<StatusCode>
};
type HttpState = record_type!{
request => Request,
response => ResponseBody
};
fn listen(port: i32, value: WithVM<OpaqueValue<RootedThread, Handler<Response>>>) -> IO<()> {
let WithVM {
value: handler,
vm: thread,
} = value;
use hyper::server::{Http, Request as HyperRequest, Response as HyperResponse};
// Retrieve the `handle` function from the http module which we use to evaluate values of type
// `Handler Response`
type ListenFn = fn(OpaqueValue<RootedThread, Handler<Response>>, HttpState) -> IO<Response>;
let handle: Function<RootedThread, ListenFn> = thread
.get_global("examples.http.handle")
.unwrap_or_else(|err| panic!("{}", err));
struct Listen {
handle: Function<RootedThread, ListenFn>,
handler: OpaqueValue<RootedThread, Handler<Response>>,
}
impl Service for Listen {
type Request = HyperRequest;
type Response = HyperResponse;
type Error = hyper::Error;
type Future = Box<Future<Item = HyperResponse, Error = hyper::Error> + Send + 'static>;
fn call(&self, request: HyperRequest) -> Self::Future {
let gluon_request = record_no_decl! {
// Here we use to `Wrap` type to make `hyper::Request` into a type that can be
// pushed to gluon
method => Wrap(request.method().clone()),
uri => request.uri().to_string(),
// Since `Body` implements `Userdata` it can be directly pushed to gluon
body => Body(Arc::new(Mutex::new(Box::new(request.body()
.map_err(|err| VmError::Message(format!("{}", err)))
// `PushAsRef` makes the `body` parameter act as a `&[u8]` which means it is
// marshalled to `Array Byte` in gluon
.map(PushAsRef::<_, [u8]>::new)))))
};
let (response_sender, response_body) = hyper::Body::pair();
let response_sender = Arc::new(Mutex::new(Some(response_sender)));
let http_state = record_no_decl!{
request => gluon_request,
response => ResponseBody(response_sender.clone())
};
Box::new(
self.handle
.clone()
.call_async(self.handler.clone(), http_state)
.then(move |result| match result {
Ok(value) => {
match value {
IO::Value(record_p!{ status }) => {
// Drop the sender to so that it the receiver stops waiting for
// more chunks
*response_sender.lock().unwrap() = None;
Ok(
HyperResponse::new()
.with_status(status.0)
.with_body(response_body),
)
}
IO::Exception(err) => { |
let _ = stderr().write(err.as_bytes());
Ok(
HyperResponse::new()
.with_status(StatusCode::InternalServerError),
)
}
| conditional_block | |
http.rs | mType, WithVM, IO};
use vm::gc::{Gc, Traverseable};
use gluon::import::add_extern_module;
use vm::internal::Value;
use gluon::{new_vm, Compiler};
// `Handler` is a type defined in http.glu but since we need to refer to it in the signature of
// listen we define a phantom type which we can use with `OpaqueValue` to store a `Handler` in Rust
struct Handler<T>(PhantomData<T>);
impl<T: VmType + 'static> VmType for Handler<T> {
type Type = Self;
fn make_type(vm: &Thread) -> ArcType {
let typ = (*vm.global_env()
.get_env()
.find_type_info("examples.http_types.Handler")
.unwrap())
.clone()
.into_type();
Type::app(typ, collect![T::make_type(vm)])
}
}
// Rust does not let us define traits on types defined in a different crate such as `hyper`. We can
// however work around this by defining a wrapper type which we are then able to define the traits
// on.
struct Wrap<T>(T);
macro_rules! define_vmtype {
($name: ident) => {
impl VmType for Wrap<$name> {
type Type = $name;
fn make_type(vm: &Thread) -> ArcType {
let typ = concat!("examples.http_types.", stringify!($name));
(*vm.global_env().get_env().find_type_info(typ).unwrap())
.clone()
.into_type()
}
}
}
}
define_vmtype! { Method }
impl<'vm> Pushable<'vm> for Wrap<Method> {
fn push(self, _: &'vm Thread, context: &mut Context) -> VmResult<()> {
use hyper::Method::*;
context.stack.push(Value::tag(match self.0 {
Get => 0,
Post => 1,
Delete => 2,
_ => {
return Err(VmError::Message(format!(
"Method `{:?}` does not exist in gluon",
self.0
)).into())
}
}));
Ok(())
}
}
define_vmtype! { StatusCode }
impl<'vm> Getable<'vm> for Wrap<StatusCode> {
fn from_value(_: &'vm Thread, value: Variants) -> Self {
use hyper::StatusCode::*;
match value.as_ref() {
ValueRef::Data(data) => Wrap(match data.tag() {
0 => Ok,
1 => NotFound,
2 => InternalServerError,
_ => panic!("Unexpected tag"),
}),
_ => panic!(),
}
}
}
// Representation of a http body that is in the prograss of being read
pub struct Body(Arc<Mutex<Box<Stream<Item = PushAsRef<Chunk, [u8]>, Error = VmError> + Send>>>);
// By implementing `Userdata` on `Body` it can be automatically pushed and retrieved from gluon
// threads
impl Userdata for Body {}
// Types implementing `Userdata` requires a `std::fmt::Debug` implementation so it can be displayed
impl fmt::Debug for Body {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "hyper::Body")
}
}
// `Traverseable` is required by `Userdata` so that the garbage collector knows how to scan the
// value for garbage collected references. Normally objects do not contain any references so this
// can be empty
impl Traverseable for Body {
fn traverse(&self, _: &mut Gc) {}
}
// `VmType` is the last trait required for a type to implement `Userdata` and defines the type used
// in gluon for this Rust type. For opaque `Userdata` values this minimal implementation is enough
// as the default implementation of `make_type` will lookup `VmType::Type` from the virtual machine
// which should have been registered earlier with `Thread::register_type`
impl VmType for Body {
type Type = Self;
}
// Since `Body` implements `Userdata` gluon will automatically marshal the gluon representation
// into `&Body` argument
fn read_chunk(
body: &Body,
) -> FutureResult<
Box<Future<Item = IO<Option<PushAsRef<Chunk, [u8]>>>, Error = VmError> + Send + 'static>,
> {
use futures::future::poll_fn;
let body = body.0.clone();
// `FutureResult` is a wrapper type around `Future` which when returned to the interpreter is
// polled until completion. After `poll` returns `Ready` the value is then returned to the
// gluon function which called `read_chunk`
FutureResult(Box::new(poll_fn(move || {
let mut stream = body.lock().unwrap();
stream.poll().map(|async| async.map(IO::Value))
})))
}
// A http body that is being written
pub struct ResponseBody(Arc<Mutex<Option<Sender<Result<Chunk, hyper::Error>>>>>);
impl fmt::Debug for ResponseBody {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "hyper::Response")
}
}
impl Userdata for ResponseBody {}
impl Traverseable for ResponseBody {
fn traverse(&self, _: &mut Gc) {}
}
impl VmType for ResponseBody {
type Type = Self;
}
fn | (
response: &ResponseBody,
bytes: &[u8],
) -> FutureResult<Box<Future<Item = IO<()>, Error = VmError> + Send + 'static>> {
use futures::future::poll_fn;
use futures::AsyncSink;
// Turn `bytes´ into a `Chunk` which can be sent to the http body
let mut unsent_chunk = Some(Ok(bytes.to_owned().into()));
let response = response.0.clone();
FutureResult(Box::new(poll_fn(move || {
info!("Starting response send");
let mut sender = response.lock().unwrap();
let sender = sender
.as_mut()
.expect("Sender has been dropped while still in use");
if let Some(chunk) = unsent_chunk.take() {
match sender.start_send(chunk) {
Ok(AsyncSink::NotReady(chunk)) => {
unsent_chunk = Some(chunk);
return Ok(Async::NotReady);
}
Ok(AsyncSink::Ready) => (),
Err(_) => {
info!("Could not send http response");
return Ok(Async::Ready(IO::Value(())));
}
}
}
match sender.poll_complete() {
Ok(async) => Ok(async.map(IO::Value)),
Err(_) => {
info!("Could not send http response");
Ok(Async::Ready(IO::Value(())))
}
}
})))
}
// Next we define some record types which are marshalled to and from gluon. These have equivalent
// definitions in http_types.glu
field_decl! { method, uri, status, body, request, response }
type Request = record_type!{
method => Wrap<Method>,
uri => String,
body => Body
};
type Response = record_type!{
status => Wrap<StatusCode>
};
type HttpState = record_type!{
request => Request,
response => ResponseBody
};
fn listen(port: i32, value: WithVM<OpaqueValue<RootedThread, Handler<Response>>>) -> IO<()> {
let WithVM {
value: handler,
vm: thread,
} = value;
use hyper::server::{Http, Request as HyperRequest, Response as HyperResponse};
// Retrieve the `handle` function from the http module which we use to evaluate values of type
// `Handler Response`
type ListenFn = fn(OpaqueValue<RootedThread, Handler<Response>>, HttpState) -> IO<Response>;
let handle: Function<RootedThread, ListenFn> = thread
.get_global("examples.http.handle")
.unwrap_or_else(|err| panic!("{}", err));
struct Listen {
handle: Function<RootedThread, ListenFn>,
handler: OpaqueValue<RootedThread, Handler<Response>>,
}
impl Service for Listen {
type Request = HyperRequest;
type Response = HyperResponse;
type Error = hyper::Error;
type Future = Box<Future<Item = HyperResponse, Error = hyper::Error> + Send + 'static>;
fn call(&self, request: HyperRequest) -> Self::Future {
let gluon_request = record_no_decl! {
// Here we use to `Wrap` type to make `hyper::Request` into a type that can be
// pushed to gluon
method => Wrap(request.method().clone()),
uri => request.uri().to_string(),
// Since `Body` implements `Userdata` it can be directly pushed to gluon
body => Body(Arc::new(Mutex::new(Box::new(request.body()
.map_err(|err| VmError::Message(format!("{}", err)))
// `PushAsRef` makes the `body` parameter act as a `&[u8]` which means it is
// marshalled to `Array Byte` in gluon
.map(PushAsRef::<_, [u8]>::new)))))
};
let (response_sender, response_body) = hyper::Body::pair();
let response_sender = Arc::new(M | write_response | identifier_name |
functional_dependencies.rs | ?;
Ok(idx)
})
.collect::<Result<Vec<_>>>()?;
Ok(if *is_primary {
Constraint::PrimaryKey(indices)
} else {
Constraint::Unique(indices)
})
}
TableConstraint::ForeignKey { .. } => Err(DataFusionError::Plan(
"Foreign key constraints are not currently supported".to_string(),
)),
TableConstraint::Check { .. } => Err(DataFusionError::Plan(
"Check constraints are not currently supported".to_string(),
)),
TableConstraint::Index { .. } => Err(DataFusionError::Plan(
"Indexes are not currently supported".to_string(),
)),
TableConstraint::FulltextOrSpatial { .. } => Err(DataFusionError::Plan(
"Indexes are not currently supported".to_string(),
)),
})
.collect::<Result<Vec<_>>>()?;
Ok(Constraints::new(constraints))
}
/// Check whether constraints is empty
pub fn is_empty(&self) -> bool {
self.inner.is_empty()
}
}
impl Display for Constraints {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let pk: Vec<String> = self.inner.iter().map(|c| format!("{:?}", c)).collect();
let pk = pk.join(", ");
if !pk.is_empty() {
write!(f, " constraints=[{pk}]")
} else {
write!(f, "")
}
}
}
/// This object defines a functional dependence in the schema. A functional
/// dependence defines a relationship between determinant keys and dependent
/// columns. A determinant key is a column, or a set of columns, whose value
/// uniquely determines values of some other (dependent) columns. If two rows
/// have the same determinant key, dependent columns in these rows are
/// necessarily the same. If the determinant key is unique, the set of
/// dependent columns is equal to the entire schema and the determinant key can
/// serve as a primary key. Note that a primary key may "downgrade" into a
/// determinant key due to an operation such as a join, and this object is
/// used to track dependence relationships in such cases. For more information
/// on functional dependencies, see:
/// <https://www.scaler.com/topics/dbms/functional-dependency-in-dbms/>
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FunctionalDependence {
// Column indices of the (possibly composite) determinant key:
pub source_indices: Vec<usize>,
// Column indices of dependent column(s): | /// this flag is `false`.
/// Note that as the schema changes between different stages in a plan,
/// such as after LEFT JOIN or RIGHT JOIN operations, this property may
/// change.
pub nullable: bool,
// The functional dependency mode:
pub mode: Dependency,
}
/// Describes functional dependency mode.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Dependency {
Single, // A determinant key may occur only once.
Multi, // A determinant key may occur multiple times (in multiple rows).
}
impl FunctionalDependence {
// Creates a new functional dependence.
pub fn new(
source_indices: Vec<usize>,
target_indices: Vec<usize>,
nullable: bool,
) -> Self {
Self {
source_indices,
target_indices,
nullable,
// Start with the least restrictive mode by default:
mode: Dependency::Multi,
}
}
pub fn with_mode(mut self, mode: Dependency) -> Self {
self.mode = mode;
self
}
}
/// This object encapsulates all functional dependencies in a given relation.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FunctionalDependencies {
deps: Vec<FunctionalDependence>,
}
impl FunctionalDependencies {
/// Creates an empty `FunctionalDependencies` object.
pub fn empty() -> Self {
Self { deps: vec![] }
}
/// Creates a new `FunctionalDependencies` object from a vector of
/// `FunctionalDependence` objects.
pub fn new(dependencies: Vec<FunctionalDependence>) -> Self {
Self { deps: dependencies }
}
/// Creates a new `FunctionalDependencies` object from the given constraints.
pub fn new_from_constraints(
constraints: Option<&Constraints>,
n_field: usize,
) -> Self {
if let Some(Constraints { inner: constraints }) = constraints {
// Construct dependency objects based on each individual constraint:
let dependencies = constraints
.iter()
.map(|constraint| {
// All the field indices are associated with the whole table
// since we are dealing with table level constraints:
let dependency = match constraint {
Constraint::PrimaryKey(indices) => FunctionalDependence::new(
indices.to_vec(),
(0..n_field).collect::<Vec<_>>(),
false,
),
Constraint::Unique(indices) => FunctionalDependence::new(
indices.to_vec(),
(0..n_field).collect::<Vec<_>>(),
true,
),
};
// As primary keys are guaranteed to be unique, set the
// functional dependency mode to `Dependency::Single`:
dependency.with_mode(Dependency::Single)
})
.collect::<Vec<_>>();
Self::new(dependencies)
} else {
// There is no constraint, return an empty object:
Self::empty()
}
}
pub fn with_dependency(mut self, mode: Dependency) -> Self {
self.deps.iter_mut().for_each(|item| item.mode = mode);
self
}
/// Merges the given functional dependencies with these.
pub fn extend(&mut self, other: FunctionalDependencies) {
self.deps.extend(other.deps);
}
/// Adds the `offset` value to `source_indices` and `target_indices` for
/// each functional dependency.
pub fn add_offset(&mut self, offset: usize) {
self.deps.iter_mut().for_each(
|FunctionalDependence {
source_indices,
target_indices,
..
}| {
*source_indices = add_offset_to_vec(source_indices, offset);
*target_indices = add_offset_to_vec(target_indices, offset);
},
)
}
/// Updates `source_indices` and `target_indices` of each functional
/// dependence using the index mapping given in `proj_indices`.
///
/// Assume that `proj_indices` is \[2, 5, 8\] and we have a functional
/// dependence \[5\] (`source_indices`) -> \[5, 8\] (`target_indices`).
/// In the updated schema, fields at indices \[2, 5, 8\] will transform
/// to \[0, 1, 2\]. Therefore, the resulting functional dependence will
/// be \[1\] -> \[1, 2\].
pub fn project_functional_dependencies(
&self,
proj_indices: &[usize],
// The argument `n_out` denotes the schema field length, which is needed
// to correctly associate a `Single`-mode dependence with the whole table.
n_out: usize,
) -> FunctionalDependencies {
let mut projected_func_dependencies = vec![];
for FunctionalDependence {
source_indices,
target_indices,
nullable,
mode,
} in &self.deps
{
let new_source_indices =
update_elements_with_matching_indices(source_indices, proj_indices);
let new_target_indices = if *mode == Dependency::Single {
// Associate with all of the fields in the schema:
(0..n_out).collect()
} else {
// Update associations according to projection:
update_elements_with_matching_indices(target_indices, proj_indices)
};
// All of the composite indices should still be valid after projection;
// otherwise, functional dependency cannot be propagated.
if new_source_indices.len() == source_indices.len() {
let new_func_dependence = FunctionalDependence::new(
new_source_indices,
new_target_indices,
*nullable,
)
.with_mode(*mode);
projected_func_dependencies.push(new_func_dependence);
}
}
FunctionalDependencies::new(projected_func_dependencies)
}
/// This function joins this set of functional dependencies with the `other`
/// according to the given `join_type`.
pub fn join(
&self,
other: &FunctionalDependencies,
join_type: &JoinType,
left_cols_len: usize,
) -> FunctionalDependencies {
// Get mutable copies of left and right side dependencies:
let mut right_func_dependencies = other.clone();
let mut left_func_dependencies = self.clone();
match join_type {
JoinType::Inner | JoinType::Left | JoinType::Right => {
// Add offset to right schema:
right_func_dependencies.add_offset(left_cols_len);
// Result may have multiple values, update the dependency mode:
left_func_dependencies =
left_func_dependencies.with_dependency(Dependency::Multi);
right_func_dependencies =
right_func_dependencies.with_dependency(Dependency::Multi);
if *join_type == JoinType::Left {
// Downgrade the right side, since it may have additional NULL values:
right_func_dependencies.downgrade_dependencies();
} else if *join_type | pub target_indices: Vec<usize>,
/// Flag indicating whether one of the `source_indices` can receive NULL values.
/// For a data source, if the constraint in question is `Constraint::Unique`,
/// this flag is `true`. If the constraint in question is `Constraint::PrimaryKey`, | random_line_split |
functional_dependencies.rs | DataFusionError::Execution(
"Primary key doesn't exist".to_string(),
)
})?;
Ok(idx)
})
.collect::<Result<Vec<_>>>()?;
Ok(if *is_primary {
Constraint::PrimaryKey(indices)
} else {
Constraint::Unique(indices)
})
}
TableConstraint::ForeignKey { .. } => Err(DataFusionError::Plan(
"Foreign key constraints are not currently supported".to_string(),
)),
TableConstraint::Check { .. } => Err(DataFusionError::Plan(
"Check constraints are not currently supported".to_string(),
)),
TableConstraint::Index { .. } => Err(DataFusionError::Plan(
"Indexes are not currently supported".to_string(),
)),
TableConstraint::FulltextOrSpatial { .. } => Err(DataFusionError::Plan(
"Indexes are not currently supported".to_string(),
)),
})
.collect::<Result<Vec<_>>>()?;
Ok(Constraints::new(constraints))
}
/// Check whether constraints is empty
pub fn is_empty(&self) -> bool {
self.inner.is_empty()
}
}
impl Display for Constraints {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let pk: Vec<String> = self.inner.iter().map(|c| format!("{:?}", c)).collect();
let pk = pk.join(", ");
if !pk.is_empty() {
write!(f, " constraints=[{pk}]")
} else {
write!(f, "")
}
}
}
/// This object defines a functional dependence in the schema. A functional
/// dependence defines a relationship between determinant keys and dependent
/// columns. A determinant key is a column, or a set of columns, whose value
/// uniquely determines values of some other (dependent) columns. If two rows
/// have the same determinant key, dependent columns in these rows are
/// necessarily the same. If the determinant key is unique, the set of
/// dependent columns is equal to the entire schema and the determinant key can
/// serve as a primary key. Note that a primary key may "downgrade" into a
/// determinant key due to an operation such as a join, and this object is
/// used to track dependence relationships in such cases. For more information
/// on functional dependencies, see:
/// <https://www.scaler.com/topics/dbms/functional-dependency-in-dbms/>
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FunctionalDependence {
// Column indices of the (possibly composite) determinant key:
pub source_indices: Vec<usize>,
// Column indices of dependent column(s):
pub target_indices: Vec<usize>,
/// Flag indicating whether one of the `source_indices` can receive NULL values.
/// For a data source, if the constraint in question is `Constraint::Unique`,
/// this flag is `true`. If the constraint in question is `Constraint::PrimaryKey`,
/// this flag is `false`.
/// Note that as the schema changes between different stages in a plan,
/// such as after LEFT JOIN or RIGHT JOIN operations, this property may
/// change.
pub nullable: bool,
// The functional dependency mode:
pub mode: Dependency,
}
/// Describes functional dependency mode.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Dependency {
Single, // A determinant key may occur only once.
Multi, // A determinant key may occur multiple times (in multiple rows).
}
impl FunctionalDependence {
// Creates a new functional dependence.
pub fn new(
source_indices: Vec<usize>,
target_indices: Vec<usize>,
nullable: bool,
) -> Self {
Self {
source_indices,
target_indices,
nullable,
// Start with the least restrictive mode by default:
mode: Dependency::Multi,
}
}
pub fn with_mode(mut self, mode: Dependency) -> Self {
self.mode = mode;
self
}
}
/// This object encapsulates all functional dependencies in a given relation.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FunctionalDependencies {
deps: Vec<FunctionalDependence>,
}
impl FunctionalDependencies {
/// Creates an empty `FunctionalDependencies` object.
pub fn empty() -> Self {
Self { deps: vec![] }
}
/// Creates a new `FunctionalDependencies` object from a vector of
/// `FunctionalDependence` objects.
pub fn new(dependencies: Vec<FunctionalDependence>) -> Self {
Self { deps: dependencies }
}
/// Creates a new `FunctionalDependencies` object from the given constraints.
pub fn new_from_constraints(
constraints: Option<&Constraints>,
n_field: usize,
) -> Self {
if let Some(Constraints { inner: constraints }) = constraints {
// Construct dependency objects based on each individual constraint:
let dependencies = constraints
.iter()
.map(|constraint| {
// All the field indices are associated with the whole table
// since we are dealing with table level constraints:
let dependency = match constraint {
Constraint::PrimaryKey(indices) => FunctionalDependence::new(
indices.to_vec(),
(0..n_field).collect::<Vec<_>>(),
false,
),
Constraint::Unique(indices) => FunctionalDependence::new(
indices.to_vec(),
(0..n_field).collect::<Vec<_>>(),
true,
),
};
// As primary keys are guaranteed to be unique, set the
// functional dependency mode to `Dependency::Single`:
dependency.with_mode(Dependency::Single)
})
.collect::<Vec<_>>();
Self::new(dependencies)
} else {
// There is no constraint, return an empty object:
Self::empty()
}
}
pub fn with_dependency(mut self, mode: Dependency) -> Self {
self.deps.iter_mut().for_each(|item| item.mode = mode);
self
}
/// Merges the given functional dependencies with these.
pub fn extend(&mut self, other: FunctionalDependencies) {
self.deps.extend(other.deps);
}
/// Adds the `offset` value to `source_indices` and `target_indices` for
/// each functional dependency.
pub fn add_offset(&mut self, offset: usize) {
self.deps.iter_mut().for_each(
|FunctionalDependence {
source_indices,
target_indices,
..
}| {
*source_indices = add_offset_to_vec(source_indices, offset);
*target_indices = add_offset_to_vec(target_indices, offset);
},
)
}
/// Updates `source_indices` and `target_indices` of each functional
/// dependence using the index mapping given in `proj_indices`.
///
/// Assume that `proj_indices` is \[2, 5, 8\] and we have a functional
/// dependence \[5\] (`source_indices`) -> \[5, 8\] (`target_indices`).
/// In the updated schema, fields at indices \[2, 5, 8\] will transform
/// to \[0, 1, 2\]. Therefore, the resulting functional dependence will
/// be \[1\] -> \[1, 2\].
pub fn project_functional_dependencies(
&self,
proj_indices: &[usize],
// The argument `n_out` denotes the schema field length, which is needed
// to correctly associate a `Single`-mode dependence with the whole table.
n_out: usize,
) -> FunctionalDependencies {
let mut projected_func_dependencies = vec![];
for FunctionalDependence {
source_indices,
target_indices,
nullable,
mode,
} in &self.deps
{
let new_source_indices =
update_elements_with_matching_indices(source_indices, proj_indices);
let new_target_indices = if *mode == Dependency::Single {
// Associate with all of the fields in the schema:
(0..n_out).collect()
} else {
// Update associations according to projection:
update_elements_with_matching_indices(target_indices, proj_indices)
};
// All of the composite indices should still be valid after projection;
// otherwise, functional dependency cannot be propagated.
if new_source_indices.len() == source_indices.len() {
let new_func_dependence = FunctionalDependence::new(
new_source_indices,
new_target_indices,
*nullable,
)
.with_mode(*mode);
projected_func_dependencies.push(new_func_dependence);
}
}
FunctionalDependencies::new(projected_func_dependencies)
}
/// This function joins this set of functional dependencies with the `other`
/// according to the given `join_type`.
pub fn join(
&self,
other: &FunctionalDependencies,
join_type: &JoinType,
left_cols_len: usize,
) -> FunctionalDependencies {
// Get mutable copies of left and right side dependencies:
let mut right_func_dependencies = other.clone | {
let constraints = constraints
.iter()
.map(|c: &TableConstraint| match c {
TableConstraint::Unique {
columns,
is_primary,
..
} => {
// Get primary key and/or unique indices in the schema:
let indices = columns
.iter()
.map(|pk| {
let idx = df_schema
.fields()
.iter()
.position(|item| {
item.qualified_name() == pk.value.clone()
})
.ok_or_else(|| { | identifier_body | |
functional_dependencies.rs | ?;
Ok(idx)
})
.collect::<Result<Vec<_>>>()?;
Ok(if *is_primary {
Constraint::PrimaryKey(indices)
} else {
Constraint::Unique(indices)
})
}
TableConstraint::ForeignKey { .. } => Err(DataFusionError::Plan(
"Foreign key constraints are not currently supported".to_string(),
)),
TableConstraint::Check { .. } => Err(DataFusionError::Plan(
"Check constraints are not currently supported".to_string(),
)),
TableConstraint::Index { .. } => Err(DataFusionError::Plan(
"Indexes are not currently supported".to_string(),
)),
TableConstraint::FulltextOrSpatial { .. } => Err(DataFusionError::Plan(
"Indexes are not currently supported".to_string(),
)),
})
.collect::<Result<Vec<_>>>()?;
Ok(Constraints::new(constraints))
}
/// Check whether constraints is empty
pub fn is_empty(&self) -> bool {
self.inner.is_empty()
}
}
impl Display for Constraints {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let pk: Vec<String> = self.inner.iter().map(|c| format!("{:?}", c)).collect();
let pk = pk.join(", ");
if !pk.is_empty() {
write!(f, " constraints=[{pk}]")
} else {
write!(f, "")
}
}
}
/// This object defines a functional dependence in the schema. A functional
/// dependence defines a relationship between determinant keys and dependent
/// columns. A determinant key is a column, or a set of columns, whose value
/// uniquely determines values of some other (dependent) columns. If two rows
/// have the same determinant key, dependent columns in these rows are
/// necessarily the same. If the determinant key is unique, the set of
/// dependent columns is equal to the entire schema and the determinant key can
/// serve as a primary key. Note that a primary key may "downgrade" into a
/// determinant key due to an operation such as a join, and this object is
/// used to track dependence relationships in such cases. For more information
/// on functional dependencies, see:
/// <https://www.scaler.com/topics/dbms/functional-dependency-in-dbms/>
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FunctionalDependence {
// Column indices of the (possibly composite) determinant key:
pub source_indices: Vec<usize>,
// Column indices of dependent column(s):
pub target_indices: Vec<usize>,
/// Flag indicating whether one of the `source_indices` can receive NULL values.
/// For a data source, if the constraint in question is `Constraint::Unique`,
/// this flag is `true`. If the constraint in question is `Constraint::PrimaryKey`,
/// this flag is `false`.
/// Note that as the schema changes between different stages in a plan,
/// such as after LEFT JOIN or RIGHT JOIN operations, this property may
/// change.
pub nullable: bool,
// The functional dependency mode:
pub mode: Dependency,
}
/// Describes functional dependency mode.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Dependency {
Single, // A determinant key may occur only once.
Multi, // A determinant key may occur multiple times (in multiple rows).
}
impl FunctionalDependence {
// Creates a new functional dependence.
pub fn new(
source_indices: Vec<usize>,
target_indices: Vec<usize>,
nullable: bool,
) -> Self {
Self {
source_indices,
target_indices,
nullable,
// Start with the least restrictive mode by default:
mode: Dependency::Multi,
}
}
pub fn with_mode(mut self, mode: Dependency) -> Self {
self.mode = mode;
self
}
}
/// This object encapsulates all functional dependencies in a given relation.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FunctionalDependencies {
deps: Vec<FunctionalDependence>,
}
impl FunctionalDependencies {
/// Creates an empty `FunctionalDependencies` object.
pub fn empty() -> Self {
Self { deps: vec![] }
}
/// Creates a new `FunctionalDependencies` object from a vector of
/// `FunctionalDependence` objects.
pub fn | (dependencies: Vec<FunctionalDependence>) -> Self {
Self { deps: dependencies }
}
/// Creates a new `FunctionalDependencies` object from the given constraints.
pub fn new_from_constraints(
constraints: Option<&Constraints>,
n_field: usize,
) -> Self {
if let Some(Constraints { inner: constraints }) = constraints {
// Construct dependency objects based on each individual constraint:
let dependencies = constraints
.iter()
.map(|constraint| {
// All the field indices are associated with the whole table
// since we are dealing with table level constraints:
let dependency = match constraint {
Constraint::PrimaryKey(indices) => FunctionalDependence::new(
indices.to_vec(),
(0..n_field).collect::<Vec<_>>(),
false,
),
Constraint::Unique(indices) => FunctionalDependence::new(
indices.to_vec(),
(0..n_field).collect::<Vec<_>>(),
true,
),
};
// As primary keys are guaranteed to be unique, set the
// functional dependency mode to `Dependency::Single`:
dependency.with_mode(Dependency::Single)
})
.collect::<Vec<_>>();
Self::new(dependencies)
} else {
// There is no constraint, return an empty object:
Self::empty()
}
}
pub fn with_dependency(mut self, mode: Dependency) -> Self {
self.deps.iter_mut().for_each(|item| item.mode = mode);
self
}
/// Merges the given functional dependencies with these.
pub fn extend(&mut self, other: FunctionalDependencies) {
self.deps.extend(other.deps);
}
/// Adds the `offset` value to `source_indices` and `target_indices` for
/// each functional dependency.
pub fn add_offset(&mut self, offset: usize) {
self.deps.iter_mut().for_each(
|FunctionalDependence {
source_indices,
target_indices,
..
}| {
*source_indices = add_offset_to_vec(source_indices, offset);
*target_indices = add_offset_to_vec(target_indices, offset);
},
)
}
/// Updates `source_indices` and `target_indices` of each functional
/// dependence using the index mapping given in `proj_indices`.
///
/// Assume that `proj_indices` is \[2, 5, 8\] and we have a functional
/// dependence \[5\] (`source_indices`) -> \[5, 8\] (`target_indices`).
/// In the updated schema, fields at indices \[2, 5, 8\] will transform
/// to \[0, 1, 2\]. Therefore, the resulting functional dependence will
/// be \[1\] -> \[1, 2\].
pub fn project_functional_dependencies(
&self,
proj_indices: &[usize],
// The argument `n_out` denotes the schema field length, which is needed
// to correctly associate a `Single`-mode dependence with the whole table.
n_out: usize,
) -> FunctionalDependencies {
let mut projected_func_dependencies = vec![];
for FunctionalDependence {
source_indices,
target_indices,
nullable,
mode,
} in &self.deps
{
let new_source_indices =
update_elements_with_matching_indices(source_indices, proj_indices);
let new_target_indices = if *mode == Dependency::Single {
// Associate with all of the fields in the schema:
(0..n_out).collect()
} else {
// Update associations according to projection:
update_elements_with_matching_indices(target_indices, proj_indices)
};
// All of the composite indices should still be valid after projection;
// otherwise, functional dependency cannot be propagated.
if new_source_indices.len() == source_indices.len() {
let new_func_dependence = FunctionalDependence::new(
new_source_indices,
new_target_indices,
*nullable,
)
.with_mode(*mode);
projected_func_dependencies.push(new_func_dependence);
}
}
FunctionalDependencies::new(projected_func_dependencies)
}
/// This function joins this set of functional dependencies with the `other`
/// according to the given `join_type`.
pub fn join(
&self,
other: &FunctionalDependencies,
join_type: &JoinType,
left_cols_len: usize,
) -> FunctionalDependencies {
// Get mutable copies of left and right side dependencies:
let mut right_func_dependencies = other.clone();
let mut left_func_dependencies = self.clone();
match join_type {
JoinType::Inner | JoinType::Left | JoinType::Right => {
// Add offset to right schema:
right_func_dependencies.add_offset(left_cols_len);
// Result may have multiple values, update the dependency mode:
left_func_dependencies =
left_func_dependencies.with_dependency(Dependency::Multi);
right_func_dependencies =
right_func_dependencies.with_dependency(Dependency::Multi);
if *join_type == JoinType::Left {
// Downgrade the right side, since it may have additional NULL values:
right_func_dependencies.downgrade_dependencies();
} else if *join | new | identifier_name |
functional_dependencies.rs | ?;
Ok(idx)
})
.collect::<Result<Vec<_>>>()?;
Ok(if *is_primary | else {
Constraint::Unique(indices)
})
}
TableConstraint::ForeignKey { .. } => Err(DataFusionError::Plan(
"Foreign key constraints are not currently supported".to_string(),
)),
TableConstraint::Check { .. } => Err(DataFusionError::Plan(
"Check constraints are not currently supported".to_string(),
)),
TableConstraint::Index { .. } => Err(DataFusionError::Plan(
"Indexes are not currently supported".to_string(),
)),
TableConstraint::FulltextOrSpatial { .. } => Err(DataFusionError::Plan(
"Indexes are not currently supported".to_string(),
)),
})
.collect::<Result<Vec<_>>>()?;
Ok(Constraints::new(constraints))
}
/// Check whether constraints is empty
pub fn is_empty(&self) -> bool {
self.inner.is_empty()
}
}
impl Display for Constraints {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let pk: Vec<String> = self.inner.iter().map(|c| format!("{:?}", c)).collect();
let pk = pk.join(", ");
if !pk.is_empty() {
write!(f, " constraints=[{pk}]")
} else {
write!(f, "")
}
}
}
/// This object defines a functional dependence in the schema. A functional
/// dependence defines a relationship between determinant keys and dependent
/// columns. A determinant key is a column, or a set of columns, whose value
/// uniquely determines values of some other (dependent) columns. If two rows
/// have the same determinant key, dependent columns in these rows are
/// necessarily the same. If the determinant key is unique, the set of
/// dependent columns is equal to the entire schema and the determinant key can
/// serve as a primary key. Note that a primary key may "downgrade" into a
/// determinant key due to an operation such as a join, and this object is
/// used to track dependence relationships in such cases. For more information
/// on functional dependencies, see:
/// <https://www.scaler.com/topics/dbms/functional-dependency-in-dbms/>
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FunctionalDependence {
// Column indices of the (possibly composite) determinant key:
pub source_indices: Vec<usize>,
// Column indices of dependent column(s):
pub target_indices: Vec<usize>,
/// Flag indicating whether one of the `source_indices` can receive NULL values.
/// For a data source, if the constraint in question is `Constraint::Unique`,
/// this flag is `true`. If the constraint in question is `Constraint::PrimaryKey`,
/// this flag is `false`.
/// Note that as the schema changes between different stages in a plan,
/// such as after LEFT JOIN or RIGHT JOIN operations, this property may
/// change.
pub nullable: bool,
// The functional dependency mode:
pub mode: Dependency,
}
/// Describes functional dependency mode.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Dependency {
Single, // A determinant key may occur only once.
Multi, // A determinant key may occur multiple times (in multiple rows).
}
impl FunctionalDependence {
// Creates a new functional dependence.
pub fn new(
source_indices: Vec<usize>,
target_indices: Vec<usize>,
nullable: bool,
) -> Self {
Self {
source_indices,
target_indices,
nullable,
// Start with the least restrictive mode by default:
mode: Dependency::Multi,
}
}
pub fn with_mode(mut self, mode: Dependency) -> Self {
self.mode = mode;
self
}
}
/// This object encapsulates all functional dependencies in a given relation.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FunctionalDependencies {
deps: Vec<FunctionalDependence>,
}
impl FunctionalDependencies {
/// Creates an empty `FunctionalDependencies` object.
pub fn empty() -> Self {
Self { deps: vec![] }
}
/// Creates a new `FunctionalDependencies` object from a vector of
/// `FunctionalDependence` objects.
pub fn new(dependencies: Vec<FunctionalDependence>) -> Self {
Self { deps: dependencies }
}
/// Creates a new `FunctionalDependencies` object from the given constraints.
pub fn new_from_constraints(
constraints: Option<&Constraints>,
n_field: usize,
) -> Self {
if let Some(Constraints { inner: constraints }) = constraints {
// Construct dependency objects based on each individual constraint:
let dependencies = constraints
.iter()
.map(|constraint| {
// All the field indices are associated with the whole table
// since we are dealing with table level constraints:
let dependency = match constraint {
Constraint::PrimaryKey(indices) => FunctionalDependence::new(
indices.to_vec(),
(0..n_field).collect::<Vec<_>>(),
false,
),
Constraint::Unique(indices) => FunctionalDependence::new(
indices.to_vec(),
(0..n_field).collect::<Vec<_>>(),
true,
),
};
// As primary keys are guaranteed to be unique, set the
// functional dependency mode to `Dependency::Single`:
dependency.with_mode(Dependency::Single)
})
.collect::<Vec<_>>();
Self::new(dependencies)
} else {
// There is no constraint, return an empty object:
Self::empty()
}
}
pub fn with_dependency(mut self, mode: Dependency) -> Self {
self.deps.iter_mut().for_each(|item| item.mode = mode);
self
}
/// Merges the given functional dependencies with these.
pub fn extend(&mut self, other: FunctionalDependencies) {
self.deps.extend(other.deps);
}
/// Adds the `offset` value to `source_indices` and `target_indices` for
/// each functional dependency.
pub fn add_offset(&mut self, offset: usize) {
self.deps.iter_mut().for_each(
|FunctionalDependence {
source_indices,
target_indices,
..
}| {
*source_indices = add_offset_to_vec(source_indices, offset);
*target_indices = add_offset_to_vec(target_indices, offset);
},
)
}
/// Updates `source_indices` and `target_indices` of each functional
/// dependence using the index mapping given in `proj_indices`.
///
/// Assume that `proj_indices` is \[2, 5, 8\] and we have a functional
/// dependence \[5\] (`source_indices`) -> \[5, 8\] (`target_indices`).
/// In the updated schema, fields at indices \[2, 5, 8\] will transform
/// to \[0, 1, 2\]. Therefore, the resulting functional dependence will
/// be \[1\] -> \[1, 2\].
pub fn project_functional_dependencies(
&self,
proj_indices: &[usize],
// The argument `n_out` denotes the schema field length, which is needed
// to correctly associate a `Single`-mode dependence with the whole table.
n_out: usize,
) -> FunctionalDependencies {
let mut projected_func_dependencies = vec![];
for FunctionalDependence {
source_indices,
target_indices,
nullable,
mode,
} in &self.deps
{
let new_source_indices =
update_elements_with_matching_indices(source_indices, proj_indices);
let new_target_indices = if *mode == Dependency::Single {
// Associate with all of the fields in the schema:
(0..n_out).collect()
} else {
// Update associations according to projection:
update_elements_with_matching_indices(target_indices, proj_indices)
};
// All of the composite indices should still be valid after projection;
// otherwise, functional dependency cannot be propagated.
if new_source_indices.len() == source_indices.len() {
let new_func_dependence = FunctionalDependence::new(
new_source_indices,
new_target_indices,
*nullable,
)
.with_mode(*mode);
projected_func_dependencies.push(new_func_dependence);
}
}
FunctionalDependencies::new(projected_func_dependencies)
}
/// This function joins this set of functional dependencies with the `other`
/// according to the given `join_type`.
pub fn join(
&self,
other: &FunctionalDependencies,
join_type: &JoinType,
left_cols_len: usize,
) -> FunctionalDependencies {
// Get mutable copies of left and right side dependencies:
let mut right_func_dependencies = other.clone();
let mut left_func_dependencies = self.clone();
match join_type {
JoinType::Inner | JoinType::Left | JoinType::Right => {
// Add offset to right schema:
right_func_dependencies.add_offset(left_cols_len);
// Result may have multiple values, update the dependency mode:
left_func_dependencies =
left_func_dependencies.with_dependency(Dependency::Multi);
right_func_dependencies =
right_func_dependencies.with_dependency(Dependency::Multi);
if *join_type == JoinType::Left {
// Downgrade the right side, since it may have additional NULL values:
right_func_dependencies.downgrade_dependencies();
} else if *join | {
Constraint::PrimaryKey(indices)
} | conditional_block |
views.py | current_page + 3 >= pages:
# return range(pages - 4, pages + 1)
# else:
# return range(current_page - 2, current_page + 2)
def loginValid(fun):
    """Decorator: allow access only for an authenticated, consistent identity.

    The ``id`` and ``username`` cookies must identify an existing ``User``
    whose username also matches the one stored in the server-side session;
    any mismatch (or a malformed ``id`` cookie, which previously crashed
    ``int()`` with a 500) redirects to the login page instead.
    """
    @functools.wraps(fun)
    def inner(*args, **kwargs):
        raw_id = request.cookies.get('id', 0)
        username = request.cookies.get('username')
        session_username = session.get('username')
        try:
            user_id = int(raw_id)
        except (TypeError, ValueError):
            # Tampered/garbled cookie: treat as unauthenticated, don't raise.
            return redirect('/login/')
        user = User.query.get(user_id)
        if user and user.username == username and username == session_username:
            return fun(*args, **kwargs)
        return redirect('/login/')
    return inner
class Calendar:  # month-calendar helper
    """Builds a month calendar as rows of 7 cells (Monday-first).

    Each cell is either the string ``'empty'`` (padding) or a single-entry
    dict mapping the day number to a randomly chosen subject from ``work``.

    Fix: the original evaluated ``datetime.datetime.now()`` in the default
    arguments, freezing "current year/month" at import time — wrong for a
    long-running server. Defaults are now resolved at call time; explicit
    ``Calendar(year, month)`` calls behave exactly as before.
    """

    def __init__(self, year=None, month=None):
        now = datetime.datetime.now()
        if year is None:
            year = now.year
        if month is None:
            month = now.month
        assert int(month) <= 12
        date = datetime.datetime(year, month, 1, 0, 0)  # first day of the month
        self.start_day = date.weekday()  # weekday of the 1st (Monday == 0)
        self.days = list(self.back_days(year, month))  # remaining day numbers to place
        self.work = ['语文', '数学', '英语', '物理', '化学', '地理', '生物']

    def back_days(self, year, month):
        """Return the 1-based day-number range for the given month."""
        big_month = [1, 3, 5, 7, 8, 10, 12]
        small_month = [4, 6, 9, 11]
        two_month = 28
        # Leap year: divisible by 4 and not by 100, or divisible by 400.
        if year % 4 == 0 and year % 100 != 0 or year % 400 == 0:
            two_month = 29
        assert int(month) <= 12
        if month in big_month:
            return range(1, 32)
        elif month in small_month:
            return range(1, 31)
        else:
            return range(1, two_month + 1)

    def first_list(self, start_day, days):
        """Build the first row, left-padded with 'empty' cells up to length 7."""
        ca_list = [{self.days.pop(0): random.choice(self.work)} for i in range(1, 8 - start_day)]
        [ca_list.insert(0, 'empty') for j in range(7 - len(ca_list))]
        return ca_list

    def return_calendar(self):
        """Return the month as a list of 7-cell rows."""
        first_line = self.first_list(self.start_day, self.days)  # padded first row
        lines = [first_line]
        while self.days:  # consume remaining days, 7 per row
            line = [{self.days.pop(0): random.choice(self.work)} for i in range(7) if self.days]
            [line.append('empty') for j in range(7 - len(line))]  # right-pad the last row
            lines.append(line)
        return lines
class Paginator:
    """Simple pager over a SQLAlchemy-style query.

    ``datas`` must provide ``count()``, ``offset()`` and ``limit()``.
    """

    def __init__(self, datas, page_size):
        self.datas = datas
        self.page_size = page_size
        # Total page count, rounding the last partial page up.
        self.all_pages = math.ceil(self.datas.count() / self.page_size)

    def back_page(self, current_page):
        """Return the page-number window (at most 5 pages) around ``current_page``."""
        if self.all_pages <= 5:
            return range(1, self.all_pages + 1)
        if current_page <= 3:
            return range(1, 6)
        elif current_page + 3 >= self.all_pages:
            return range(self.all_pages - 4, self.all_pages + 1)
        else:
            # Fixed off-by-one: the window is current_page ± 2 (5 pages, like
            # the other branches); range()'s upper bound is exclusive, so + 3.
            return range(current_page - 2, current_page + 3)

    def back_data(self, current_page):
        """Return the slice of ``datas`` for 1-based page ``current_page``."""
        datas = self.datas.offset((current_page - 1) * self.page_size).limit(self.page_size)
        return datas
@main.route('/')  # route
def base():  # view
    """Render the application's base page."""
    # Example of seeding a Curriculum record, kept for reference:
    # c = Curriculum(c_id='0001', c_name='python', c_time=datetime.datetime.now())
    # c.save()
    page = render_template('base.html')
    return page
@main.route('/index/')  # route
@loginValid
def index():  # view
    """Render the index page; access restricted to logged-in users."""
    page = render_template('index.html')
    return page
@main.route('/register/', methods=['GET', 'POST'])  # route
def register():  # view
    """User registration view.

    GET renders the form. POST validates the submitted fields in order
    (username, email, password); when all are present a new ``User`` is
    persisted (password hashed via ``set_pwd``) and the client is redirected
    to the login page, otherwise the form is re-rendered with an error.

    NOTE: the locals are passed to the template via ``**locals()``, so their
    names are part of the template contract and are kept unchanged.
    """
    err_msg = ''
    if request.method == 'POST':
        username = request.form.get('username')
        password = request.form.get('password')
        email = request.form.get('email')
        if not username:
            err_msg = '用户名不可为空'
        elif not email:
            err_msg = '邮箱不可为空'
        elif not password:
            err_msg = '密码不可为空'
        else:
            user = User()
            user.username = username
            user.email = email
            user.password = set_pwd(password)
            user.save()
            return redirect('/login/')
    return render_template('register.html', **locals())
@main.route('/login/', methods=['GET', 'POST']) # 路由
def login(): # 视图
err_msg = ''
if request.method == 'POST':
email = request.form.get('email')
password = request.form.get('password')
user = User.query.filter_by(email=email).first()
if user:
if set_pwd(password) == user.password:
response = redirect('/index/')
response.set_cookie('email', user.email)
response.set_cookie('id', str(user.id))
response.set_cookie('username', user.username)
print(user.username)
session['username'] = user.username
return response
else:
err_msg = '密码错误'
else:
err_msg = '该账号未注册'
return render_template('login.html', **locals())
@main.route('/logout/')
def logout(): # 退出
response = redirect('/login/')
response.delete_cookie('email')
response.delete_cookie('id')
response.delete_cookie('username')
session.pop('username')
return response
@main.route('/user_info/') # 路由
@loginValid
def user_info(): # 个人中心
c = Calendar()
datas = c.return_calendar()
day = datetime.datetime.now().day
return render_template('user_info.html', **locals())
@main.route('/leave/', methods=['get', 'post'])
# @csrf.exempt
@loginValid
def leave():
err_msg = ''
if request.method == 'POST':
leave_name = request.form.get('leave_name')
leave_type = request.form.get('leave_type')
leave_start = request.form.get('leave_start')
leave_end = request.form.get('leave_end')
leave_desc = request.form.get('leave_desc')
leave_phone = request.form.get('leave_phone')
if leave_name and leave_type and leave_start and leave_end and leave_desc and leave_phone:
id = int(request.cookies.get('id'))
lea = Leave()
lea.leave_id = id
lea.leave_name = leave_name
lea.leave_type = leave_type
lea.leave_start = leave_start
lea.leave_end = leave_end
lea.leave_desc = leave_desc
lea.leave_phone = leave_phone
lea.leave_status = '0'
lea.save()
else:
err_msg = '请填写全部内容'
return render_template('leave.html', **locals())
@main.route('/leave_list/<p>/', methods=['get', 'post'])
@loginValid
def leave_list(p):
page_size = 5
p = int(p)
id = int(request.cookies.get('id'))
leaves = Leave.query.filter_by(leave_id=id)
pagin = Paginator(leaves, page_size)
pages = pagin.back_page(p)
leaves = pagin.back_data(p)
return render_template('leave_list.html', **locals())
@main.route('/cancel/')
def cancel():
id = request.args.get('id') # 通过args接受get请求数据
leave = Leave.query.get(int(id))
leave.delete()
return jsonify({'data': '删除成功'})
@main.route('/add_task/', methods=['get', 'post'])
| task.validate_on_submit() # 判断是否是一个有效的post请求
task.validate() # 判断是否是一个有效的post请求
task.data # 提交的数据
:return:
'''
errors = {}
task = TaskForm()
if request.method == 'POST':
if task.validate_on_submit():
formData = task.data
else:
errors_list = list(task.errors.keys())
errors = task.errors
print(errors)
return render_template('add_task.html', **locals())
@api.resource('/Api/Leave/')
class LeaveApi(Resource):
def __init__(self): # 定义返回的格式
super(LeaveApi, self).__init__()
self.result = {
'version': '1.0',
'data': ''
}
def set_data(self, leave): # 定义返回的数据
result_data = {
'leave_name': leave.leave_name,
'leave_type': leave.leave_type,
'leave_start': leave.leave_start,
'leave_end': leave.leave_end,
'leave_desc': leave.leave_desc,
'leave_phone': leave.leave_phone,
}
return result_data
def | def add_task():
'''
task.errors # 表单校验错误
| random_line_split |
views.py | _page + 3 >= pages:
# return range(pages - 4, pages + 1)
# else:
# return range(current_page - 2, current_page + 2)
def loginValid(fun):
@functools.wraps(fun)
def inner(*args, **kwargs):
id = request.cookies.get('id', 0)
username = request.cookies.get('username')
session_username = session.get('username')
user = User.query.get(int(id))
if user:
if user.username == username and username == session_username:
return fun(*args, **kwargs)
return redirect('/login/')
return inner
class Calendar: # 日历类
def __init__(self, year=datetime.datetime.now().year, month=datetime.datetime.now().month):
assert int(month) <= 12
date = datetime.datetime(year, month, 1, 0, 0) # 当前月1日
self.start_day = date.weekday() # 当前月1号是周几
self.days = list(self.back_days(year, month)) # 当月天数
self.work = ['语文', '数学', '英语', '物理', '化学', '地理', '生物']
def back_days(self, year, month): # 返回当月天数
big_month = [1, 3, 5, 7, 8, 10, 12]
small_month = [4, 6, 9, 11]
two_month = 28
if year % 4 == 0 and year % 100 != 0 or year % 400 == 0:
two_month = 29
assert int(month) <= 12
if month in big_month:
return range(1, 32)
elif month in small_month:
return range(1, 31)
else:
return range(1, two_month + 1)
def first_list(self, start_day, days): # 日历第一行
ca_list = [{self.days.pop(0): random.choice(self.work)} for i in range(1, 8 - start_day)]
[ca_list.insert(0, 'empty') for j in range(7 - len(ca_list))]
return ca_list
def return_calendar(self): # 返回日历的列表
first_line = self.first_list(self.start_day, self.days) # 日历第一行
lines = [first_line] # 存日历的列表
while self.days: # 得到每一行
line = [{self.days.pop(0): random.choice(self.work)} for i in range(7) if self.days]
[line.append('empty') for j in range(7 - len(line))] # 长度不足补空
lines.append(line)
return lines
class Paginator:
def __init__(self, datas, page_size):
self.datas = datas
self.page_size = page_size
self.all_pages = math.ceil(self.datas.count() / self.page_size)
def back_page(self, current_page):
if self.all_pages <= 5:
return range(1, self.all_pages + 1)
if current_page <= 3:
return range(1, 6)
elif current_page + 3 >= self.all_pages:
return range(self.all_pages - 4, self.all_pages + 1)
else:
return range(current_page - 2, current_page + 2)
def back_data(self, current_page):
datas = self.datas.offset((current_page - 1) * self.page_size).limit(self.page_size)
return datas
@main.route('/') # 路由
def base(): # 视图
# c = Curriculum(c_id='0001', c_name='python', c_time=datetime.datetime.now())
# c.save()
return render_template('base.html')
@main.route('/index/') # 路由
@loginValid
def index(): # 视图
return render_template('index.html')
@main.route('/register/', meth | ister(): # 视图
err_msg = ''
if request.method == 'POST':
username = request.form.get('username')
password = request.form.get('password')
email = request.form.get('email')
if username:
if email:
if password:
user = User()
user.username = username
user.email = email
user.password = set_pwd(password)
user.save()
return redirect('/login/')
else:
err_msg = '密码不可为空'
else:
err_msg = '邮箱不可为空'
else:
err_msg = '用户名不可为空'
return render_template('register.html', **locals())
@main.route('/login/', methods=['GET', 'POST']) # 路由
def login(): # 视图
err_msg = ''
if request.method == 'POST':
email = request.form.get('email')
password = request.form.get('password')
user = User.query.filter_by(email=email).first()
if user:
if set_pwd(password) == user.password:
response = redirect('/index/')
response.set_cookie('email', user.email)
response.set_cookie('id', str(user.id))
response.set_cookie('username', user.username)
print(user.username)
session['username'] = user.username
return response
else:
err_msg = '密码错误'
else:
err_msg = '该账号未注册'
return render_template('login.html', **locals())
@main.route('/logout/')
def logout(): # 退出
response = redirect('/login/')
response.delete_cookie('email')
response.delete_cookie('id')
response.delete_cookie('username')
session.pop('username')
return response
@main.route('/user_info/') # 路由
@loginValid
def user_info(): # 个人中心
c = Calendar()
datas = c.return_calendar()
day = datetime.datetime.now().day
return render_template('user_info.html', **locals())
@main.route('/leave/', methods=['get', 'post'])
# @csrf.exempt
@loginValid
def leave():
err_msg = ''
if request.method == 'POST':
leave_name = request.form.get('leave_name')
leave_type = request.form.get('leave_type')
leave_start = request.form.get('leave_start')
leave_end = request.form.get('leave_end')
leave_desc = request.form.get('leave_desc')
leave_phone = request.form.get('leave_phone')
if leave_name and leave_type and leave_start and leave_end and leave_desc and leave_phone:
id = int(request.cookies.get('id'))
lea = Leave()
lea.leave_id = id
lea.leave_name = leave_name
lea.leave_type = leave_type
lea.leave_start = leave_start
lea.leave_end = leave_end
lea.leave_desc = leave_desc
lea.leave_phone = leave_phone
lea.leave_status = '0'
lea.save()
else:
err_msg = '请填写全部内容'
return render_template('leave.html', **locals())
@main.route('/leave_list/<p>/', methods=['get', 'post'])
@loginValid
def leave_list(p):
page_size = 5
p = int(p)
id = int(request.cookies.get('id'))
leaves = Leave.query.filter_by(leave_id=id)
pagin = Paginator(leaves, page_size)
pages = pagin.back_page(p)
leaves = pagin.back_data(p)
return render_template('leave_list.html', **locals())
@main.route('/cancel/')
def cancel():
id = request.args.get('id') # 通过args接受get请求数据
leave = Leave.query.get(int(id))
leave.delete()
return jsonify({'data': '删除成功'})
@main.route('/add_task/', methods=['get', 'post'])
def add_task():
'''
task.errors # 表单校验错误
task.validate_on_submit() # 判断是否是一个有效的post请求
task.validate() # 判断是否是一个有效的post请求
task.data # 提交的数据
:return:
'''
errors = {}
task = TaskForm()
if request.method == 'POST':
if task.validate_on_submit():
formData = task.data
else:
errors_list = list(task.errors.keys())
errors = task.errors
print(errors)
return render_template('add_task.html', **locals())
@api.resource('/Api/Leave/')
class LeaveApi(Resource):
def __init__(self): # 定义返回的格式
super(LeaveApi, self).__init__()
self.result = {
'version': '1.0',
'data': ''
}
def set_data(self, leave): # 定义返回的数据
result_data = {
'leave_name': leave.leave_name,
'leave_type': leave.leave_type,
'leave_start': leave.leave_start,
'leave_end': leave.leave_end,
'leave_desc': leave.leave_desc,
'leave_phone': leave.leave_phone,
}
return result_data
def | ods=['GET', 'POST']) # 路由
def reg | identifier_body |
views.py | 加密
hl = md5(pwd.encode(encoding='utf-8'))
new_pwd = hl.hexdigest()
return new_pwd
# def back_page(pages, current_page): # 返回页数
# if pages <= 5:
# return range(1, pages + 1)
# if current_page <= 3:
# return range(1, 6)
# elif current_page + 3 >= pages:
# return range(pages - 4, pages + 1)
# else:
# return range(current_page - 2, current_page + 2)
def loginValid(fun):
@functools.wraps(fun)
def inner(*args, **kwargs):
id = request.cookies.get('id', 0)
username = request.cookies.get('username')
session_username = session.get('username')
user = User.query.get(int(id))
if user:
if user.username == username and username == session_username:
return fun(*args, **kwargs)
return redirect('/login/')
return inner
class Calendar: # 日历类
def __init__(self, year=datetime.datetime.now().year, month=datetime.datetime.now().month):
assert int(month) <= 12
date = datetime.datetime(year, month, 1, 0, 0) # 当前月1日
self.start_day = date.weekday() # 当前月1号是周几
self.days = list(self.back_days(year, month)) # 当月天数
self.work = ['语文', '数学', '英语', '物理', '化学', '地理', '生物']
def back_days(self, year, month): # 返回当月天数
big_month = [1, 3, 5, 7, 8, 10, 12]
small_month = [4, 6, 9, 11]
two_month = 28
if year % 4 == 0 and year % 100 != 0 or year % 400 == 0:
two_month = 29
assert int(month) <= 12
if month in big_month:
return range(1, 32)
elif month in small_month:
return range(1, 31)
else:
return range(1, two_month + 1)
def first_list(self, start_day, days): # 日历第一行
ca_list = [{self.days.pop(0): random.choice(self.work)} for i in range(1, 8 - start_day)]
[ca_list.insert(0, 'empty') for j in range(7 - len(ca_list))]
return ca_list
def return_calendar(self): # 返回日历的列表
first_line = self.first_list(self.start_day, self.days) # 日历第一行
lines = [first_line] # 存日历的列表
while self.days: # 得到每一行
line = [{self.days.pop(0): random.choice(self.work)} for i in range(7) if self.days]
[line.append('empty') for j in range(7 - len(line))] # 长度不足补空
lines.append(line)
return lines
class Paginator:
def __init__(self, datas, page_size):
self.datas = datas
self.page_size = page_size
self.all_pages = math.ceil(self.datas.count() / self.page_size)
def back_page(self, current_page):
if self.all_pages <= 5:
return range(1, self.all_pages + 1)
if current_page <= 3:
return range(1, 6)
elif current_page + 3 >= self.all_pages:
return range(self.all_pages - 4, self.all_pages + 1)
else:
return range(current_page - 2, current_page + 2)
def back_data(self, current_page):
datas = self.datas.offset((current_page - 1) * self.page_size).limit(self.page_size)
return datas
@main.route('/') # 路由
def base(): # 视图
# c = Curriculum(c_id='0001', c_name='python', c_time=datetime.datetime.now())
# c.save()
return render_template('base.html')
@main.route('/index/') # 路由
@loginValid
def index(): # 视图
return render_template('index.html')
@main.route('/register/', methods=['GET', 'POST']) # 路由
def register(): # 视图
err_msg = ''
if request.method == 'POST':
username = request.form.get('username')
password = request.form.get('password')
email = request.form.get('email')
if username:
if email:
if password:
user = User()
user.username = username
user.email = email
user.password = set_pwd(password)
user.save()
return redirect('/login/')
else:
err_msg = '密码不可为空'
else:
err_msg = '邮箱不可为空'
else:
err_msg = '用户名不可为空'
return render_template('register.html', **locals())
@main.route('/login/', methods=['GET', 'POST']) # 路由
def login(): # 视图
err_msg = ''
if request.method == 'POST':
email = request.form.get('email')
password = request.form.get('password')
user = User.query.filter_by(email=email).first()
if user:
if set_pwd(password) == user.password:
response = redirect('/index/')
response.set_cookie('email', user.email)
response.set_cookie('id', str(user.id))
response.set_cookie('username', user.username)
print(user.username)
session['username'] = user.username
return response
else:
err_msg = '密码错误'
else:
err_msg = '该账号未注册'
return render_template('login.html', **locals())
@main.route('/logout/')
def logout(): # 退出
response = redirect('/login/')
response.delete_cookie('email')
response.delete_cookie('id')
response.delete_cookie('username')
session.pop('username')
return response
@main.route('/user_info/') # 路由
@loginValid
def user_info(): # 个人中心
c = Calendar()
datas = c.return_calendar()
day = datetime.datetime.now().day
return render_template('user_info.html', **locals())
@main.route('/leave/', methods=['get', 'post'])
# @csrf.exempt
@loginValid
def leave():
err_msg = ''
if request.method == 'POST':
leave_name = request.form.get('leave_name')
leave_type = request.form.get('leave_type')
leave_start = request.form.get('leave_start')
leave_end = request.form.get('leave_end')
leave_desc = request.form.get('leave_desc')
leave_phone = request.form.get('leave_phone')
if leave_name and leave_type and leave_start and leave_end and leave_desc and leave_phone:
id = int(request.cookies.get('id'))
lea = Leave()
lea.leave_id = id
lea.leave_name = leave_name
lea.leave_type = leave_type
lea.leave_start = leave_start
lea.leave_end = leave_end
lea.leave_desc = leave_desc
lea.leave_phone = leave_phone
lea.leave_status = '0'
lea.save()
else:
err_msg = '请填写全部内容'
return render_template('leave.html', **locals())
@main.route('/leave_list/<p>/', methods=['get', 'post'])
@loginValid
def leave_list(p):
page_size = 5
p = int(p)
id = int(request.cookies.get('id'))
leaves = Leave.query.filter_by(leave_id=id)
pagin = Paginator(leaves, page_size)
pages = pagin.back_page(p)
leaves = pagin.back_data(p)
return render_template('leave_list.html', **locals())
@main.route('/cancel/')
def cancel():
id = request.args.get('id') # 通过args接受get请求数据
leave = Leave.query.get(int(id))
leave.delete()
return jsonify({'data': '删除成功'})
@main.route('/add_task/', methods=['get', 'post'])
def add_task():
'''
task.errors # 表单校验错误
task.validate_on_submit() # 判断是否是一个有效的post请求
task.validate() # 判断是否是一个有效的post请求
task.data # 提交的数据
:return:
'''
errors = {}
task = TaskForm()
if request.method == 'POST':
if task.validate_on_submit():
formData = task.data
else:
errors_list = list(task.errors.keys())
errors = task.errors
print(errors)
return render_template('add_task.html', **locals())
@api.resource('/Api/Leave/')
class LeaveApi(Resource):
def __init__(self): # 定义返回的格式
super(LeaveApi, self).__init__()
self.result = {
'version': '1.0',
| : # 密码 | identifier_name | |
views.py | _page + 3 >= pages:
# return range(pages - 4, pages + 1)
# else:
# return range(current_page - 2, current_page + 2)
def loginValid(fun):
@functools.wraps(fun)
def inner(*args, **kwargs):
id = request.cookies.get('id', 0)
username = request.cookies.get('username')
session_username = session.get('username')
user = User.query.get(int(id))
if user:
if user.username == username and username == session_username:
return fun(*args, **kwargs)
return redirect('/login/')
return inner
class Calendar: # 日历类
def __init__(self, year=datetime.datetime.now().year, month=datetime.datetime.now().month):
assert int(month) <= 12
date = datetime.datetime(year, month, 1, 0, 0) # 当前月1日
self.start_day = date.weekday() # 当前月1号是周几
self.days = list(self.back_days(year, month)) # 当月天数
self.work = ['语文', '数学', '英语', '物理', '化学', '地理', '生物']
def back_days(self, year, month): # 返回当月天数
big_month = [1, 3, 5, 7, 8, 10, 12]
small_month = [4, 6, 9, 11]
two_month = 28
if year % 4 == 0 and year % 100 != 0 or year % 400 == 0:
two_month = 29
assert int(month) <= 12
if month in big_month:
return range(1, 32)
elif month in small_month:
return range(1, 31)
else:
return range(1, two_month + 1)
def first_list(self, start_day, days): # 日历第一行
ca_list = [{self.days.pop(0): random.choice(self.work)} for i in range(1, 8 - start_day)]
[ca_list.insert(0, 'empty') for j in range(7 - len(ca_list))]
return ca_list
def return_calendar(self): # 返回日历的列表
first_line = self.first_list(self.start_day, self.days) # 日历第一行
lines = [first_line] # 存日历的列表
while self.days: # 得到每一行
line = [{self.days.pop(0): random.choice(self.work)} for i in range(7) if self.days]
[line.append('empty') for j in range(7 - len(line))] # 长度不足补空
lines.append(line)
return lines
class Paginator:
def __init__(self, datas, page_size):
self.datas = datas
self.page_size = page_size
self.all_pages = math.ceil(self.datas.count() / self.page_size)
def back_page(self, current_page):
if self.all_pages <= 5:
return range(1, self.all_pages + 1)
if current_page <= 3:
return range(1, 6)
elif current_page + 3 >= self.all_pages:
return range(self.all_pages - 4, self.all_pages + 1)
else:
return range(current_page - 2, current_page + 2)
def back_data(self, current_page):
datas = self.datas.offset((current_page - 1) * self.page_size).limit(self.page_size)
return datas
@main.route('/') # 路由
def base(): # 视图
# c = Curriculum(c_id='0001', c_name='python', c_time=datetime.datetime.now())
# c.save()
return render_template('base.html')
@main.route('/index/') # 路由
@loginValid
def index(): # 视图
return render_template('index.html')
@main.route('/register/', methods=['GET', 'POST']) # 路由
def register(): # 视图
err_msg = ''
if request.method == 'POST':
username = request.form.get('username')
password = request.form.get('password')
email = request.form.get('email')
if username:
if email:
if password:
user = User()
user.username = username
user.email = email
user.password = set_pwd(password)
user.save()
return redirect('/login/')
else:
err_msg = '密码不可为空'
else:
err_msg = '邮箱不可为空'
else:
err_msg = '用户名不可为空'
return render_template('register.html', **locals())
@main.route('/login/', methods=['GET', 'POST']) # 路由
def login(): # 视图
| ethod == 'POST':
email = request.form.get('email')
password = request.form.get('password')
user = User.query.filter_by(email=email).first()
if user:
if set_pwd(password) == user.password:
response = redirect('/index/')
response.set_cookie('email', user.email)
response.set_cookie('id', str(user.id))
response.set_cookie('username', user.username)
print(user.username)
session['username'] = user.username
return response
else:
err_msg = '密码错误'
else:
err_msg = '该账号未注册'
return render_template('login.html', **locals())
@main.route('/logout/')
def logout(): # 退出
response = redirect('/login/')
response.delete_cookie('email')
response.delete_cookie('id')
response.delete_cookie('username')
session.pop('username')
return response
@main.route('/user_info/') # 路由
@loginValid
def user_info(): # 个人中心
c = Calendar()
datas = c.return_calendar()
day = datetime.datetime.now().day
return render_template('user_info.html', **locals())
@main.route('/leave/', methods=['get', 'post'])
# @csrf.exempt
@loginValid
def leave():
err_msg = ''
if request.method == 'POST':
leave_name = request.form.get('leave_name')
leave_type = request.form.get('leave_type')
leave_start = request.form.get('leave_start')
leave_end = request.form.get('leave_end')
leave_desc = request.form.get('leave_desc')
leave_phone = request.form.get('leave_phone')
if leave_name and leave_type and leave_start and leave_end and leave_desc and leave_phone:
id = int(request.cookies.get('id'))
lea = Leave()
lea.leave_id = id
lea.leave_name = leave_name
lea.leave_type = leave_type
lea.leave_start = leave_start
lea.leave_end = leave_end
lea.leave_desc = leave_desc
lea.leave_phone = leave_phone
lea.leave_status = '0'
lea.save()
else:
err_msg = '请填写全部内容'
return render_template('leave.html', **locals())
@main.route('/leave_list/<p>/', methods=['get', 'post'])
@loginValid
def leave_list(p):
page_size = 5
p = int(p)
id = int(request.cookies.get('id'))
leaves = Leave.query.filter_by(leave_id=id)
pagin = Paginator(leaves, page_size)
pages = pagin.back_page(p)
leaves = pagin.back_data(p)
return render_template('leave_list.html', **locals())
@main.route('/cancel/')
def cancel():
id = request.args.get('id') # 通过args接受get请求数据
leave = Leave.query.get(int(id))
leave.delete()
return jsonify({'data': '删除成功'})
@main.route('/add_task/', methods=['get', 'post'])
def add_task():
'''
task.errors # 表单校验错误
task.validate_on_submit() # 判断是否是一个有效的post请求
task.validate() # 判断是否是一个有效的post请求
task.data # 提交的数据
:return:
'''
errors = {}
task = TaskForm()
if request.method == 'POST':
if task.validate_on_submit():
formData = task.data
else:
errors_list = list(task.errors.keys())
errors = task.errors
print(errors)
return render_template('add_task.html', **locals())
@api.resource('/Api/Leave/')
class LeaveApi(Resource):
def __init__(self): # 定义返回的格式
super(LeaveApi, self).__init__()
self.result = {
'version': '1.0',
'data': ''
}
def set_data(self, leave): # 定义返回的数据
result_data = {
'leave_name': leave.leave_name,
'leave_type': leave.leave_type,
'leave_start': leave.leave_start,
'leave_end': leave.leave_end,
'leave_desc': leave.leave_desc,
'leave_phone': leave.leave_phone,
}
return result_data
def get | err_msg = ''
if request.m | conditional_block |
apigroup.rs | , ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), kube::Error> {
/// let client = Client::try_default().await?;
/// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
/// for (apiresource, caps) in apigroup.versioned_resources("v1") {
/// println!("Found ApiResource {}", apiresource.kind);
/// }
/// Ok(())
/// }
/// ```
///
/// But if you do not know this information, you can use [`ApiGroup::preferred_version_or_latest`].
///
/// Whichever way you choose the end result is something describing a resource and its abilities:
/// - `Vec<(ApiResource, `ApiCapabilities)>` :: for all resources in a versioned ApiGroup
/// - `(ApiResource, ApiCapabilities)` :: for a single kind under a versioned ApiGroud
///
/// These two types: [`ApiResource`], and [`ApiCapabilities`]
/// should contain the information needed to construct an [`Api`](crate::Api) and start querying the kubernetes API.
/// You will likely need to use [`DynamicObject`] as the generic type for Api to do this,
/// as well as the [`ApiResource`] for the `DynamicType` for the [`Resource`] trait.
///
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), kube::Error> {
/// let client = Client::try_default().await?;
/// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
/// let (ar, caps) = apigroup.recommended_kind("APIService").unwrap();
/// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar);
/// for service in api.list(&Default::default()).await? {
/// println!("Found APIService: {}", service.name());
/// }
/// Ok(())
/// }
/// ```
/// [`ApiResource`]: crate::discovery::ApiResource
/// [`ApiCapabilities`]: crate::discovery::ApiCapabilities
/// [`DynamicObject`]: crate::api::DynamicObject
/// [`Resource`]: crate::Resource
/// [`ApiGroup::preferred_version_or_latest`]: crate::discovery::ApiGroup::preferred_version_or_latest
/// [`ApiGroup::versioned_resources`]: crate::discovery::ApiGroup::versioned_resources
/// [`ApiGroup::recommended_resources`]: crate::discovery::ApiGroup::recommended_resources
/// [`ApiGroup::recommended_kind`]: crate::discovery::ApiGroup::recommended_kind
pub struct ApiGroup {
/// Name of the group e.g. apiregistration.k8s.io
name: String,
/// List of resource information, capabilities at particular versions
data: Vec<GroupVersionData>,
/// Preferred version if exported by the `APIGroup`
preferred: Option<String>,
}
/// Internal queriers to convert from an APIGroup (or APIVersions for core) to our ApiGroup
///
/// These queriers ignore groups with empty versions.
/// This ensures that `ApiGroup::preferred_version_or_latest` always have an answer.
/// On construction, they also sort the internal vec of GroupVersionData according to `Version`.
impl ApiGroup {
pub(crate) async fn query_apis(client: &Client, g: APIGroup) -> Result<Self> {
tracing::debug!(name = g.name.as_str(), "Listing group versions");
let key = g.name;
if g.versions.is_empty() {
return Err(DiscoveryError::EmptyApiGroup(key).into());
}
let mut data = vec![];
for vers in &g.versions {
let resources = client.list_api_group_resources(&vers.group_version).await?;
data.push(GroupVersionData::new(vers.version.clone(), resources)?);
}
let mut group = ApiGroup {
name: key,
data,
preferred: g.preferred_version.map(|v| v.version),
};
group.sort_versions();
Ok(group)
}
pub(crate) async fn query_core(client: &Client, coreapis: APIVersions) -> Result<Self> {
let mut data = vec![];
let key = ApiGroup::CORE_GROUP.to_string();
if coreapis.versions.is_empty() {
return Err(DiscoveryError::EmptyApiGroup(key).into());
}
for v in coreapis.versions {
let resources = client.list_core_api_resources(&v).await?;
data.push(GroupVersionData::new(v, resources)?);
}
let mut group = ApiGroup {
name: ApiGroup::CORE_GROUP.to_string(),
data,
preferred: Some("v1".to_string()),
};
group.sort_versions();
Ok(group)
}
fn sort_versions(&mut self) {
self.data
.sort_by_cached_key(|gvd| Version::parse(gvd.version.as_str()))
}
// shortcut method to give cheapest return for a single GVK
pub(crate) async fn query_gvk(
client: &Client,
gvk: &GroupVersionKind,
) -> Result<(ApiResource, ApiCapabilities)> {
let apiver = gvk.api_version();
let list = if gvk.group.is_empty() {
client.list_core_api_resources(&apiver).await?
} else {
client.list_api_group_resources(&apiver).await?
};
for res in &list.resources {
if res.kind == gvk.kind && !res.name.contains('/') |
}
Err(DiscoveryError::MissingKind(format!("{:?}", gvk)).into())
}
// shortcut method to give cheapest return for a pinned group
pub(crate) async fn query_gv(client: &Client, gv: &GroupVersion) -> Result<Self> {
let apiver = gv.api_version();
let list = if gv.group.is_empty() {
client.list_core_api_resources(&apiver).await?
} else {
client.list_api_group_resources(&apiver).await?
};
let data = GroupVersionData::new(gv.version.clone(), list)?;
let group = ApiGroup {
name: gv.group.clone(),
data: vec![data],
preferred: Some(gv.version.clone()), // you preferred what you asked for
};
Ok(group)
}
}
/// Public ApiGroup interface
impl ApiGroup {
/// Core group name
pub const CORE_GROUP: &'static str = "";
/// Returns the name of this group.
pub fn name(&self) -> &str {
&self.name
}
/// Returns served versions (e.g. `["v1", "v2beta1"]`) of this group.
///
/// This list is always non-empty, and sorted in the following order:
/// - Stable versions (with the last being the first)
/// - Beta versions (with the last being the first)
/// - Alpha versions (with the last being the first)
/// - Other versions, alphabetically
///
/// in accordance with [kubernetes version priority](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#version-priority).
pub fn versions(&self) -> impl Iterator<Item = &str> {
self.data.as_slice().iter().map(|gvd| gvd.version.as_str())
}
/// Returns preferred version for working with given group.
pub fn preferred_version(&self) -> Option<&str> {
self.preferred.as_deref()
}
/// Returns the preferred version or latest version for working with given group.
///
/// If server does not recommend one, we pick the "most stable and most recent" version
/// in accordance with [kubernetes version priority](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#version-priority).
pub fn preferred_version_or_latest(&self) -> &str {
// NB: self.versions is non-empty by construction in ApiGroup
self.preferred
.as_deref()
.unwrap_or_else(|| self.versions().next().unwrap())
}
/// Returns the resources in the group at an arbitrary version string.
///
/// If the group does not support this version, the returned vector is empty.
///
/// If you are looking for the api recommended list of resources, or just on particular kind
/// consider [`ApiGroup::recommended_resources`] or [`ApiGroup::recommended_kind`] instead.
pub fn versioned_resources(&self, ver: &str) -> Vec<(ApiResource, ApiCapabilities)> {
self.data
.iter()
.find(|gvd| gvd.version == ver)
.map(|gvd| gvd.resources.clone())
.unwrap_or_default()
}
/// Returns the recommended (preferred or latest) versioned resources in the group
///
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject}, discovery::{self, verbs}, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), kube::Error> {
/// let client = Client::try_default().await?;
/// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
/// for (ar, | {
let ar = parse::parse_apiresource(res, &list.group_version)?;
let caps = parse::parse_apicapabilities(&list, &res.name)?;
return Ok((ar, caps));
} | conditional_block |
apigroup.rs | discovery, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), kube::Error> {
/// let client = Client::try_default().await?;
/// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
/// for (apiresource, caps) in apigroup.versioned_resources("v1") {
/// println!("Found ApiResource {}", apiresource.kind);
/// }
/// Ok(())
/// }
/// ```
///
/// But if you do not know this information, you can use [`ApiGroup::preferred_version_or_latest`].
///
/// Whichever way you choose the end result is something describing a resource and its abilities:
/// - `Vec<(ApiResource, `ApiCapabilities)>` :: for all resources in a versioned ApiGroup
/// - `(ApiResource, ApiCapabilities)` :: for a single kind under a versioned ApiGroud
///
/// These two types: [`ApiResource`], and [`ApiCapabilities`]
/// should contain the information needed to construct an [`Api`](crate::Api) and start querying the kubernetes API.
/// You will likely need to use [`DynamicObject`] as the generic type for Api to do this,
/// as well as the [`ApiResource`] for the `DynamicType` for the [`Resource`] trait.
///
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), kube::Error> {
/// let client = Client::try_default().await?;
/// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
/// let (ar, caps) = apigroup.recommended_kind("APIService").unwrap();
/// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar);
/// for service in api.list(&Default::default()).await? {
/// println!("Found APIService: {}", service.name());
/// }
/// Ok(())
/// }
/// ```
/// [`ApiResource`]: crate::discovery::ApiResource
/// [`ApiCapabilities`]: crate::discovery::ApiCapabilities
/// [`DynamicObject`]: crate::api::DynamicObject
/// [`Resource`]: crate::Resource
/// [`ApiGroup::preferred_version_or_latest`]: crate::discovery::ApiGroup::preferred_version_or_latest
/// [`ApiGroup::versioned_resources`]: crate::discovery::ApiGroup::versioned_resources
/// [`ApiGroup::recommended_resources`]: crate::discovery::ApiGroup::recommended_resources
/// [`ApiGroup::recommended_kind`]: crate::discovery::ApiGroup::recommended_kind
pub struct ApiGroup {
/// Name of the group e.g. apiregistration.k8s.io
name: String,
/// List of resource information, capabilities at particular versions
data: Vec<GroupVersionData>,
/// Preferred version if exported by the `APIGroup`
preferred: Option<String>,
}
/// Internal queriers to convert from an APIGroup (or APIVersions for core) to our ApiGroup
///
/// These queriers ignore groups with empty versions.
/// This ensures that `ApiGroup::preferred_version_or_latest` always have an answer.
/// On construction, they also sort the internal vec of GroupVersionData according to `Version`.
impl ApiGroup {
pub(crate) async fn query_apis(client: &Client, g: APIGroup) -> Result<Self> {
tracing::debug!(name = g.name.as_str(), "Listing group versions");
let key = g.name;
if g.versions.is_empty() {
return Err(DiscoveryError::EmptyApiGroup(key).into());
}
let mut data = vec![];
for vers in &g.versions {
let resources = client.list_api_group_resources(&vers.group_version).await?;
data.push(GroupVersionData::new(vers.version.clone(), resources)?);
}
let mut group = ApiGroup {
name: key,
data,
preferred: g.preferred_version.map(|v| v.version),
};
group.sort_versions();
Ok(group)
}
pub(crate) async fn query_core(client: &Client, coreapis: APIVersions) -> Result<Self> {
let mut data = vec![];
let key = ApiGroup::CORE_GROUP.to_string();
if coreapis.versions.is_empty() {
return Err(DiscoveryError::EmptyApiGroup(key).into());
}
for v in coreapis.versions {
let resources = client.list_core_api_resources(&v).await?;
data.push(GroupVersionData::new(v, resources)?);
}
let mut group = ApiGroup {
name: ApiGroup::CORE_GROUP.to_string(),
data,
preferred: Some("v1".to_string()),
};
group.sort_versions();
Ok(group)
}
fn | (&mut self) {
self.data
.sort_by_cached_key(|gvd| Version::parse(gvd.version.as_str()))
}
// shortcut method to give cheapest return for a single GVK
pub(crate) async fn query_gvk(
client: &Client,
gvk: &GroupVersionKind,
) -> Result<(ApiResource, ApiCapabilities)> {
let apiver = gvk.api_version();
let list = if gvk.group.is_empty() {
client.list_core_api_resources(&apiver).await?
} else {
client.list_api_group_resources(&apiver).await?
};
for res in &list.resources {
if res.kind == gvk.kind && !res.name.contains('/') {
let ar = parse::parse_apiresource(res, &list.group_version)?;
let caps = parse::parse_apicapabilities(&list, &res.name)?;
return Ok((ar, caps));
}
}
Err(DiscoveryError::MissingKind(format!("{:?}", gvk)).into())
}
// shortcut method to give cheapest return for a pinned group
pub(crate) async fn query_gv(client: &Client, gv: &GroupVersion) -> Result<Self> {
let apiver = gv.api_version();
let list = if gv.group.is_empty() {
client.list_core_api_resources(&apiver).await?
} else {
client.list_api_group_resources(&apiver).await?
};
let data = GroupVersionData::new(gv.version.clone(), list)?;
let group = ApiGroup {
name: gv.group.clone(),
data: vec![data],
preferred: Some(gv.version.clone()), // you preferred what you asked for
};
Ok(group)
}
}
/// Public ApiGroup interface
impl ApiGroup {
/// Core group name
pub const CORE_GROUP: &'static str = "";
/// Returns the name of this group.
pub fn name(&self) -> &str {
&self.name
}
/// Returns served versions (e.g. `["v1", "v2beta1"]`) of this group.
///
/// This list is always non-empty, and sorted in the following order:
/// - Stable versions (with the last being the first)
/// - Beta versions (with the last being the first)
/// - Alpha versions (with the last being the first)
/// - Other versions, alphabetically
///
/// in accordance with [kubernetes version priority](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#version-priority).
pub fn versions(&self) -> impl Iterator<Item = &str> {
self.data.as_slice().iter().map(|gvd| gvd.version.as_str())
}
/// Returns preferred version for working with given group.
pub fn preferred_version(&self) -> Option<&str> {
self.preferred.as_deref()
}
/// Returns the preferred version or latest version for working with given group.
///
/// If server does not recommend one, we pick the "most stable and most recent" version
/// in accordance with [kubernetes version priority](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#version-priority).
pub fn preferred_version_or_latest(&self) -> &str {
// NB: self.versions is non-empty by construction in ApiGroup
self.preferred
.as_deref()
.unwrap_or_else(|| self.versions().next().unwrap())
}
/// Returns the resources in the group at an arbitrary version string.
///
/// If the group does not support this version, the returned vector is empty.
///
/// If you are looking for the api recommended list of resources, or just on particular kind
/// consider [`ApiGroup::recommended_resources`] or [`ApiGroup::recommended_kind`] instead.
pub fn versioned_resources(&self, ver: &str) -> Vec<(ApiResource, ApiCapabilities)> {
self.data
.iter()
.find(|gvd| gvd.version == ver)
.map(|gvd| gvd.resources.clone())
.unwrap_or_default()
}
/// Returns the recommended (preferred or latest) versioned resources in the group
///
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject}, discovery::{self, verbs}, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), kube::Error> {
/// let client = Client::try_default().await?;
/// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
/// for (ar, | sort_versions | identifier_name |
apigroup.rs | s.io").await?;
/// for (apiresource, caps) in apigroup.versioned_resources("v1") {
/// println!("Found ApiResource {}", apiresource.kind);
/// }
/// Ok(())
/// }
/// ```
///
/// But if you do not know this information, you can use [`ApiGroup::preferred_version_or_latest`].
///
/// Whichever way you choose the end result is something describing a resource and its abilities:
/// - `Vec<(ApiResource, ApiCapabilities)>` :: for all resources in a versioned ApiGroup
/// - `(ApiResource, ApiCapabilities)` :: for a single kind under a versioned ApiGroup
///
/// These two types: [`ApiResource`], and [`ApiCapabilities`]
/// should contain the information needed to construct an [`Api`](crate::Api) and start querying the kubernetes API.
/// You will likely need to use [`DynamicObject`] as the generic type for Api to do this,
/// as well as the [`ApiResource`] for the `DynamicType` for the [`Resource`] trait.
///
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), kube::Error> {
/// let client = Client::try_default().await?;
/// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
/// let (ar, caps) = apigroup.recommended_kind("APIService").unwrap();
/// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar);
/// for service in api.list(&Default::default()).await? {
/// println!("Found APIService: {}", service.name());
/// }
/// Ok(())
/// }
/// ```
/// [`ApiResource`]: crate::discovery::ApiResource
/// [`ApiCapabilities`]: crate::discovery::ApiCapabilities
/// [`DynamicObject`]: crate::api::DynamicObject
/// [`Resource`]: crate::Resource
/// [`ApiGroup::preferred_version_or_latest`]: crate::discovery::ApiGroup::preferred_version_or_latest
/// [`ApiGroup::versioned_resources`]: crate::discovery::ApiGroup::versioned_resources
/// [`ApiGroup::recommended_resources`]: crate::discovery::ApiGroup::recommended_resources
/// [`ApiGroup::recommended_kind`]: crate::discovery::ApiGroup::recommended_kind
pub struct ApiGroup {
    /// Name of the group, e.g. `apiregistration.k8s.io`.
    /// The core group uses the empty string (see `ApiGroup::CORE_GROUP`).
    name: String,
    /// Resource information and capabilities for each served version,
    /// sorted by version priority after construction (see `sort_versions`).
    data: Vec<GroupVersionData>,
    /// Preferred version, if the server's `APIGroup` exported one.
    preferred: Option<String>,
}
/// Internal queriers to convert from an APIGroup (or APIVersions for core) to our ApiGroup
///
/// These queriers ignore groups with empty versions.
/// This ensures that `ApiGroup::preferred_version_or_latest` always have an answer.
/// On construction, they also sort the internal vec of GroupVersionData according to `Version`.
impl ApiGroup {
/// Convert a discovered `APIGroup` into our sorted `ApiGroup` representation.
///
/// Errors with `DiscoveryError::EmptyApiGroup` when the group serves no versions.
pub(crate) async fn query_apis(client: &Client, g: APIGroup) -> Result<Self> {
    tracing::debug!(name = g.name.as_str(), "Listing group versions");
    let group_name = g.name;
    if g.versions.is_empty() {
        return Err(DiscoveryError::EmptyApiGroup(group_name).into());
    }
    // Fetch the resource list for every served version, preserving server order;
    // sort_versions() below establishes the final priority order.
    let mut versioned = Vec::with_capacity(g.versions.len());
    for v in &g.versions {
        let resources = client.list_api_group_resources(&v.group_version).await?;
        versioned.push(GroupVersionData::new(v.version.clone(), resources)?);
    }
    let mut group = ApiGroup {
        name: group_name,
        data: versioned,
        preferred: g.preferred_version.map(|pv| pv.version),
    };
    group.sort_versions();
    Ok(group)
}
/// Convert the legacy core `APIVersions` object into our sorted `ApiGroup`.
///
/// The core group has the empty-string name and always prefers `v1`.
/// Errors with `DiscoveryError::EmptyApiGroup` when no versions are served.
pub(crate) async fn query_core(client: &Client, coreapis: APIVersions) -> Result<Self> {
    let key = ApiGroup::CORE_GROUP.to_string();
    if coreapis.versions.is_empty() {
        return Err(DiscoveryError::EmptyApiGroup(key).into());
    }
    let mut data = vec![];
    for v in coreapis.versions {
        let resources = client.list_core_api_resources(&v).await?;
        data.push(GroupVersionData::new(v, resources)?);
    }
    let mut group = ApiGroup {
        // Reuse `key` instead of allocating CORE_GROUP.to_string() a second
        // time (mirrors how query_apis moves its `key` into `name`).
        name: key,
        data,
        preferred: Some("v1".to_string()),
    };
    group.sort_versions();
    Ok(group)
}
/// Sort `data` by Kubernetes version priority (stable > beta > alpha > other),
/// so that `versions()` yields the most preferred version first.
fn sort_versions(&mut self) {
    // `sort_by_cached_key` parses each version string only once.
    self.data
        .sort_by_cached_key(|gvd| Version::parse(gvd.version.as_str()))
}
// shortcut method to give cheapest return for a single GVK
pub(crate) async fn query_gvk(
    client: &Client,
    gvk: &GroupVersionKind,
) -> Result<(ApiResource, ApiCapabilities)> {
    let apiver = gvk.api_version();
    let list = if gvk.group.is_empty() {
        client.list_core_api_resources(&apiver).await?
    } else {
        client.list_api_group_resources(&apiver).await?
    };
    // Subresources contain '/' in their name; only a root resource with the
    // requested kind qualifies.
    match list
        .resources
        .iter()
        .find(|res| res.kind == gvk.kind && !res.name.contains('/'))
    {
        Some(res) => Ok((
            parse::parse_apiresource(res, &list.group_version)?,
            parse::parse_apicapabilities(&list, &res.name)?,
        )),
        None => Err(DiscoveryError::MissingKind(format!("{:?}", gvk)).into()),
    }
}
// shortcut method to give cheapest return for a pinned group
pub(crate) async fn query_gv(client: &Client, gv: &GroupVersion) -> Result<Self> {
    let apiver = gv.api_version();
    let list = if gv.group.is_empty() {
        client.list_core_api_resources(&apiver).await?
    } else {
        client.list_api_group_resources(&apiver).await?
    };
    Ok(ApiGroup {
        name: gv.group.clone(),
        data: vec![GroupVersionData::new(gv.version.clone(), list)?],
        // you preferred what you asked for
        preferred: Some(gv.version.clone()),
    })
}
}
/// Public ApiGroup interface
impl ApiGroup {
/// Core group name
pub const CORE_GROUP: &'static str = "";
/// Returns the name of this group.
///
/// For the core group this is the empty string (see [`ApiGroup::CORE_GROUP`]).
pub fn name(&self) -> &str {
    &self.name
}
/// Returns served versions (e.g. `["v1", "v2beta1"]`) of this group.
///
/// This list is always non-empty, and sorted in the following order:
/// - Stable versions (with the last being the first)
/// - Beta versions (with the last being the first)
/// - Alpha versions (with the last being the first)
/// - Other versions, alphabetically
///
/// in accordance with [kubernetes version priority](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#version-priority).
pub fn versions(&self) -> impl Iterator<Item = &str> {
    // `.as_slice()` before `.iter()` was redundant; iterate the Vec directly.
    self.data.iter().map(|gvd| gvd.version.as_str())
}
/// Returns preferred version for working with given group.
///
/// `None` when the server did not advertise one; use
/// [`ApiGroup::preferred_version_or_latest`] for an infallible alternative.
pub fn preferred_version(&self) -> Option<&str> {
    self.preferred.as_deref()
}
/// Returns the preferred version or latest version for working with given group.
///
/// If server does not recommend one, we pick the "most stable and most recent" version
/// in accordance with [kubernetes version priority](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#version-priority).
pub fn preferred_version_or_latest(&self) -> &str {
    match self.preferred.as_deref() {
        Some(v) => v,
        // NB: `data` is non-empty by construction in ApiGroup, so
        // `versions()` always yields at least one entry.
        None => self.versions().next().unwrap(),
    }
}
/// Returns the resources in the group at an arbitrary version string.
///
/// If the group does not support this version, the returned vector is empty.
///
/// If you are looking for the api recommended list of resources, or just on particular kind
/// consider [`ApiGroup::recommended_resources`] or [`ApiGroup::recommended_kind`] instead.
pub fn versioned_resources(&self, ver: &str) -> Vec<(ApiResource, ApiCapabilities)> {
    // Return the resources for the first (and only) matching version,
    // or an empty vector when this group does not serve `ver`.
    for gvd in &self.data {
        if gvd.version == ver {
            return gvd.resources.clone();
        }
    }
    Vec::new()
}
/// Returns the recommended (preferred or latest) versioned resources in the group
///
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject}, discovery::{self, verbs}, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), kube::Error> {
/// let client = Client::try_default().await?;
/// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?; | /// for (ar, caps) in apigroup.recommended_resources() {
/// if !caps.supports_operation(verbs::LIST) {
/// continue;
/// }
/// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar); | random_line_split | |
apigroup.rs | , ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), kube::Error> {
/// let client = Client::try_default().await?;
/// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
/// for (apiresource, caps) in apigroup.versioned_resources("v1") {
/// println!("Found ApiResource {}", apiresource.kind);
/// }
/// Ok(())
/// }
/// ```
///
/// But if you do not know this information, you can use [`ApiGroup::preferred_version_or_latest`].
///
/// Whichever way you choose the end result is something describing a resource and its abilities:
/// - `Vec<(ApiResource, ApiCapabilities)>` :: for all resources in a versioned ApiGroup
/// - `(ApiResource, ApiCapabilities)` :: for a single kind under a versioned ApiGroup
///
/// These two types: [`ApiResource`], and [`ApiCapabilities`]
/// should contain the information needed to construct an [`Api`](crate::Api) and start querying the kubernetes API.
/// You will likely need to use [`DynamicObject`] as the generic type for Api to do this,
/// as well as the [`ApiResource`] for the `DynamicType` for the [`Resource`] trait.
///
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), kube::Error> {
/// let client = Client::try_default().await?;
/// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
/// let (ar, caps) = apigroup.recommended_kind("APIService").unwrap();
/// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar);
/// for service in api.list(&Default::default()).await? {
/// println!("Found APIService: {}", service.name());
/// }
/// Ok(())
/// }
/// ```
/// [`ApiResource`]: crate::discovery::ApiResource
/// [`ApiCapabilities`]: crate::discovery::ApiCapabilities
/// [`DynamicObject`]: crate::api::DynamicObject
/// [`Resource`]: crate::Resource
/// [`ApiGroup::preferred_version_or_latest`]: crate::discovery::ApiGroup::preferred_version_or_latest
/// [`ApiGroup::versioned_resources`]: crate::discovery::ApiGroup::versioned_resources
/// [`ApiGroup::recommended_resources`]: crate::discovery::ApiGroup::recommended_resources
/// [`ApiGroup::recommended_kind`]: crate::discovery::ApiGroup::recommended_kind
pub struct ApiGroup {
/// Name of the group e.g. apiregistration.k8s.io
name: String,
/// List of resource information, capabilities at particular versions
data: Vec<GroupVersionData>,
/// Preferred version if exported by the `APIGroup`
preferred: Option<String>,
}
/// Internal queriers to convert from an APIGroup (or APIVersions for core) to our ApiGroup
///
/// These queriers ignore groups with empty versions.
/// This ensures that `ApiGroup::preferred_version_or_latest` always have an answer.
/// On construction, they also sort the internal vec of GroupVersionData according to `Version`.
impl ApiGroup {
pub(crate) async fn query_apis(client: &Client, g: APIGroup) -> Result<Self> {
tracing::debug!(name = g.name.as_str(), "Listing group versions");
let key = g.name;
if g.versions.is_empty() {
return Err(DiscoveryError::EmptyApiGroup(key).into());
}
let mut data = vec![];
for vers in &g.versions {
let resources = client.list_api_group_resources(&vers.group_version).await?;
data.push(GroupVersionData::new(vers.version.clone(), resources)?);
}
let mut group = ApiGroup {
name: key,
data,
preferred: g.preferred_version.map(|v| v.version),
};
group.sort_versions();
Ok(group)
}
pub(crate) async fn query_core(client: &Client, coreapis: APIVersions) -> Result<Self> {
let mut data = vec![];
let key = ApiGroup::CORE_GROUP.to_string();
if coreapis.versions.is_empty() {
return Err(DiscoveryError::EmptyApiGroup(key).into());
}
for v in coreapis.versions {
let resources = client.list_core_api_resources(&v).await?;
data.push(GroupVersionData::new(v, resources)?);
}
let mut group = ApiGroup {
name: ApiGroup::CORE_GROUP.to_string(),
data,
preferred: Some("v1".to_string()),
};
group.sort_versions();
Ok(group)
}
fn sort_versions(&mut self) {
self.data
.sort_by_cached_key(|gvd| Version::parse(gvd.version.as_str()))
}
// shortcut method to give cheapest return for a single GVK
pub(crate) async fn query_gvk(
client: &Client,
gvk: &GroupVersionKind,
) -> Result<(ApiResource, ApiCapabilities)> |
// shortcut method to give cheapest return for a pinned group
pub(crate) async fn query_gv(client: &Client, gv: &GroupVersion) -> Result<Self> {
let apiver = gv.api_version();
let list = if gv.group.is_empty() {
client.list_core_api_resources(&apiver).await?
} else {
client.list_api_group_resources(&apiver).await?
};
let data = GroupVersionData::new(gv.version.clone(), list)?;
let group = ApiGroup {
name: gv.group.clone(),
data: vec![data],
preferred: Some(gv.version.clone()), // you preferred what you asked for
};
Ok(group)
}
}
/// Public ApiGroup interface
impl ApiGroup {
/// Core group name
pub const CORE_GROUP: &'static str = "";
/// Returns the name of this group.
pub fn name(&self) -> &str {
&self.name
}
/// Returns served versions (e.g. `["v1", "v2beta1"]`) of this group.
///
/// This list is always non-empty, and sorted in the following order:
/// - Stable versions (with the last being the first)
/// - Beta versions (with the last being the first)
/// - Alpha versions (with the last being the first)
/// - Other versions, alphabetically
///
/// in accordance with [kubernetes version priority](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#version-priority).
pub fn versions(&self) -> impl Iterator<Item = &str> {
self.data.as_slice().iter().map(|gvd| gvd.version.as_str())
}
/// Returns preferred version for working with given group.
pub fn preferred_version(&self) -> Option<&str> {
self.preferred.as_deref()
}
/// Returns the preferred version or latest version for working with given group.
///
/// If server does not recommend one, we pick the "most stable and most recent" version
/// in accordance with [kubernetes version priority](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#version-priority).
pub fn preferred_version_or_latest(&self) -> &str {
// NB: self.versions is non-empty by construction in ApiGroup
self.preferred
.as_deref()
.unwrap_or_else(|| self.versions().next().unwrap())
}
/// Returns the resources in the group at an arbitrary version string.
///
/// If the group does not support this version, the returned vector is empty.
///
/// If you are looking for the api recommended list of resources, or just on particular kind
/// consider [`ApiGroup::recommended_resources`] or [`ApiGroup::recommended_kind`] instead.
pub fn versioned_resources(&self, ver: &str) -> Vec<(ApiResource, ApiCapabilities)> {
self.data
.iter()
.find(|gvd| gvd.version == ver)
.map(|gvd| gvd.resources.clone())
.unwrap_or_default()
}
/// Returns the recommended (preferred or latest) versioned resources in the group
///
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject}, discovery::{self, verbs}, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), kube::Error> {
/// let client = Client::try_default().await?;
/// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
/// for (ar, | {
let apiver = gvk.api_version();
let list = if gvk.group.is_empty() {
client.list_core_api_resources(&apiver).await?
} else {
client.list_api_group_resources(&apiver).await?
};
for res in &list.resources {
if res.kind == gvk.kind && !res.name.contains('/') {
let ar = parse::parse_apiresource(res, &list.group_version)?;
let caps = parse::parse_apicapabilities(&list, &res.name)?;
return Ok((ar, caps));
}
}
Err(DiscoveryError::MissingKind(format!("{:?}", gvk)).into())
} | identifier_body |
tlcell.rs | same memory.
/// Borrow contents of three `TLCell` instances mutably. Panics if
/// any pair of the three cells occupy the same memory.
#[inline]
pub fn rw3<'a, T: ?Sized, U: ?Sized, V: ?Sized>(
    &'a mut self,
    tc1: &'a TLCell<Q, T>,
    tc2: &'a TLCell<Q, U>,
    tc3: &'a TLCell<Q, V>,
) -> (&'a mut T, &'a mut U, &'a mut V) {
    // `?Sized` payloads may have fat pointers, so cast down to thin
    // `*const ()` addresses before comparing for aliasing.
    let a1 = tc1 as *const _ as *const () as usize;
    let a2 = tc2 as *const _ as *const () as usize;
    let a3 = tc3 as *const _ as *const () as usize;
    assert!(
        a1 != a2 && a2 != a3 && a3 != a1,
        "Illegal to borrow same TLCell twice with rw3()"
    );
    // SAFETY: the three cells are distinct (checked above) and the exclusive
    // borrow of `self` guarantees no other borrows of any cell are live.
    unsafe {
        (
            &mut *tc1.value.get(),
            &mut *tc2.value.get(),
            &mut *tc3.value.get(),
        )
    }
}
}
/// Cell whose contents is owned (for borrowing purposes) by a
/// [`TLCellOwner`].
///
/// To borrow from this cell, use the borrowing calls on the
/// [`TLCellOwner`] instance that shares the same marker type. Since
/// there may be another indistinguishable [`TLCellOwner`] in another
/// thread, `Sync` is not supported for this type. However it *is*
/// possible to send the cell to another thread, which then allows its
/// contents to be borrowed using the owner in that thread.
///
/// See also [crate documentation](index.html).
///
/// [`TLCellOwner`]: struct.TLCellOwner.html
#[repr(transparent)]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
pub struct TLCell<Q, T: ?Sized> {
    // `Invariant<Q>` keeps the marker parameter invariant so cells with
    // different marker types can never be conflated.
    owner: PhantomData<Invariant<Q>>,
    // TLCell absolutely cannot be Sync, since otherwise you could send
    // two &TLCell's to two different threads, that each have their own
    // TLCellOwner<Q> instance and that could therefore both give out
    // a &mut T to the same T.
    //
    // However, it's fine to Send a TLCell to a different thread, because
    // you can only send something if nothing borrows it, so nothing can
    // be accessing its contents. After sending the TLCell, the original
    // TLCellOwner can no longer give access to the TLCell's contents since
    // TLCellOwner is !Send + !Sync. Only the TLCellOwner of the new thread
    // can give access to this TLCell's contents now.
    //
    // `UnsafeCell` already disables `Sync` and gives the right `Send` implementation.
    value: UnsafeCell<T>,
}
impl<Q, T> TLCell<Q, T> {
    /// Create a new `TLCell` owned for borrowing purposes by the
    /// `TLCellOwner` derived from the same marker type `Q`.
    ///
    /// `const` so cells can be created in statics.
    #[inline]
    pub const fn new(value: T) -> TLCell<Q, T> {
        TLCell {
            owner: PhantomData,
            value: UnsafeCell::new(value),
        }
    }

    /// Destroy the cell and return the contained value.
    ///
    /// Safety: Since this consumes the cell, there can be no other
    /// references to the cell or the data at this point.
    #[inline]
    pub fn into_inner(self) -> T {
        self.value.into_inner()
    }
}
impl<Q, T: ?Sized> TLCell<Q, T> {
    /// Borrow contents of this cell immutably (read-only). Many
    /// `TLCell` instances can be borrowed immutably at the same time
    /// from the same owner.
    #[inline]
    pub fn ro<'a>(&'a self, owner: &'a TLCellOwner<Q>) -> &'a T {
        // Convenience delegation; the owner performs the actual borrow.
        owner.ro(self)
    }

    /// Borrow contents of this cell mutably (read-write). Only one
    /// `TLCell` at a time can be borrowed from the owner using this
    /// call. The returned reference must go out of scope before
    /// another can be borrowed. To mutably borrow from two or three
    /// cells at the same time, see [`TLCellOwner::rw2`] or
    /// [`TLCellOwner::rw3`].
    #[inline]
    pub fn rw<'a>(&'a self, owner: &'a mut TLCellOwner<Q>) -> &'a mut T {
        // Convenience delegation; the owner performs the actual borrow.
        owner.rw(self)
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Note that this is only useful at the beginning-of-life or
    /// end-of-life of the cell when you have exclusive access to it.
    /// Normally you'd use [`TLCell::rw`] or [`TLCellOwner::rw`] to
    /// get a mutable reference to the contents of the cell.
    ///
    /// Safety: This call borrows `TLCell` mutably which guarantees
    /// that we possess the only reference. This means that there can
    /// be no active borrows of other forms, even ones obtained using
    /// an immutable reference.
    #[inline]
    pub fn get_mut(&mut self) -> &mut T {
        self.value.get_mut()
    }
}
// Delegate `Default` to the contained type's `Default`.
impl<Q: 'static, T: Default + ?Sized> Default for TLCell<Q, T> {
    fn default() -> Self {
        TLCell::new(T::default())
    }
}
#[cfg(test)]
mod tests {
use super::{TLCell, TLCellOwner};
#[test]
#[should_panic]
fn tlcell_singleton_1() {
struct Marker;
let _owner1 = TLCellOwner::<Marker>::new();
let _owner2 = TLCellOwner::<Marker>::new(); // Panic here
}
#[test]
fn tlcell_singleton_2() {
struct Marker;
let owner1 = TLCellOwner::<Marker>::new();
drop(owner1);
let _owner2 = TLCellOwner::<Marker>::new();
}
#[test]
fn tlcell_singleton_3() {
struct Marker1;
struct Marker2;
let _owner1 = TLCellOwner::<Marker1>::new();
let _owner2 = TLCellOwner::<Marker2>::new();
}
#[test]
fn tlcell() {
struct Marker;
type ACellOwner = TLCellOwner<Marker>;
type ACell<T> = TLCell<Marker, T>;
let mut owner = ACellOwner::new();
let c1 = ACell::new(100u32);
let c2 = owner.cell(200u32);
(*owner.rw(&c1)) += 1;
(*owner.rw(&c2)) += 2;
let c1ref = owner.ro(&c1);
let c2ref = owner.ro(&c2);
let total = *c1ref + *c2ref;
assert_eq!(total, 303);
}
#[test]
fn tlcell_threads() {
struct Marker;
type ACellOwner = TLCellOwner<Marker>;
let mut _owner1 = ACellOwner::new();
std::thread::spawn(|| {
let mut _owner2 = ACellOwner::new();
})
.join()
.unwrap();
}
#[test]
fn tlcell_get_mut() {
struct Marker;
type ACellOwner = TLCellOwner<Marker>;
type ACell<T> = TLCell<Marker, T>;
let owner = ACellOwner::new();
let mut cell = ACell::new(100u32);
let mut_ref = cell.get_mut();
*mut_ref = 50;
let cell_ref = owner.ro(&cell);
assert_eq!(*cell_ref, 50);
}
#[test]
fn tlcell_into_inner() {
struct Marker;
type ACell<T> = TLCell<Marker, T>;
let cell = ACell::new(100u32);
assert_eq!(cell.into_inner(), 100);
}
#[test]
fn tlcell_unsized() {
struct Marker;
type ACellOwner = TLCellOwner<Marker>;
type ACell<T> = TLCell<Marker, T>;
let mut owner = ACellOwner::new();
struct Squares(u32);
struct Integers(u64);
trait Series {
fn step(&mut self);
fn value(&self) -> u64;
}
impl Series for Squares {
fn step(&mut self) {
self.0 += 1;
}
fn value(&self) -> u64 {
(self.0 as u64) * (self.0 as u64)
}
}
impl Series for Integers {
fn step(&mut self) {
self.0 += 1;
}
fn value(&self) -> u64 {
self.0
}
}
fn series(init: u32, is_squares: bool) -> Box<ACell<dyn Series>> {
if is_squares | {
Box::new(ACell::new(Squares(init)))
} | conditional_block | |
tlcell.rs | /// support `Send` or `Sync`.
pub fn new() -> Self {
SINGLETON_CHECK.with(|set| {
assert!(set.borrow_mut().insert(TypeId::of::<Q>()),
"Illegal to create two TLCellOwner instances within the same thread with the same marker type parameter");
});
Self {
not_send_or_sync: PhantomData,
typ: PhantomData,
}
}
/// Create a new cell owned by this owner instance. See also
/// [`TLCell::new`].
///
/// [`TLCell::new`]: struct.TLCell.html
pub fn cell<T>(&self, value: T) -> TLCell<Q, T> {
TLCell::<Q, T>::new(value)
}
/// Borrow contents of a `TLCell` immutably (read-only). Many
/// `TLCell` instances can be borrowed immutably at the same time
/// from the same owner.
#[inline]
pub fn ro<'a, T: ?Sized>(&'a self, tc: &'a TLCell<Q, T>) -> &'a T {
unsafe { &*tc.value.get() }
}
/// Borrow contents of a `TLCell` mutably (read-write). Only one
/// `TLCell` at a time can be borrowed from the owner using this
/// call. The returned reference must go out of scope before
/// another can be borrowed.
#[inline]
pub fn rw<'a, T: ?Sized>(&'a mut self, tc: &'a TLCell<Q, T>) -> &'a mut T {
unsafe { &mut *tc.value.get() }
}
/// Borrow contents of two `TLCell` instances mutably. Panics if
/// the two cells occupy the same memory.
#[inline]
pub fn rw2<'a, T: ?Sized, U: ?Sized>(
    &'a mut self,
    tc1: &'a TLCell<Q, T>,
    tc2: &'a TLCell<Q, U>,
) -> (&'a mut T, &'a mut U) {
    // `?Sized` payloads may have fat pointers, so compare thin
    // `*const ()` addresses to detect aliasing.
    let a1 = tc1 as *const _ as *const () as usize;
    let a2 = tc2 as *const _ as *const () as usize;
    assert!(a1 != a2, "Illegal to borrow same TLCell twice with rw2()");
    // SAFETY: the two cells are distinct (checked above) and the exclusive
    // borrow of `self` guarantees no other borrows of either cell are live.
    unsafe { (&mut *tc1.value.get(), &mut *tc2.value.get()) }
}
/// Borrow contents of three `TLCell` instances mutably. Panics if
/// any pair of `TLCell` instances point to the same memory.
#[inline]
pub fn rw3<'a, T: ?Sized, U: ?Sized, V: ?Sized>(
&'a mut self,
tc1: &'a TLCell<Q, T>,
tc2: &'a TLCell<Q, U>,
tc3: &'a TLCell<Q, V>,
) -> (&'a mut T, &'a mut U, &'a mut V) {
assert!(
(tc1 as *const _ as *const () as usize != tc2 as *const _ as *const () as usize)
&& (tc2 as *const _ as *const () as usize != tc3 as *const _ as *const () as usize)
&& (tc3 as *const _ as *const () as usize != tc1 as *const _ as *const () as usize),
"Illegal to borrow same TLCell twice with rw3()"
);
unsafe {
(
&mut *tc1.value.get(),
&mut *tc2.value.get(),
&mut *tc3.value.get(),
)
}
}
}
/// Cell whose contents is owned (for borrowing purposes) by a | /// thread, `Sync` is not supported for this type. However it *is*
/// possible to send the cell to another thread, which then allows its
/// contents to be borrowed using the owner in that thread.
///
/// See also [crate documentation](index.html).
///
/// [`TLCellOwner`]: struct.TLCellOwner.html
#[repr(transparent)]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
pub struct TLCell<Q, T: ?Sized> {
// Use Invariant<Q> for invariant parameter
owner: PhantomData<Invariant<Q>>,
// TLCell absolutely cannot be Sync, since otherwise you could send
// two &TLCell's to two different threads, that each have their own
// TLCellOwner<Q> instance and that could therefore both give out
// a &mut T to the same T.
//
// However, it's fine to Send a TLCell to a different thread, because
// you can only send something if nothing borrows it, so nothing can
// be accessing its contents. After sending the TLCell, the original
// TLCellOwner can no longer give access to the TLCell's contents since
// TLCellOwner is !Send + !Sync. Only the TLCellOwner of the new thread
// can give access to this TLCell's contents now.
//
// `UnsafeCell` already disables `Sync` and gives the right `Send` implementation.
value: UnsafeCell<T>,
}
impl<Q, T> TLCell<Q, T> {
/// Create a new `TLCell` owned for borrowing purposes by the
/// `TLCellOwner` derived from the same marker type `Q`.
#[inline]
pub const fn new(value: T) -> TLCell<Q, T> {
TLCell {
owner: PhantomData,
value: UnsafeCell::new(value),
}
}
/// Destroy the cell and return the contained value
///
/// Safety: Since this consumes the cell, there can be no other
/// references to the cell or the data at this point.
#[inline]
pub fn into_inner(self) -> T {
self.value.into_inner()
}
}
impl<Q, T: ?Sized> TLCell<Q, T> {
/// Borrow contents of this cell immutably (read-only). Many
/// `TLCell` instances can be borrowed immutably at the same time
/// from the same owner.
#[inline]
pub fn ro<'a>(&'a self, owner: &'a TLCellOwner<Q>) -> &'a T {
owner.ro(self)
}
/// Borrow contents of this cell mutably (read-write). Only one
/// `TLCell` at a time can be borrowed from the owner using this
/// call. The returned reference must go out of scope before
/// another can be borrowed. To mutably borrow from two or three
/// cells at the same time, see [`TLCellOwner::rw2`] or
/// [`TLCellOwner::rw3`].
#[inline]
pub fn rw<'a>(&'a self, owner: &'a mut TLCellOwner<Q>) -> &'a mut T {
owner.rw(self)
}
/// Returns a mutable reference to the underlying data
///
/// Note that this is only useful at the beginning-of-life or
/// end-of-life of the cell when you have exclusive access to it.
/// Normally you'd use [`TLCell::rw`] or [`TLCellOwner::rw`] to
/// get a mutable reference to the contents of the cell.
///
/// Safety: This call borrows `TLCell` mutably which guarantees
/// that we possess the only reference. This means that there can
/// be no active borrows of other forms, even ones obtained using
/// an immutable reference.
#[inline]
pub fn get_mut(&mut self) -> &mut T {
self.value.get_mut()
}
}
impl<Q: 'static, T: Default + ?Sized> Default for TLCell<Q, T> {
fn default() -> Self {
TLCell::new(T::default())
}
}
#[cfg(test)]
mod tests {
use super::{TLCell, TLCellOwner};
#[test]
#[should_panic]
fn tlcell_singleton_1() {
struct Marker;
let _owner1 = TLCellOwner::<Marker>::new();
let _owner2 = TLCellOwner::<Marker>::new(); // Panic here
}
#[test]
fn tlcell_singleton_2() {
struct Marker;
let owner1 = TLCellOwner::<Marker>::new();
drop(owner1);
let _owner2 = TLCellOwner::<Marker>::new();
}
#[test]
fn tlcell_singleton_3() {
struct Marker1;
struct Marker2;
let _owner1 = TLCellOwner::<Marker1>::new();
let _owner2 = TLCellOwner::<Marker2>::new();
}
#[test]
fn tlcell() {
struct Marker;
type ACellOwner = TLCellOwner<Marker>;
type ACell<T> = TLCell<Marker, T>;
let mut owner = ACellOwner::new();
let c1 = ACell::new(100u32);
let c2 = owner.cell(200u32);
| /// [`TLCellOwner`].
///
/// To borrow from this cell, use the borrowing calls on the
/// [`TLCellOwner`] instance that shares the same marker type. Since
/// there may be another indistinguishable [`TLCellOwner`] in another | random_line_split |
tlcell.rs | (*const ());
/// Borrowing-owner of zero or more [`TLCell`](struct.TLCell.html)
/// instances.
///
/// See [crate documentation](index.html).
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
pub struct TLCellOwner<Q: 'static> {
// Use NotSendOrSync to disable Send and Sync,
not_send_or_sync: PhantomData<NotSendOrSync>,
// Use Invariant<Q> for invariant parameter
typ: PhantomData<Invariant<Q>>,
}
impl<Q: 'static> Drop for TLCellOwner<Q> {
fn drop(&mut self) {
SINGLETON_CHECK.with(|set| set.borrow_mut().remove(&TypeId::of::<Q>()));
}
}
impl<Q: 'static> Default for TLCellOwner<Q> {
fn default() -> Self {
TLCellOwner::new()
}
}
impl<Q: 'static> TLCellOwner<Q> {
/// Create the singleton owner instance. Each owner may be used
/// to create many `TLCell` instances. There may be only one
/// instance of this type per thread at any given time for each
/// different marker type `Q`. This call panics if a second
/// simultaneous instance is created. Since the owner is only
/// valid to use in the thread it is created in, it does not
/// support `Send` or `Sync`.
pub fn new() -> Self {
SINGLETON_CHECK.with(|set| {
assert!(set.borrow_mut().insert(TypeId::of::<Q>()),
"Illegal to create two TLCellOwner instances within the same thread with the same marker type parameter");
});
Self {
not_send_or_sync: PhantomData,
typ: PhantomData,
}
}
/// Create a new cell owned by this owner instance. See also
/// [`TLCell::new`].
///
/// [`TLCell::new`]: struct.TLCell.html
pub fn cell<T>(&self, value: T) -> TLCell<Q, T> {
TLCell::<Q, T>::new(value)
}
/// Borrow contents of a `TLCell` immutably (read-only). Many
/// `TLCell` instances can be borrowed immutably at the same time
/// from the same owner.
#[inline]
pub fn ro<'a, T: ?Sized>(&'a self, tc: &'a TLCell<Q, T>) -> &'a T {
unsafe { &*tc.value.get() }
}
/// Borrow contents of a `TLCell` mutably (read-write). Only one
/// `TLCell` at a time can be borrowed from the owner using this
/// call. The returned reference must go out of scope before
/// another can be borrowed.
#[inline]
pub fn rw<'a, T: ?Sized>(&'a mut self, tc: &'a TLCell<Q, T>) -> &'a mut T {
unsafe { &mut *tc.value.get() }
}
/// Borrow contents of two `TLCell` instances mutably. Panics if
/// the two `TLCell` instances point to the same memory.
#[inline]
pub fn rw2<'a, T: ?Sized, U: ?Sized>(
&'a mut self,
tc1: &'a TLCell<Q, T>,
tc2: &'a TLCell<Q, U>,
) -> (&'a mut T, &'a mut U) {
assert!(
tc1 as *const _ as *const () as usize != tc2 as *const _ as *const () as usize,
"Illegal to borrow same TLCell twice with rw2()"
);
unsafe { (&mut *tc1.value.get(), &mut *tc2.value.get()) }
}
/// Borrow contents of three `TLCell` instances mutably. Panics if
/// any pair of `TLCell` instances point to the same memory.
#[inline]
pub fn rw3<'a, T: ?Sized, U: ?Sized, V: ?Sized>(
&'a mut self,
tc1: &'a TLCell<Q, T>,
tc2: &'a TLCell<Q, U>,
tc3: &'a TLCell<Q, V>,
) -> (&'a mut T, &'a mut U, &'a mut V) {
assert!(
(tc1 as *const _ as *const () as usize != tc2 as *const _ as *const () as usize)
&& (tc2 as *const _ as *const () as usize != tc3 as *const _ as *const () as usize)
&& (tc3 as *const _ as *const () as usize != tc1 as *const _ as *const () as usize),
"Illegal to borrow same TLCell twice with rw3()"
);
unsafe {
(
&mut *tc1.value.get(),
&mut *tc2.value.get(),
&mut *tc3.value.get(),
)
}
}
}
/// Cell whose contents is owned (for borrowing purposes) by a
/// [`TLCellOwner`].
///
/// To borrow from this cell, use the borrowing calls on the
/// [`TLCellOwner`] instance that shares the same marker type. Since
/// there may be another indistinguishable [`TLCellOwner`] in another
/// thread, `Sync` is not supported for this type. However it *is*
/// possible to send the cell to another thread, which then allows its
/// contents to be borrowed using the owner in that thread.
///
/// See also [crate documentation](index.html).
///
/// [`TLCellOwner`]: struct.TLCellOwner.html
#[repr(transparent)]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
pub struct TLCell<Q, T: ?Sized> {
// Use Invariant<Q> for invariant parameter
owner: PhantomData<Invariant<Q>>,
// TLCell absolutely cannot be Sync, since otherwise you could send
// two &TLCell's to two different threads, that each have their own
// TLCellOwner<Q> instance and that could therefore both give out
// a &mut T to the same T.
//
// However, it's fine to Send a TLCell to a different thread, because
// you can only send something if nothing borrows it, so nothing can
// be accessing its contents. After sending the TLCell, the original
// TLCellOwner can no longer give access to the TLCell's contents since
// TLCellOwner is !Send + !Sync. Only the TLCellOwner of the new thread
// can give access to this TLCell's contents now.
//
// `UnsafeCell` already disables `Sync` and gives the right `Send` implementation.
value: UnsafeCell<T>,
}
impl<Q, T> TLCell<Q, T> {
/// Create a new `TLCell` owned for borrowing purposes by the
/// `TLCellOwner` derived from the same marker type `Q`.
#[inline]
pub const fn new(value: T) -> TLCell<Q, T> {
TLCell {
owner: PhantomData,
value: UnsafeCell::new(value),
}
}
/// Destroy the cell and return the contained value
///
/// Safety: Since this consumes the cell, there can be no other
/// references to the cell or the data at this point.
#[inline]
pub fn into_inner(self) -> T {
self.value.into_inner()
}
}
impl<Q, T: ?Sized> TLCell<Q, T> {
/// Borrow contents of this cell immutably (read-only). Many
/// `TLCell` instances can be borrowed immutably at the same time
/// from the same owner.
#[inline]
pub fn ro<'a>(&'a self, owner: &'a TLCellOwner<Q>) -> &'a T {
owner.ro(self)
}
/// Borrow contents of this cell mutably (read-write). Only one
/// `TLCell` at a time can be borrowed from the owner using this
/// call. The returned reference must go out of scope before
/// another can be borrowed. To mutably borrow from two or three
/// cells at the same time, see [`TLCellOwner::rw2`] or
/// [`TLCellOwner::rw3`].
#[inline]
pub fn rw<'a>(&'a self, owner: &'a mut TLCellOwner<Q>) -> &'a mut T {
owner.rw(self)
}
/// Returns a mutable reference to the underlying data
///
/// Note that this is only useful at the beginning-of-life or
/// end-of-life of the cell when you have exclusive access to it.
/// Normally you'd use [`TLCell::rw`] or [`TLCellOwner::rw`] to
/// get a mutable reference to the contents of the cell.
///
/// Safety: This call borrows `TLCell` mutably which guarantees
/// that we possess the only reference. This means that there can
/// be no active borrows of other forms, even ones obtained using
/// an immutable reference.
#[inline]
pub fn get_mut(&mut self) -> &mut T {
self.value.get_mut()
}
}
impl<Q: 'static, T: | NotSendOrSync | identifier_name | |
transaction.go | // null marks beginning of list - not used as a record type
NullTag = TagType(iota)
// valid record types
// OBSOLETE items must still be supported to process older blocks
BaseDataTag = TagType(iota) // OBSOLETE: block owner
AssetDataTag = TagType(iota) // create asset
BitmarkIssueTag = TagType(iota) // issue asset
BitmarkTransferUnratifiedTag = TagType(iota) // single signed transfer
BitmarkTransferCountersignedTag = TagType(iota) // two signature transfer
BlockFoundationTag = TagType(iota) // block owner
BlockOwnerTransferTag = TagType(iota) // block owner transfer
BitmarkShareTag = TagType(iota) // convert bitmark to a quantity of shares
ShareGrantTag = TagType(iota) // grant some value to another account
ShareSwapTag = TagType(iota) // atomically swap shares between accounts
// this item must be last
InvalidTag = TagType(iota)
)
// Packed - packed records are just a byte slice
type Packed []byte
// Transaction - generic transaction interface
type Transaction interface {
Pack(account *account.Account) (Packed, error)
}
// byte sizes for various fields
const (
maxNameLength = 64
maxMetadataLength = 2048
minFingerprintLength = 1
maxFingerprintLength = 1024
maxSignatureLength = 1024
)
// OldBaseData - the unpacked Proofer Data structure (OBSOLETE)
// this is first tx in every block and can only be used there
type OldBaseData struct {
Currency currency.Currency `json:"currency"` // utf-8 → Enum
PaymentAddress string `json:"paymentAddress"` // utf-8
Owner *account.Account `json:"owner"` // base58
Nonce uint64 `json:"nonce,string"` // unsigned 0..N
Signature account.Signature `json:"signature,"` // hex
}
// AssetData - the unpacked Asset Data structure
type AssetData struct {
Name string `json:"name"` // utf-8
Fingerprint string `json:"fingerprint"` // utf-8
Metadata string `json:"metadata"` // utf-8
Registrant *account.Account `json:"registrant"` // base58
Signature account.Signature `json:"signature"` // hex
}
// BitmarkIssue - the unpacked BitmarkIssue structure
type BitmarkIssue struct {
AssetId AssetIdentifier `json:"assetId"` // link to asset record
Owner *account.Account `json:"owner"` // base58: the "destination" owner
Nonce uint64 `json:"nonce,string"` // to allow for multiple issues at the same time
Signature account.Signature `json:"signature"` // hex: corresponds to owner in linked record
}
// Payment - optional payment record
type Payment struct {
Currency currency.Currency `json:"currency"` // utf-8 → Enum
Address string `json:"address"` // utf-8
Amount uint64 `json:"amount,string"` // number as string, in terms of smallest currency unit
}
// PaymentAlternative - a single payment possibility - for use in RPC layers
// up to entries:
// 1. issue block owner payment
// 2. last transfer block owner payment (can merge with 1 if same address)
// 3. optional transfer payment
type PaymentAlternative []*Payment
// BitmarkTransfer - to access field of various transfer types
type BitmarkTransfer interface {
Transaction
GetLink() merkle.Digest
GetPayment() *Payment
GetOwner() *account.Account
GetCurrencies() currency.Map
GetSignature() account.Signature
GetCountersignature() account.Signature
}
// BitmarkTransferUnratified - the unpacked BitmarkTransfer structure
type BitmarkTransferUnratified struct {
Link merkle.Digest `json:"link"` // previous record
Escrow *Payment `json:"escrow"` // optional escrow payment address
Owner *account.Account `json:"owner"` // base58: the "destination" owner
Signature account.Signature `json:"signature"` // hex: corresponds to owner in linked record
}
// BitmarkTransferCountersigned - the unpacked Countersigned BitmarkTransfer structure
type BitmarkTransferCountersigned struct {
Link merkle.Digest `json:"link"` // previous record
Escrow *Payment `json:"escrow"` // optional escrow payment address
Owner *account.Account `json:"owner"` // base58: the "destination" owner
Signature account.Signature `json:"signature"` // hex: corresponds to owner in linked record
Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record
}
// BlockFoundation - the unpacked Proofer Data structure
// this is first tx in every block and can only be used there
type BlockFoundation struct {
Version uint64 `json:"version,string"` // reflects combination of supported currencies
Payments currency.Map `json:"payments"` // contents depend on version
Owner *account.Account `json:"owner"` // base58
Nonce uint64 `json:"nonce,string"` // unsigned 0..N
Signature account.Signature `json:"signature"` // hex
}
// BlockOwnerTransfer - the unpacked Block Owner Transfer Data structure
// forms a chain that links back to a foundation record which has a TxId of:
// SHA3-256 . concat blockDigest leBlockNumberUint64
type BlockOwnerTransfer struct {
Link merkle.Digest `json:"link"` // previous record
Escrow *Payment `json:"escrow"` // optional escrow payment address
Version uint64 `json:"version,string"` // reflects combination of supported currencies
Payments currency.Map `json:"payments"` // require length and contents depend on version
Owner *account.Account `json:"owner"` // base58
Signature account.Signature `json:"signature"` // hex
Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record
}
// BitmarkShare - turn a bitmark provenance chain into a fungible share
type BitmarkShare struct {
Link merkle.Digest `json:"link"` // previous record
Quantity uint64 `json:"quantity,string"` // initial balance quantity
Signature account.Signature `json:"signature"` // hex
}
// ShareGrant - grant some shares to another (one way transfer)
type ShareGrant struct {
ShareId merkle.Digest `json:"shareId"` // share = issue id
Quantity uint64 `json:"quantity,string"` // shares to transfer > 0
Owner *account.Account `json:"owner"` // base58
Recipient *account.Account `json:"recipient"` // base58
BeforeBlock uint64 `json:"beforeBlock,string"` // expires when chain height > before block
Signature account.Signature `json:"signature"` // hex
Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record
}
// ShareSwap - swap some shares to another (two way transfer)
type ShareSwap struct {
ShareIdOne merkle.Digest `json:"shareIdOne"` // share = issue id
QuantityOne uint64 `json:"quantityOne,string"` // shares to transfer > 0
OwnerOne *account.Account `json:"ownerOne"` // base58
ShareIdTwo merkle.Digest `json:"shareIdTwo"` // share = issue id
QuantityTwo uint64 `json:"quantityTwo,string"` // shares to transfer > 0
OwnerTwo *account.Account `json:"ownerTwo"` // base58
BeforeBlock uint64 `json:"beforeBlock,string"` // expires when chain height > before block
Signature account.Signature `json:"signature"` // hex
Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record
}
// Type - returns the record type code
func (record Packed) Type() TagType {
recordType, n := util.FromVarint64(record)
if 0 == n {
| turn TagType(recordType)
}
// RecordName - returns the name of a transaction record as a string
func RecordName(record interface{}) (string, bool) {
switch record.(type) {
case *OldBaseData, OldBaseData:
return "BaseData", true
case *AssetData, AssetData:
return "AssetData", true
case *BitmarkIssue, BitmarkIssue:
return " | return NullTag
}
re | conditional_block |
transaction.go | // grant some value to another account
ShareSwapTag = TagType(iota) // atomically swap shares between accounts
// this item must be last
InvalidTag = TagType(iota)
)
// Packed - packed records are just a byte slice
type Packed []byte
// Transaction - generic transaction interface
type Transaction interface {
Pack(account *account.Account) (Packed, error)
}
// byte sizes for various fields
const (
maxNameLength = 64
maxMetadataLength = 2048
minFingerprintLength = 1
maxFingerprintLength = 1024
maxSignatureLength = 1024
)
// OldBaseData - the unpacked Proofer Data structure (OBSOLETE)
// this is first tx in every block and can only be used there
type OldBaseData struct {
Currency currency.Currency `json:"currency"` // utf-8 → Enum
PaymentAddress string `json:"paymentAddress"` // utf-8
Owner *account.Account `json:"owner"` // base58
Nonce uint64 `json:"nonce,string"` // unsigned 0..N
Signature account.Signature `json:"signature,"` // hex
}
// AssetData - the unpacked Asset Data structure
type AssetData struct {
Name string `json:"name"` // utf-8
Fingerprint string `json:"fingerprint"` // utf-8
Metadata string `json:"metadata"` // utf-8
Registrant *account.Account `json:"registrant"` // base58
Signature account.Signature `json:"signature"` // hex
}
// BitmarkIssue - the unpacked BitmarkIssue structure
type BitmarkIssue struct {
AssetId AssetIdentifier `json:"assetId"` // link to asset record
Owner *account.Account `json:"owner"` // base58: the "destination" owner
Nonce uint64 `json:"nonce,string"` // to allow for multiple issues at the same time
Signature account.Signature `json:"signature"` // hex: corresponds to owner in linked record
}
// Payment - optional payment record
type Payment struct {
Currency currency.Currency `json:"currency"` // utf-8 → Enum
Address string `json:"address"` // utf-8
Amount uint64 `json:"amount,string"` // number as string, in terms of smallest currency unit
}
// PaymentAlternative - a single payment possibility - for use in RPC layers
// up to entries:
// 1. issue block owner payment
// 2. last transfer block owner payment (can merge with 1 if same address)
// 3. optional transfer payment
type PaymentAlternative []*Payment
// BitmarkTransfer - to access field of various transfer types
type BitmarkTransfer interface {
Transaction
GetLink() merkle.Digest
GetPayment() *Payment
GetOwner() *account.Account
GetCurrencies() currency.Map
GetSignature() account.Signature
GetCountersignature() account.Signature
}
// BitmarkTransferUnratified - the unpacked BitmarkTransfer structure
type BitmarkTransferUnratified struct {
Link merkle.Digest `json:"link"` // previous record
Escrow *Payment `json:"escrow"` // optional escrow payment address
Owner *account.Account `json:"owner"` // base58: the "destination" owner
Signature account.Signature `json:"signature"` // hex: corresponds to owner in linked record
}
// BitmarkTransferCountersigned - the unpacked Countersigned BitmarkTransfer structure
type BitmarkTransferCountersigned struct {
Link merkle.Digest `json:"link"` // previous record
Escrow *Payment `json:"escrow"` // optional escrow payment address
Owner *account.Account `json:"owner"` // base58: the "destination" owner
Signature account.Signature `json:"signature"` // hex: corresponds to owner in linked record
Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record
}
// BlockFoundation - the unpacked Proofer Data structure
// this is first tx in every block and can only be used there
type BlockFoundation struct {
Version uint64 `json:"version,string"` // reflects combination of supported currencies
Payments currency.Map `json:"payments"` // contents depend on version
Owner *account.Account `json:"owner"` // base58
Nonce uint64 `json:"nonce,string"` // unsigned 0..N
Signature account.Signature `json:"signature"` // hex
}
// BlockOwnerTransfer - the unpacked Block Owner Transfer Data structure
// forms a chain that links back to a foundation record which has a TxId of:
// SHA3-256 . concat blockDigest leBlockNumberUint64
type BlockOwnerTransfer struct {
Link merkle.Digest `json:"link"` // previous record
Escrow *Payment `json:"escrow"` // optional escrow payment address
Version uint64 `json:"version,string"` // reflects combination of supported currencies
Payments currency.Map `json:"payments"` // require length and contents depend on version
Owner *account.Account `json:"owner"` // base58
Signature account.Signature `json:"signature"` // hex
Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record
}
// BitmarkShare - turn a bitmark provenance chain into a fungible share
type BitmarkShare struct {
Link merkle.Digest `json:"link"` // previous record
Quantity uint64 `json:"quantity,string"` // initial balance quantity
Signature account.Signature `json:"signature"` // hex
}
// ShareGrant - grant some shares to another (one way transfer)
type ShareGrant struct {
ShareId merkle.Digest `json:"shareId"` // share = issue id
Quantity uint64 `json:"quantity,string"` // shares to transfer > 0
Owner *account.Account `json:"owner"` // base58
Recipient *account.Account `json:"recipient"` // base58
BeforeBlock uint64 `json:"beforeBlock,string"` // expires when chain height > before block
Signature account.Signature `json:"signature"` // hex
Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record
}
// ShareSwap - swap some shares to another (two way transfer)
type ShareSwap struct {
ShareIdOne merkle.Digest `json:"shareIdOne"` // share = issue id
QuantityOne uint64 `json:"quantityOne,string"` // shares to transfer > 0
OwnerOne *account.Account `json:"ownerOne"` // base58
ShareIdTwo merkle.Digest `json:"shareIdTwo"` // share = issue id
QuantityTwo uint64 `json:"quantityTwo,string"` // shares to transfer > 0
OwnerTwo *account.Account `json:"ownerTwo"` // base58
BeforeBlock uint64 `json:"beforeBlock,string"` // expires when chain height > before block
Signature account.Signature `json:"signature"` // hex
Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record
}
// Type - returns the record type code
func (record Packed) Type() TagType {
recordType, n := util.FromVarint64(record)
if 0 == n {
return NullTag
}
return TagType(recordType)
}
// RecordName - returns the name of a transaction record as a string
func RecordName(record interface{}) (string, bool) {
switch record.(type) {
case *OldBaseData, OldBaseData:
return "BaseData", true
case *AssetData, AssetData:
return "AssetData", true
case *BitmarkIssue, BitmarkIssue:
return "BitmarkIssue", true
case *BitmarkTransferUnratified, BitmarkTransferUnratified:
return "BitmarkTransferUnratified", true
case *BitmarkTransferCountersigned, BitmarkTransferCountersigned:
return "BitmarkTransferCountersigned", true
case *BlockFoundation, BlockFoundation:
return "BlockFoundation", true
case *BlockOwnerTransfer, BlockOwnerTransfer:
return "BlockOwnerTransfer", true
case *BitmarkShare, BitmarkShare:
return "ShareBalance", true
case *ShareGrant, ShareGrant:
return "ShareGrant", true
case *ShareSwap, ShareSwap:
return "ShareSwap", true
default:
return "*unknown*", false
}
}
// AssetId - compute an asset id
func (assetData *AssetData) AssetId() AssetIdentifier {
r | eturn NewAssetIdentifier([]byte(assetData.Fingerprint))
}
// | identifier_body | |
transaction.go | FingerprintLength = 1
maxFingerprintLength = 1024
maxSignatureLength = 1024
)
// OldBaseData - the unpacked Proofer Data structure (OBSOLETE)
// this is first tx in every block and can only be used there
type OldBaseData struct {
Currency currency.Currency `json:"currency"` // utf-8 → Enum
PaymentAddress string `json:"paymentAddress"` // utf-8
Owner *account.Account `json:"owner"` // base58
Nonce uint64 `json:"nonce,string"` // unsigned 0..N
Signature account.Signature `json:"signature,"` // hex
}
// AssetData - the unpacked Asset Data structure
type AssetData struct {
Name string `json:"name"` // utf-8
Fingerprint string `json:"fingerprint"` // utf-8
Metadata string `json:"metadata"` // utf-8
Registrant *account.Account `json:"registrant"` // base58
Signature account.Signature `json:"signature"` // hex
}
// BitmarkIssue - the unpacked BitmarkIssue structure
type BitmarkIssue struct {
AssetId AssetIdentifier `json:"assetId"` // link to asset record
Owner *account.Account `json:"owner"` // base58: the "destination" owner
Nonce uint64 `json:"nonce,string"` // to allow for multiple issues at the same time
Signature account.Signature `json:"signature"` // hex: corresponds to owner in linked record
}
// Payment - optional payment record
type Payment struct {
Currency currency.Currency `json:"currency"` // utf-8 → Enum
Address string `json:"address"` // utf-8
Amount uint64 `json:"amount,string"` // number as string, in terms of smallest currency unit
}
// PaymentAlternative - a single payment possibility - for use in RPC layers
// up to entries:
// 1. issue block owner payment
// 2. last transfer block owner payment (can merge with 1 if same address)
// 3. optional transfer payment
type PaymentAlternative []*Payment
// BitmarkTransfer - to access field of various transfer types
type BitmarkTransfer interface {
Transaction
GetLink() merkle.Digest
GetPayment() *Payment
GetOwner() *account.Account
GetCurrencies() currency.Map
GetSignature() account.Signature
GetCountersignature() account.Signature
}
// BitmarkTransferUnratified - the unpacked BitmarkTransfer structure
type BitmarkTransferUnratified struct {
Link merkle.Digest `json:"link"` // previous record
Escrow *Payment `json:"escrow"` // optional escrow payment address
Owner *account.Account `json:"owner"` // base58: the "destination" owner
Signature account.Signature `json:"signature"` // hex: corresponds to owner in linked record
}
// BitmarkTransferCountersigned - the unpacked Countersigned BitmarkTransfer structure
type BitmarkTransferCountersigned struct {
Link merkle.Digest `json:"link"` // previous record
Escrow *Payment `json:"escrow"` // optional escrow payment address
Owner *account.Account `json:"owner"` // base58: the "destination" owner
Signature account.Signature `json:"signature"` // hex: corresponds to owner in linked record
Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record
}
// BlockFoundation - the unpacked Proofer Data structure
// this is first tx in every block and can only be used there
type BlockFoundation struct {
Version uint64 `json:"version,string"` // reflects combination of supported currencies
Payments currency.Map `json:"payments"` // contents depend on version
Owner *account.Account `json:"owner"` // base58
Nonce uint64 `json:"nonce,string"` // unsigned 0..N
Signature account.Signature `json:"signature"` // hex
}
// BlockOwnerTransfer - the unpacked Block Owner Transfer Data structure
// forms a chain that links back to a foundation record which has a TxId of:
// SHA3-256 . concat blockDigest leBlockNumberUint64
type BlockOwnerTransfer struct {
Link merkle.Digest `json:"link"` // previous record
Escrow *Payment `json:"escrow"` // optional escrow payment address
Version uint64 `json:"version,string"` // reflects combination of supported currencies
Payments currency.Map `json:"payments"` // require length and contents depend on version
Owner *account.Account `json:"owner"` // base58
Signature account.Signature `json:"signature"` // hex
Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record
}
// BitmarkShare - turn a bitmark provenance chain into a fungible share
type BitmarkShare struct {
Link merkle.Digest `json:"link"` // previous record
Quantity uint64 `json:"quantity,string"` // initial balance quantity
Signature account.Signature `json:"signature"` // hex
}
// ShareGrant - grant some shares to another (one way transfer)
type ShareGrant struct {
ShareId merkle.Digest `json:"shareId"` // share = issue id
Quantity uint64 `json:"quantity,string"` // shares to transfer > 0
Owner *account.Account `json:"owner"` // base58
Recipient *account.Account `json:"recipient"` // base58
BeforeBlock uint64 `json:"beforeBlock,string"` // expires when chain height > before block
Signature account.Signature `json:"signature"` // hex
Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record
}
// ShareSwap - swap some shares to another (two way transfer)
type ShareSwap struct {
ShareIdOne merkle.Digest `json:"shareIdOne"` // share = issue id
QuantityOne uint64 `json:"quantityOne,string"` // shares to transfer > 0
OwnerOne *account.Account `json:"ownerOne"` // base58
ShareIdTwo merkle.Digest `json:"shareIdTwo"` // share = issue id
QuantityTwo uint64 `json:"quantityTwo,string"` // shares to transfer > 0
OwnerTwo *account.Account `json:"ownerTwo"` // base58
BeforeBlock uint64 `json:"beforeBlock,string"` // expires when chain height > before block
Signature account.Signature `json:"signature"` // hex
Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record
}
// Type - returns the record type code
func (record Packed) Type() TagType {
recordType, n := util.FromVarint64(record)
if 0 == n {
return NullTag
}
return TagType(recordType)
}
// RecordName - returns the name of a transaction record as a string
func RecordName(record interface{}) (string, bool) {
switch record.(type) {
case *OldBaseData, OldBaseData:
return "BaseData", true
case *AssetData, AssetData:
return "AssetData", true
case *BitmarkIssue, BitmarkIssue:
return "BitmarkIssue", true
case *BitmarkTransferUnratified, BitmarkTransferUnratified:
return "BitmarkTransferUnratified", true
case *BitmarkTransferCountersigned, BitmarkTransferCountersigned:
return "BitmarkTransferCountersigned", true
case *BlockFoundation, BlockFoundation:
return "BlockFoundation", true
case *BlockOwnerTransfer, BlockOwnerTransfer:
return "BlockOwnerTransfer", true
case *BitmarkShare, BitmarkShare:
return "ShareBalance", true
case *ShareGrant, ShareGrant:
return "ShareGrant", true
case *ShareSwap, ShareSwap:
return "ShareSwap", true
default:
return "*unknown*", false
}
}
// AssetId - compute an asset id
func (assetData *AssetData) AssetId() AssetIdentifier {
return NewAssetIdentifier([]byte(assetData.Fingerprint))
}
// MakeLink - Create an link for a packed record
func (record Packed) MakeLink() merkle.Digest {
return merkle.NewDigest(record)
}
// MarshalText - convert a packed to its hex JSON form
func (record Packed) MarshalText() ([]byte, error) {
size := hex.EncodedLen(len(record))
b := make([]byte, size)
hex.Encode(b, record)
return b, nil
}
// UnmarshalText - convert a packed to its hex JSON form
func (record *Packed) Unma | rshalText(s [ | identifier_name | |
transaction.go |
// TagType - type code for transactions
type TagType uint64

// enumerate the possible transaction record types
// this is encoded a Varint64 at start of "Packed"
//
// the single-iota form below is value-for-value identical to writing
// "= TagType(iota)" on every line: each bare identifier repeats the
// previous expression and iota advances once per ConstSpec
const (
	// null marks beginning of list - not used as a record type
	NullTag TagType = iota

	// valid record types
	// OBSOLETE items must still be supported to process older blocks
	BaseDataTag                     // OBSOLETE: block owner
	AssetDataTag                    // create asset
	BitmarkIssueTag                 // issue asset
	BitmarkTransferUnratifiedTag    // single signed transfer
	BitmarkTransferCountersignedTag // two signature transfer
	BlockFoundationTag              // block owner
	BlockOwnerTransferTag           // block owner transfer
	BitmarkShareTag                 // convert bitmark to a quantity of shares
	ShareGrantTag                   // grant some value to another account
	ShareSwapTag                    // atomically swap shares between accounts

	// this item must be last
	InvalidTag
)
// Packed - packed records are just a byte slice
// (the slice begins with a Varint64-encoded TagType)
type Packed []byte

// Transaction - generic transaction interface
// Pack serialises a record into its Packed wire form; the account
// parameter is presumably the owner/signing account — NOTE(review):
// confirm against the concrete Pack implementations (not in this chunk)
type Transaction interface {
	Pack(account *account.Account) (Packed, error)
}

// byte sizes for various fields
// NOTE(review): enforcement of these limits is not visible in this chunk
const (
	maxNameLength        = 64
	maxMetadataLength    = 2048
	minFingerprintLength = 1
	maxFingerprintLength = 1024
	maxSignatureLength   = 1024
)
// OldBaseData - the unpacked Proofer Data structure (OBSOLETE)
// this is first tx in every block and can only be used there
type OldBaseData struct {
	Currency       currency.Currency `json:"currency"`       // utf-8 → Enum
	PaymentAddress string            `json:"paymentAddress"` // utf-8
	Owner          *account.Account  `json:"owner"`          // base58
	Nonce          uint64            `json:"nonce,string"`   // unsigned 0..N
	Signature      account.Signature `json:"signature"`      // hex
}
// AssetData - the unpacked Asset Data structure
type AssetData struct {
Name string `json:"name"` // utf-8
Fingerprint string `json:"fingerprint"` // utf-8
Metadata string `json:"metadata"` // utf-8
Registrant *account.Account `json:"registrant"` // base58
Signature account.Signature `json:"signature"` // hex
}
// BitmarkIssue - the unpacked BitmarkIssue structure
type BitmarkIssue struct {
AssetId AssetIdentifier `json:"assetId"` // link to asset record
Owner *account.Account `json:"owner"` // base58: the "destination" owner
Nonce uint64 `json:"nonce,string"` // to allow for multiple issues at the same time
Signature account.Signature `json:"signature"` // hex: corresponds to owner in linked record
}
// Payment - optional payment record
type Payment struct {
Currency currency.Currency `json:"currency"` // utf-8 → Enum
Address string `json:"address"` // utf-8
Amount uint64 `json:"amount,string"` // number as string, in terms of smallest currency unit
}
// PaymentAlternative - a single payment possibility - for use in RPC layers
// up to entries:
// 1. issue block owner payment
// 2. last transfer block owner payment (can merge with 1 if same address)
// 3. optional transfer payment
type PaymentAlternative []*Payment
// BitmarkTransfer - to access field of various transfer types
type BitmarkTransfer interface {
Transaction
GetLink() merkle.Digest
GetPayment() *Payment
GetOwner() *account.Account
GetCurrencies() currency.Map
GetSignature() account.Signature
GetCountersignature() account.Signature
}
// BitmarkTransferUnratified - the unpacked BitmarkTransfer structure
type BitmarkTransferUnratified struct {
Link merkle.Digest `json:"link"` // previous record
Escrow *Payment `json:"escrow"` // optional escrow payment address
Owner *account.Account `json:"owner"` // base58: the "destination" owner
Signature account.Signature `json:"signature"` // hex: corresponds to owner in linked record
}
// BitmarkTransferCountersigned - the unpacked Countersigned BitmarkTransfer structure
type BitmarkTransferCountersigned struct {
Link merkle.Digest `json:"link"` // previous record
Escrow *Payment `json:"escrow"` // optional escrow payment address
Owner *account.Account `json:"owner"` // base58: the "destination" owner
Signature account.Signature `json:"signature"` // hex: corresponds to owner in linked record
Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record
}
// BlockFoundation - the unpacked Proofer Data structure
// this is first tx in every block and can only be used there
type BlockFoundation struct {
Version uint64 `json:"version,string"` // reflects combination of supported currencies
Payments currency.Map `json:"payments"` // contents depend on version
Owner *account.Account `json:"owner"` // base58
Nonce uint64 `json:"nonce,string"` // unsigned 0..N
Signature account.Signature `json:"signature"` // hex
}
// BlockOwnerTransfer - the unpacked Block Owner Transfer Data structure
// forms a chain that links back to a foundation record which has a TxId of:
// SHA3-256 . concat blockDigest leBlockNumberUint64
type BlockOwnerTransfer struct {
Link merkle.Digest `json:"link"` // previous record
Escrow *Payment `json:"escrow"` // optional escrow payment address
Version uint64 `json:"version,string"` // reflects combination of supported currencies
Payments currency.Map `json:"payments"` // require length and contents depend on version
Owner *account.Account `json:"owner"` // base58
Signature account.Signature `json:"signature"` // hex
Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record
}
// BitmarkShare - turn a bitmark provenance chain into a fungible share
type BitmarkShare struct {
Link merkle.Digest `json:"link"` // previous record
Quantity uint64 `json:"quantity,string"` // initial balance quantity
Signature account.Signature `json:"signature"` // hex
}
// ShareGrant - grant some shares to another (one way transfer)
type ShareGrant struct {
ShareId merkle.Digest `json:"shareId"` // share = issue id
Quantity uint64 `json:"quantity,string"` // shares to transfer > 0
Owner *account.Account `json:"owner"` // base58
Recipient *account.Account `json:"recipient"` // base58
BeforeBlock uint64 `json:"beforeBlock,string"` // expires when chain height > before block
Signature account.Signature `json:"signature"` // hex
Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record
}
// ShareSwap - swap some shares to another (two way transfer)
type ShareSwap struct {
ShareIdOne merkle.Digest `json:"shareIdOne"` // share = issue id
QuantityOne uint64 `json:"quantityOne,string"` // shares to transfer > 0
OwnerOne *account.Account `json:"ownerOne"` // base58
ShareIdTwo merkle.Digest `json:"shareIdTwo"` // share = issue id
QuantityTwo uint64 `json:"quantityTwo,string"` // shares to transfer > 0
OwnerTwo *account.Account `json:"ownerTwo"` // base58
BeforeBlock uint64 `json:"beforeBlock,string"` // expires when chain height > before block
Signature account.Signature `json:"signature"` // hex
Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record
}
// Type - returns the record type code
// decodes the leading Varint64 of the packed bytes; a failed decode
// (zero bytes consumed) yields NullTag
func (record Packed) Type() TagType {
	value, byteCount := util.FromVarint64(record)
	if byteCount == 0 {
		return NullTag
	}
	return TagType(value)
}
// RecordName - returns the name of a transaction record as a string
func RecordName(record interface{}) (string, bool) {
switch record.(type) {
case *OldBaseData | random_line_split | ||
history.component.ts | welcomeMessageHistory";
logout: string = "Logout";
DASHBOAR: string = "DASHBOARD,";
ENVELOPES: string = "ENVELOPES";
GOALS: string = "GOALS";
BILLS: string = "BILLS";
HISTORY: string = "HISTORY";
UTILITIES: string = "UTILITIES";
user: string = "User";
settings: string = "Settings";
appearance: string = "Appearance";
light: string = "Light";
dark: string = "Dark";
chartData1: Array<any> = [];
chartColors1: Array<any> = [];
chartLabels1: Array<any> = [];
chartType1: string = "doughnut";
chartData2: Array<any> = [];
chartColors2: Array<any> = [];
chartLabels2: Array<string> = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"];
chartType2: string = "line";
paginatedExpenses: Array<Expense> = [];
expenseCount: number = 0;
page: number = 1;
pageSize: number = 10
filter: string = '';
historyAll = "historyAll";
historyExport = "historyExport";
historyTotal = "historyTotal";
constructor(
private api: ApiService,
@Inject(DOCUMENT) private document: HTMLDocument,
private router: Router,
private authentication: AuthenticationService,
private connectionService: ConnectionService
) { }
// Public accessor for the connection service's online flag.
// NOTE(review): no caller is visible in this chunk — presumably read by the template.
public hasConnection(): boolean {
    return this.connectionService.hasConnection;
}
/**
 * Fetch one page of expenses from the API, but only while online and only
 * when no page has been loaded yet (the paginated list is empty).
 * Called from the 1-second poll installed in ngOnInit.
 *
 * FIX: the original tested `connectionService.hasConnection` twice
 * (outer && plus an identical nested if) — the redundant inner check
 * is removed; behaviour is unchanged.
 */
async handlePaginatedOfflineAsync() {
    if (this.connectionService.hasConnection && this.paginatedExpenses.length === 0) {
        this.api.getExpense('', this.pageSize, (this.page - 1) * this.pageSize).then(result => {
            this.expenseCount = result['length'];
            this.paginatedExpenses = result['expenses'];
        }).catch(error => { /* keep the previous page contents on failure */ });
    }
}
@ViewChild('color') color: ElementRef;
/**
 * Load the current user, then build the table total, the doughnut chart
 * (per-category sums) and the line chart (per-category monthly sums).
 * On any API failure the user is logged out and redirected to login.
 *
 * BUG FIX: the original wrote `toFixed(2); + "€</h5>"` — the stray
 * semicolon turned `+ "€</h5>"` into a dead expression, so the currency
 * sign and the closing </h5> tag were never rendered.
 */
ngOnInit(): void {
    this.api.getUser().then(result => {
        this.refreshLanguage(result.language);
        this.categories = result.categories;
        this.expenses = this.generateExpenses(result.expense);
        this.currency = result.defaultCurrency;
        // table data
        const parsedTable = this.parseTable(this.expenses);
        document.querySelector(".totaltext").innerHTML =
            "<h5>" + this.historyTotal + ": " + parsedTable.sum.toFixed(2) + "€</h5>";
        const pieChart = this.groupByCategories(parsedTable);
        const lineChartData = this.makeDataForGraph(this.filterByCategory(this.expenses));
        this.chartData1 = this.makeDataArray1(pieChart);
        this.chartColors1 = this.makeColorArray1(pieChart);
        this.chartLabels1 = this.makeLabelArray1(pieChart);
        this.chartData2 = this.generateDatasets(lineChartData);
        this.chartColors2 = this.getColors(lineChartData);
    }).catch(error => {
        // any failure (e.g. expired session) sends the user back to login
        this.authentication.logout();
        this.router.navigate(['/']);
    });
    setInterval(() => { this.handlePaginatedOfflineAsync() }, 1000);
}
// Switch the active UI language, then re-resolve every translated string
// this view displays.
refreshLanguage(language: string) {
    setLanguage(language);
    this.message = getTranslation("messageHistory");
    this.welcomeMessage = getTranslation("welcomeMessageHistory");
    this.historyAll = getTranslation("historyAll");
    this.historyExport = getTranslation("historyExport");
    this.historyTotal = getTranslation("historyTotal");
}
/**
 * Flatten expense rows into plain table entries and accumulate the grand total.
 * NOTE(review): this copies `row.receiver`, but rows built by generateExpenses
 * carry `recipient` — the receiver column would then be undefined; behaviour
 * deliberately kept as-is, confirm which key callers expect.
 */
parseTable(rows) {
    const data = rows.map(row => ({
        id: row.id,
        year: row.year,
        month: row.month,
        day: row.day,
        category: row.category,
        receiver: row.receiver,
        currency: row.currency,
        value: row.value,
        color: row.color,
    }));
    const sum = rows.reduce((total, row) => total + row.value, 0);
    return { sum: sum, data: data };
}
/**
 * Group expenses by category name.
 * Each map value is the array of that category's expenses; the array also
 * carries a non-index `color` property (read later when building charts).
 */
filterByCategory(table) {
    const categories = new Map();
    for (const expense of table) {
        let bucket = categories.get(expense.category);
        if (bucket === undefined) {
            bucket = [];
            categories.set(expense.category, bucket);
        }
        bucket.push(expense);
        bucket.color = expense.color;
    }
    return categories;
}
// Sum expense values per month key; returns Map<month, { sum }>.
filterByMonth(expenses) {
    const totals = new Map();
    for (const expense of expenses) {
        const entry = totals.get(expense.month);
        if (entry === undefined) {
            totals.set(expense.month, { sum: expense.value });
        } else {
            entry.sum += expense.value;
        }
    }
    return totals;
}
// Build per-category monthly sums for the line chart.
// `category` is the Map produced by filterByCategory; the per-category colour
// stashed on each bucket array is forwarded onto the resulting month Map.
makeDataForGraph(category) {
    var month = new Map();
    let keys = Array.from(category.keys());
    for (let name of keys) {
        month.set(name, this.filterByMonth(category.get(name)));
        // copy the colour tag so chart colouring survives the regrouping
        month.get(name).color = category.get(name).color;
    }
    return month;
}
// Expand a three-letter month code to its full English name.
// Returns undefined for unknown codes, matching the original switch
// (which had no default branch).
convertMonthsToName(month) {
    const names = new Map([
        ["JAN", "January"], ["FEB", "February"], ["MAR", "March"],
        ["APR", "April"], ["MAY", "May"], ["JUN", "June"],
        ["JUL", "July"], ["AUG", "August"], ["SEP", "September"],
        ["OCT", "October"], ["NOV", "November"], ["DEC", "December"],
    ]);
    return names.get(month);
}
// Map raw API expense records (ISO date strings) into flat display rows,
// then sort them newest-first via `compare`.
generateExpenses(expense) {
    var expensesArray = []
    for (var exp of expense) {
        // "YYYY-MM-DDT..." -> ["YYYY", "MM", "DD"]
        var date = exp.date.split('T')[0].split('-');
        expensesArray.push({
            id: exp._id,
            year: date[0],
            month: date[1],
            monthName: this.translateMonth(date[1]),
            day: date[2],
            category: exp.category.name,
            recipient: exp.recipient,
            value: exp.value,
            currency: exp.currency,
            color: exp.category.color,
        });
    }
    expensesArray.sort(this.compare)
    return expensesArray;
}
// Translate a zero-padded numeric month ("01".."12") to its three-letter
// code; unknown input yields undefined, matching the original switch.
translateMonth(month) {
    const codes = new Map([
        ["01", "JAN"], ["02", "FEB"], ["03", "MAR"], ["04", "APR"],
        ["05", "MAY"], ["06", "JUN"], ["07", "JUL"], ["08", "AUG"],
        ["09", "SEP"], ["10", "OCT"], ["11", "NOV"], ["12", "DEC"],
    ]);
    return codes.get(month);
}
/**
 * Comparator for expense rows: newest date first (descending by
 * year/month/day, compared as zero-padded strings).
 *
 * BUG FIX: the original returned -1 for two identical dates, violating the
 * comparator contract (equal inputs must yield 0) and leaving the relative
 * order of same-day expenses implementation-defined; it also ended with an
 * unreachable `return 0`.
 */
compare(a, b) {
    if (a.year !== b.year) {
        return a.year < b.year ? 1 : -1;
    }
    if (a.month !== b.month) {
        return a.month < b.month ? 1 : -1;
    }
    if (a.day !== b.day) {
        return a.day < b.day ? 1 : -1;
    }
    return 0;
}
/**
 * Aggregate parsed table rows into { name, sum, color } groups per category.
 *
 * BUG FIX: the original seeded a new group with the raw value but added
 * subsequent rows with parseInt(), silently truncating decimals for every
 * row after the first (and concatenating if values were ever strings).
 * Values are now coerced with Number() consistently in both branches.
 */
groupByCategories(parsedTable) {
    const groups = [];
    for (const entry of parsedTable.data) {
        const group = this.findGroupByCategory(groups, entry.category);
        if (group != null) {
            group.sum += Number(entry.value);
        } else {
            groups.push({
                name: entry.category,
                sum: Number(entry.value),
                color: entry.color,
            });
        }
    }
    return groups;
}
fi | roups, category) {
for (let group of groups) {
if (group.name == category) return group;
}
return null;
}
/**
 * Extract the per-group sums as the doughnut chart's data series.
 *
 * BUG FIX: `const returnTable = [array.length];` creates an array
 * CONTAINING the length, not an array OF that length — for an empty input
 * the original returned [0] instead of []. `map` fixes that and is
 * identical for non-empty inputs (every index was overwritten anyway).
 */
makeDataArray1(array): Array<any> {
    return array.map(group => group.sum);
}
// Chart.js colour config for the doughnut chart: a single dataset entry
// whose backgroundColor is a per-slice array of one-colour arrays
// (nested shape kept exactly as the original produced it).
makeColorArray1(array) {
    const backgroundColor = array.map(group => [group.color]);
    return [{ backgroundColor: backgroundColor }];
}
/**
 * Extract the per-group names as the doughnut chart's labels.
 *
 * BUG FIX: same `[array.length]` defect as makeDataArray1 — an empty input
 * returned [0] instead of []; `map` is identical for non-empty inputs.
 */
makeLabelArray1(array) {
    return array.map(group => group.name);
}
generateDatasets(map) {
var datasets: ChartDataSets[] = [];
let keys = Array.from(map.keys());
for (let i of keys) {
datasets.push(this.generateDataset(map.get(i | ndGroupByCategory(g | identifier_name |
history.component.ts | "welcomeMessageHistory";
logout: string = "Logout";
DASHBOAR: string = "DASHBOARD,";
ENVELOPES: string = "ENVELOPES";
GOALS: string = "GOALS";
BILLS: string = "BILLS";
HISTORY: string = "HISTORY";
UTILITIES: string = "UTILITIES";
user: string = "User";
settings: string = "Settings";
appearance: string = "Appearance";
light: string = "Light";
dark: string = "Dark";
chartData1: Array<any> = [];
chartColors1: Array<any> = [];
chartLabels1: Array<any> = [];
chartType1: string = "doughnut";
chartData2: Array<any> = [];
chartColors2: Array<any> = [];
chartLabels2: Array<string> = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"];
chartType2: string = "line";
paginatedExpenses: Array<Expense> = [];
expenseCount: number = 0;
page: number = 1;
pageSize: number = 10
filter: string = '';
historyAll = "historyAll";
historyExport = "historyExport";
historyTotal = "historyTotal";
constructor(
private api: ApiService,
@Inject(DOCUMENT) private document: HTMLDocument,
private router: Router,
private authentication: AuthenticationService,
private connectionService: ConnectionService
) { }
public hasConnection(): boolean {
return this.connectionService.hasConnection;
}
async handlePaginatedOfflineAsync() {
if (this.connectionService.hasConnection && this.paginatedExpenses.length === 0) {
if (this.connectionService.hasConnection) {
this.api.getExpense('', this.pageSize, (this.page - 1) * this.pageSize).then(result => {
this.expenseCount = result['length'];
this.paginatedExpenses = result['expenses'];
}).catch(error => {});
}
}
}
@ViewChild('color') color: ElementRef;
/**
 * Load the current user, then build the table total, the doughnut chart
 * (per-category sums) and the line chart (per-category monthly sums).
 * On any API failure the user is logged out and redirected to login.
 *
 * BUG FIX: the original wrote `toFixed(2); + "€</h5>"` — the stray
 * semicolon turned `+ "€</h5>"` into a dead expression, so the currency
 * sign and the closing </h5> tag were never rendered.
 */
ngOnInit(): void {
    this.api.getUser().then(result => {
        this.refreshLanguage(result.language);
        this.categories = result.categories;
        this.expenses = this.generateExpenses(result.expense);
        this.currency = result.defaultCurrency;
        // table data
        const parsedTable = this.parseTable(this.expenses);
        document.querySelector(".totaltext").innerHTML =
            "<h5>" + this.historyTotal + ": " + parsedTable.sum.toFixed(2) + "€</h5>";
        const pieChart = this.groupByCategories(parsedTable);
        const lineChartData = this.makeDataForGraph(this.filterByCategory(this.expenses));
        this.chartData1 = this.makeDataArray1(pieChart);
        this.chartColors1 = this.makeColorArray1(pieChart);
        this.chartLabels1 = this.makeLabelArray1(pieChart);
        this.chartData2 = this.generateDatasets(lineChartData);
        this.chartColors2 = this.getColors(lineChartData);
    }).catch(error => {
        // any failure (e.g. expired session) sends the user back to login
        this.authentication.logout();
        this.router.navigate(['/']);
    });
    setInterval(() => { this.handlePaginatedOfflineAsync() }, 1000);
}
refreshLanguage(language: string) {
setLanguage(language);
this.message = getTranslation("messageHistory");
this.welcomeMessage = getTranslation("welcomeMessageHistory");
this.historyAll = getTranslation("historyAll");
this.historyExport = getTranslation("historyExport");
this.historyTotal = getTranslation("historyTotal");
}
parseTable(rows) {
const parsedTable = {
sum: 0,
data: []
};
for (let row of rows) {
parsedTable.data.push({
id: row.id,
year: row.year,
month: row.month,
day: row.day,
category: row.category,
receiver: row.receiver,
currency: row.currency,
value: row.value,
color: row.color,
});
parsedTable.sum += row.value;
}
return parsedTable;
}
filterByCategory(table) {
var categories = new Map();
for (var expense of table) {
if (!categories.get(expense.category)) {
categories.set(expense.category, []);
}
categories.get(expense.category).push(expense);
categories.get(expense.category).color = expense.color;
}
return categories;
}
filterByMonth(expenses) {
var month = new Map();
for (var expense of expenses) {
if (!month.get(expense.month)) {
month.set(expense.month, {});
month.get(expense.month).sum = 0;
}
month.get(expense.month).sum += expense.value;
}
return month;
}
makeDataForGraph(category) {
var month = new Map();
let keys = Array.from(category.keys());
for (let name of keys) {
month.set(name, this.filterByMonth(category.get(name)));
month.get(name).color = category.get(name).color;
}
return month;
}
convertMonthsToName(month) {
switch (month) {
case 'JAN':
return "January";
case 'FEB':
return "February";
case 'MAR':
return "March";
case 'APR':
return "April";
case 'MAY':
return "May";
case 'JUN':
return "June";
case 'JUL':
return "July";
case 'AUG':
return "August";
case 'SEP':
return "September";
case 'OCT':
return "October";
case 'NOV':
return "November";
case 'DEC':
return "December";
}
}
generateExpenses(expense) {
var expensesArray = []
for (var exp of expense) {
var date = exp.date.split('T')[0].split('-');
expensesArray.push({
id: exp._id,
year: date[0],
month: date[1],
monthName: this.translateMonth(date[1]),
day: date[2],
category: exp.category.name,
recipient: exp.recipient,
value: exp.value,
currency: exp.currency,
color: exp.category.color,
});
}
expensesArray.sort(this.compare)
return expensesArray;
}
translateMonth(month) {
switch (month) {
case '01':
return "JAN";
case '02':
return "FEB";
case '03':
return "MAR";
case '04':
return "APR";
case '05':
return "MAY";
case '06':
return "JUN";
case '07':
return "JUL";
case '08':
return "AUG";
case '09':
return "SEP";
case '10':
return "OCT";
case '11':
return "NOV";
case '12':
return "DEC";
}
}
compare(a, b) { //1 menjava, -1 ni menjava
if (a.year < b.year) {
return 1;
} else if (a.year == b.year) {
if (a.month < b.month) {
| lse if (a.month == b.month) {
if (a.day < b.day) {
return 1;
} else {
return -1;
}
} else {
return -1;
}
} else {
return -1;
}
return 0;
}
groupByCategories(parsedTable) {
const groups = [];
for (let entry of parsedTable.data) {
const group = this.findGroupByCategory(groups, entry.category);
if (group != null) {
group.sum += parseInt(entry.value);
} else {
groups.push({
name: entry.category,
sum: entry.value,
color: entry.color,
});
}
}
return groups;
}
findGroupByCategory(groups, category) {
for (let group of groups) {
if (group.name == category) return group;
}
return null;
}
/**
 * Extract the per-group sums as the doughnut chart's data series.
 *
 * BUG FIX: `const returnTable = [array.length];` creates an array
 * CONTAINING the length, not an array OF that length — for an empty input
 * the original returned [0] instead of [].
 */
makeDataArray1(array): Array<any> {
    return array.map(group => group.sum);
}
makeColorArray1(array) {
const table = [];
const returnTable = [];
for (let i = 0; i < array.length; i++) {
returnTable.push([array[i].color]);
}
let barva = {
backgroundColor: returnTable
}
table[0] = barva;
return table;
}
/**
 * Extract the per-group names as the doughnut chart's labels.
 *
 * BUG FIX: same `[array.length]` defect as makeDataArray1 — an empty input
 * returned [0] instead of [].
 */
makeLabelArray1(array) {
    return array.map(group => group.name);
}
generateDatasets(map) {
var datasets: ChartDataSets[] = [];
let keys = Array.from(map.keys());
for (let i of keys) {
datasets.push(this.generateDataset(map.get(i | return 1;
} e | conditional_block |
history.component.ts | "welcomeMessageHistory";
logout: string = "Logout";
DASHBOAR: string = "DASHBOARD,";
ENVELOPES: string = "ENVELOPES";
GOALS: string = "GOALS";
BILLS: string = "BILLS";
HISTORY: string = "HISTORY";
UTILITIES: string = "UTILITIES";
user: string = "User";
settings: string = "Settings";
appearance: string = "Appearance";
light: string = "Light";
dark: string = "Dark";
chartData1: Array<any> = [];
chartColors1: Array<any> = [];
chartLabels1: Array<any> = [];
chartType1: string = "doughnut";
chartData2: Array<any> = [];
chartColors2: Array<any> = [];
chartLabels2: Array<string> = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"];
chartType2: string = "line";
paginatedExpenses: Array<Expense> = [];
expenseCount: number = 0;
page: number = 1;
pageSize: number = 10
filter: string = '';
historyAll = "historyAll";
historyExport = "historyExport";
historyTotal = "historyTotal";
constructor(
private api: ApiService,
@Inject(DOCUMENT) private document: HTMLDocument,
private router: Router,
private authentication: AuthenticationService,
private connectionService: ConnectionService
) { }
public hasConnection(): boolean {
return this.connectionService.hasConnection;
}
async handlePaginatedOfflineAsync() {
if (this.connectionService.hasConnection && this.paginatedExpenses.length === 0) {
if (this.connectionService.hasConnection) {
this.api.getExpense('', this.pageSize, (this.page - 1) * this.pageSize).then(result => {
this.expenseCount = result['length'];
this.paginatedExpenses = result['expenses'];
}).catch(error => {});
}
}
}
@ViewChild('color') color: ElementRef;
/**
 * Load the current user, then build the table total, the doughnut chart
 * (per-category sums) and the line chart (per-category monthly sums).
 * On any API failure the user is logged out and redirected to login.
 *
 * BUG FIX: the original wrote `toFixed(2); + "€</h5>"` — the stray
 * semicolon turned `+ "€</h5>"` into a dead expression, so the currency
 * sign and the closing </h5> tag were never rendered.
 */
ngOnInit(): void {
    this.api.getUser().then(result => {
        this.refreshLanguage(result.language);
        this.categories = result.categories;
        this.expenses = this.generateExpenses(result.expense);
        this.currency = result.defaultCurrency;
        // table data
        const parsedTable = this.parseTable(this.expenses);
        document.querySelector(".totaltext").innerHTML =
            "<h5>" + this.historyTotal + ": " + parsedTable.sum.toFixed(2) + "€</h5>";
        const pieChart = this.groupByCategories(parsedTable);
        const lineChartData = this.makeDataForGraph(this.filterByCategory(this.expenses));
        this.chartData1 = this.makeDataArray1(pieChart);
        this.chartColors1 = this.makeColorArray1(pieChart);
        this.chartLabels1 = this.makeLabelArray1(pieChart);
        this.chartData2 = this.generateDatasets(lineChartData);
        this.chartColors2 = this.getColors(lineChartData);
    }).catch(error => {
        // any failure (e.g. expired session) sends the user back to login
        this.authentication.logout();
        this.router.navigate(['/']);
    });
    setInterval(() => { this.handlePaginatedOfflineAsync() }, 1000);
}
refreshLanguage(language: string) {
setLanguage(language);
this.message = getTranslation("messageHistory");
this.welcomeMessage = getTranslation("welcomeMessageHistory");
this.historyAll = getTranslation("historyAll");
this.historyExport = getTranslation("historyExport");
this.historyTotal = getTranslation("historyTotal");
}
parseTable(rows) {
const parsedTable = {
sum: 0,
data: []
};
for (let row of rows) {
parsedTable.data.push({
id: row.id,
year: row.year,
month: row.month,
day: row.day,
category: row.category,
receiver: row.receiver,
currency: row.currency,
value: row.value,
color: row.color,
});
parsedTable.sum += row.value;
}
return parsedTable;
}
filterByCategory(table) {
var categories = new Map();
for (var expense of table) {
if (!categories.get(expense.category)) {
categories.set(expense.category, []);
}
categories.get(expense.category).push(expense);
categories.get(expense.category).color = expense.color;
}
return categories;
}
filterByMonth(expenses) {
var month = new Map();
for (var expense of expenses) {
if (!month.get(expense.month)) {
month.set(expense.month, {});
month.get(expense.month).sum = 0;
}
month.get(expense.month).sum += expense.value;
}
return month;
}
makeDataForGraph(category) {
var month = new Map();
let keys = Array.from(category.keys());
for (let name of keys) {
month.set(name, this.filterByMonth(category.get(name)));
month.get(name).color = category.get(name).color;
}
return month;
}
convertMonthsToName(month) {
switch (month) {
case 'JAN':
return "January";
case 'FEB':
return "February";
case 'MAR':
return "March";
case 'APR':
return "April";
case 'MAY':
return "May";
case 'JUN':
return "June";
case 'JUL':
return "July";
case 'AUG':
return "August";
case 'SEP':
return "September";
case 'OCT':
return "October";
case 'NOV':
return "November";
case 'DEC':
return "December";
}
}
generateExpenses(expense) {
| }
translateMonth(month) {
switch (month) {
case '01':
return "JAN";
case '02':
return "FEB";
case '03':
return "MAR";
case '04':
return "APR";
case '05':
return "MAY";
case '06':
return "JUN";
case '07':
return "JUL";
case '08':
return "AUG";
case '09':
return "SEP";
case '10':
return "OCT";
case '11':
return "NOV";
case '12':
return "DEC";
}
}
/**
 * Comparator for expense rows: newest date first (descending by
 * year/month/day, compared as zero-padded strings).
 *
 * BUG FIX: the original returned -1 for two identical dates, violating the
 * comparator contract (equal inputs must yield 0); it also ended with an
 * unreachable `return 0`.
 */
compare(a, b) {
    if (a.year !== b.year) {
        return a.year < b.year ? 1 : -1;
    }
    if (a.month !== b.month) {
        return a.month < b.month ? 1 : -1;
    }
    if (a.day !== b.day) {
        return a.day < b.day ? 1 : -1;
    }
    return 0;
}
groupByCategories(parsedTable) {
const groups = [];
for (let entry of parsedTable.data) {
const group = this.findGroupByCategory(groups, entry.category);
if (group != null) {
group.sum += parseInt(entry.value);
} else {
groups.push({
name: entry.category,
sum: entry.value,
color: entry.color,
});
}
}
return groups;
}
findGroupByCategory(groups, category) {
for (let group of groups) {
if (group.name == category) return group;
}
return null;
}
/**
 * Extract the per-group sums as the doughnut chart's data series.
 *
 * BUG FIX: `const returnTable = [array.length];` creates an array
 * CONTAINING the length, not an array OF that length — for an empty input
 * the original returned [0] instead of [].
 */
makeDataArray1(array): Array<any> {
    return array.map(group => group.sum);
}
makeColorArray1(array) {
const table = [];
const returnTable = [];
for (let i = 0; i < array.length; i++) {
returnTable.push([array[i].color]);
}
let barva = {
backgroundColor: returnTable
}
table[0] = barva;
return table;
}
/**
 * Extract the per-group names as the doughnut chart's labels.
 *
 * BUG FIX: same `[array.length]` defect as makeDataArray1 — an empty input
 * returned [0] instead of [].
 */
makeLabelArray1(array) {
    return array.map(group => group.name);
}
generateDatasets(map) {
var datasets: ChartDataSets[] = [];
let keys = Array.from(map.keys());
for (let i of keys) {
datasets.push(this.generateDataset(map.get(i), | var expensesArray = []
for (var exp of expense) {
var date = exp.date.split('T')[0].split('-');
expensesArray.push({
id: exp._id,
year: date[0],
month: date[1],
monthName: this.translateMonth(date[1]),
day: date[2],
category: exp.category.name,
recipient: exp.recipient,
value: exp.value,
currency: exp.currency,
color: exp.category.color,
});
}
expensesArray.sort(this.compare)
return expensesArray; | identifier_body |
history.component.ts | "welcomeMessageHistory";
logout: string = "Logout";
DASHBOAR: string = "DASHBOARD,";
ENVELOPES: string = "ENVELOPES";
GOALS: string = "GOALS";
BILLS: string = "BILLS";
HISTORY: string = "HISTORY";
UTILITIES: string = "UTILITIES";
user: string = "User";
settings: string = "Settings";
appearance: string = "Appearance";
light: string = "Light";
dark: string = "Dark";
chartData1: Array<any> = [];
chartColors1: Array<any> = [];
chartLabels1: Array<any> = [];
chartType1: string = "doughnut";
chartData2: Array<any> = [];
chartColors2: Array<any> = [];
chartLabels2: Array<string> = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"];
chartType2: string = "line";
paginatedExpenses: Array<Expense> = [];
expenseCount: number = 0;
page: number = 1;
pageSize: number = 10
filter: string = '';
historyAll = "historyAll";
historyExport = "historyExport";
historyTotal = "historyTotal";
constructor(
private api: ApiService,
@Inject(DOCUMENT) private document: HTMLDocument,
private router: Router,
private authentication: AuthenticationService,
private connectionService: ConnectionService
) { }
public hasConnection(): boolean {
return this.connectionService.hasConnection;
}
async handlePaginatedOfflineAsync() {
if (this.connectionService.hasConnection && this.paginatedExpenses.length === 0) {
if (this.connectionService.hasConnection) {
this.api.getExpense('', this.pageSize, (this.page - 1) * this.pageSize).then(result => {
this.expenseCount = result['length'];
this.paginatedExpenses = result['expenses'];
}).catch(error => {});
}
}
}
@ViewChild('color') color: ElementRef;
ngOnInit(): void {
this.api.getUser().then(result => {
this.refreshLanguage(result.language);
this.categories = result.categories;
this.expenses = this.generateExpenses(result.expense);
this.currency = result.defaultCurrency;
//data za tabele | const pieChart = this.groupByCategories(parsedTable);
const lineChartData = this.makeDataForGraph(this.filterByCategory(this.expenses))
this.chartData1 = this.makeDataArray1(pieChart);
this.chartColors1 = this.makeColorArray1(pieChart);
this.chartLabels1 = this.makeLabelArray1(pieChart);
this.chartData2 = this.generateDatasets(lineChartData);
this.chartColors2 = this.getColors(lineChartData);
}).catch(error => {
this.authentication.logout();
this.router.navigate(['/']);
});
setInterval(()=> { this.handlePaginatedOfflineAsync() }, 1000);
}
refreshLanguage(language: string) {
setLanguage(language);
this.message = getTranslation("messageHistory");
this.welcomeMessage = getTranslation("welcomeMessageHistory");
this.historyAll = getTranslation("historyAll");
this.historyExport = getTranslation("historyExport");
this.historyTotal = getTranslation("historyTotal");
}
parseTable(rows) {
const parsedTable = {
sum: 0,
data: []
};
for (let row of rows) {
parsedTable.data.push({
id: row.id,
year: row.year,
month: row.month,
day: row.day,
category: row.category,
receiver: row.receiver,
currency: row.currency,
value: row.value,
color: row.color,
});
parsedTable.sum += row.value;
}
return parsedTable;
}
filterByCategory(table) {
var categories = new Map();
for (var expense of table) {
if (!categories.get(expense.category)) {
categories.set(expense.category, []);
}
categories.get(expense.category).push(expense);
categories.get(expense.category).color = expense.color;
}
return categories;
}
filterByMonth(expenses) {
var month = new Map();
for (var expense of expenses) {
if (!month.get(expense.month)) {
month.set(expense.month, {});
month.get(expense.month).sum = 0;
}
month.get(expense.month).sum += expense.value;
}
return month;
}
makeDataForGraph(category) {
var month = new Map();
let keys = Array.from(category.keys());
for (let name of keys) {
month.set(name, this.filterByMonth(category.get(name)));
month.get(name).color = category.get(name).color;
}
return month;
}
convertMonthsToName(month) {
switch (month) {
case 'JAN':
return "January";
case 'FEB':
return "February";
case 'MAR':
return "March";
case 'APR':
return "April";
case 'MAY':
return "May";
case 'JUN':
return "June";
case 'JUL':
return "July";
case 'AUG':
return "August";
case 'SEP':
return "September";
case 'OCT':
return "October";
case 'NOV':
return "November";
case 'DEC':
return "December";
}
}
generateExpenses(expense) {
var expensesArray = []
for (var exp of expense) {
var date = exp.date.split('T')[0].split('-');
expensesArray.push({
id: exp._id,
year: date[0],
month: date[1],
monthName: this.translateMonth(date[1]),
day: date[2],
category: exp.category.name,
recipient: exp.recipient,
value: exp.value,
currency: exp.currency,
color: exp.category.color,
});
}
expensesArray.sort(this.compare)
return expensesArray;
}
translateMonth(month) {
switch (month) {
case '01':
return "JAN";
case '02':
return "FEB";
case '03':
return "MAR";
case '04':
return "APR";
case '05':
return "MAY";
case '06':
return "JUN";
case '07':
return "JUL";
case '08':
return "AUG";
case '09':
return "SEP";
case '10':
return "OCT";
case '11':
return "NOV";
case '12':
return "DEC";
}
}
/**
 * Comparator for expense rows: newest date first (descending by
 * year/month/day, compared as zero-padded strings).
 *
 * BUG FIX: the original returned -1 for two identical dates, violating the
 * comparator contract (equal inputs must yield 0); it also ended with an
 * unreachable `return 0`.
 */
compare(a, b) {
    if (a.year !== b.year) {
        return a.year < b.year ? 1 : -1;
    }
    if (a.month !== b.month) {
        return a.month < b.month ? 1 : -1;
    }
    if (a.day !== b.day) {
        return a.day < b.day ? 1 : -1;
    }
    return 0;
}
groupByCategories(parsedTable) {
const groups = [];
for (let entry of parsedTable.data) {
const group = this.findGroupByCategory(groups, entry.category);
if (group != null) {
group.sum += parseInt(entry.value);
} else {
groups.push({
name: entry.category,
sum: entry.value,
color: entry.color,
});
}
}
return groups;
}
findGroupByCategory(groups, category) {
for (let group of groups) {
if (group.name == category) return group;
}
return null;
}
makeDataArray1(array): Array<any> {
const returnTable = [array.length];
for (let i = 0; i < array.length; i++) {
returnTable[i] = array[i].sum;
}
return returnTable;
}
makeColorArray1(array) {
const table = [];
const returnTable = [];
for (let i = 0; i < array.length; i++) {
returnTable.push([array[i].color]);
}
let barva = {
backgroundColor: returnTable
}
table[0] = barva;
return table;
}
makeLabelArray1(array) {
const returnTable = [array.length];
for (let i = 0; i < array.length; i++) {
returnTable[i] = array[i].name;
}
return returnTable;
}
generateDatasets(map) {
var datasets: ChartDataSets[] = [];
let keys = Array.from(map.keys());
for (let i of keys) {
datasets.push(this.generateDataset(map.get(i),i | const parsedTable = this.parseTable(this.expenses);
document.querySelector(".totaltext").innerHTML = "<h5>" + this.historyTotal + ": " + parsedTable.sum.toFixed(2); + "€</h5>"; | random_line_split |
blueberry_segmentation.py | 'image',
}
)
test_transform = A.Compose(
[
A.Resize(IMAGE_HEIGHT, IMAGE_WIDTH)
]
)
to_grayscale = A.Compose(
[
ToTensorV2()
]
)
class BlueberryDataset(Dataset):
def __init__(self, base_path, image_path, mask_path, transform=None):
self.images = []
self.masks = []
self.transform = transform
self.to_tensor = transforms.Compose([transforms.ToTensor()])
self.process_mask = transforms.Compose(
[
transforms.Grayscale(num_output_channels=1),
transforms.ToTensor(),
]
)
for image_file in os.listdir(image_path):
self.images.append(os.path.join(image_path, image_file))
mask_file = image_file[-12:-3] + 'png'
self.masks.append(os.path.join(mask_path, mask_file))
def __len__(self):
return len(self.images)
def | (self, index):
image = imread(self.images[index])
image = cvtColor(image, COLOR_BGR2RGB)
mask = imread(self.masks[index])
mask = cvtColor(mask, COLOR_BGR2RGB)
transformed = self.transform(image=image, mask=mask)
image = transformed['image']
mask = transformed['mask']
image = self.to_tensor(image)
mask = Image.fromarray(mask)
mask = self.process_mask(mask)
return image, mask
class BlueberryTestDataset(Dataset):
def __init__(self, base_path, image_path, transform=None):
self.images = []
self.transform = transform
self.to_tensor = transforms.Compose([transforms.ToTensor()])
for image_file in os.listdir(image_path):
self.images.append(os.path.join(image_path, image_file))
def __len__(self):
return len(self.images)
def __getitem__(self, index):
image = imread(self.images[index])
image = cvtColor(image, COLOR_BGR2RGB)
transformed = self.transform(image=image)
image = transformed['image']
image = self.to_tensor(image)
return image
dataset = BlueberryDataset(BASE_DIR, IMAGES_DIR, MASKS_DIR, transform=train_transform)
train_set, val_set = torch.utils.data.random_split(dataset, [6, 1])
test = BlueberryTestDataset(BASE_DIR, TEST_DIR, test_transform)
dataloaders = {
'train': DataLoader(train_set, batch_size=1, shuffle=True),
'val': DataLoader(val_set, batch_size=1, shuffle=True),
'test': DataLoader(test, batch_size=1, shuffle=True)
}
"""# Visualize Data"""
image1, mask1 = dataset[0]
image2, mask2 = dataset[2]
fig = plt.figure(figsize=(20,20))
ax1 = fig.add_subplot(2, 5, 1)
ax1.imshow(image1.permute(1, 2, 0))
ax2 = fig.add_subplot(2, 5, 2)
ax2.imshow(mask1.permute(1, 2, 0).squeeze(2))
ax3 = fig.add_subplot(2, 5, 3)
ax3.imshow(image2.permute(1, 2, 0))
ax4 = fig.add_subplot(2, 5, 4)
ax4.imshow(mask2.permute(1, 2, 0).squeeze(2))
plt.show()
"""# Build model"""
# Source: https://github.com/usuyama/pytorch-unet
from torch import nn
def convrelu(in_channels, out_channels, kernel, padding):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel, padding=padding),
nn.ReLU(inplace=True),
)
class ResNetUNet(nn.Module):
def __init__(self, n_class):
super().__init__()
self.base_model = models.resnet18(pretrained=True)
self.base_layers = list(self.base_model.children())
self.layer0 = nn.Sequential(*self.base_layers[:3]) # size=(N, 64, x.H/2, x.W/2)
self.layer0_1x1 = convrelu(64, 64, 1, 0)
self.layer1 = nn.Sequential(*self.base_layers[3:5]) # size=(N, 64, x.H/4, x.W/4)
self.layer1_1x1 = convrelu(64, 64, 1, 0)
self.layer2 = self.base_layers[5] # size=(N, 128, x.H/8, x.W/8)
self.layer2_1x1 = convrelu(128, 128, 1, 0)
self.layer3 = self.base_layers[6] # size=(N, 256, x.H/16, x.W/16)
self.layer3_1x1 = convrelu(256, 256, 1, 0)
self.layer4 = self.base_layers[7] # size=(N, 512, x.H/32, x.W/32)
self.layer4_1x1 = convrelu(512, 512, 1, 0)
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv_up3 = convrelu(256 + 512, 512, 3, 1)
self.conv_up2 = convrelu(128 + 512, 256, 3, 1)
self.conv_up1 = convrelu(64 + 256, 256, 3, 1)
self.conv_up0 = convrelu(64 + 256, 128, 3, 1)
self.conv_original_size0 = convrelu(3, 64, 3, 1)
self.conv_original_size1 = convrelu(64, 64, 3, 1)
self.conv_original_size2 = convrelu(64 + 128, 64, 3, 1)
self.conv_last = nn.Conv2d(64, n_class, 1)
def forward(self, input):
x_original = self.conv_original_size0(input)
x_original = self.conv_original_size1(x_original)
layer0 = self.layer0(input)
layer1 = self.layer1(layer0)
layer2 = self.layer2(layer1)
layer3 = self.layer3(layer2)
layer4 = self.layer4(layer3)
layer4 = self.layer4_1x1(layer4)
x = self.upsample(layer4)
layer3 = self.layer3_1x1(layer3)
x = torch.cat([x, layer3], dim=1)
x = self.conv_up3(x)
x = self.upsample(x)
layer2 = self.layer2_1x1(layer2)
x = torch.cat([x, layer2], dim=1)
x = self.conv_up2(x)
x = self.upsample(x)
layer1 = self.layer1_1x1(layer1)
x = torch.cat([x, layer1], dim=1)
x = self.conv_up1(x)
x = self.upsample(x)
layer0 = self.layer0_1x1(layer0)
x = torch.cat([x, layer0], dim=1)
x = self.conv_up0(x)
x = self.upsample(x)
x = torch.cat([x, x_original], dim=1)
x = self.conv_original_size2(x)
out = self.conv_last(x)
return out
unet = ResNetUNet(n_class=3)
"""# Model summary"""
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = ResNetUNet(n_class=1)
model = model.to(device)
from torchsummary import summary
summary(model, input_size=(3, 512, 512))
"""# Define training functions"""
# Source: https://github.com/usuyama/pytorch-unet
from collections import defaultdict
import torch.nn.functional as F
def dice_loss(pred, target, smooth = 1.):
pred = pred.contiguous()
target = target.contiguous()
intersection = (pred * target).sum(dim=2).sum(dim=2)
loss = (1 - ((2. * intersection + smooth) / (pred.sum(dim=2).sum(dim=2) + target.sum(dim=2).sum(dim=2) + smooth)))
return loss.mean()
def calc_loss(pred, target, metrics, bce_weight=0.5):
bce = F.binary_cross_entropy_with_logits(pred, target)
pred = F.sigmoid(pred)
dice = dice_loss(pred, target)
loss = bce * bce_weight + dice * (1 - bce_weight)
metrics['bce'] += bce.data.cpu().numpy() * target.size(0)
metrics['dice'] += dice.data.cpu().numpy() * target.size(0)
metrics['loss'] += loss.data.cpu().numpy() * target.size(0)
return loss
def print_metrics(metrics, epoch_samples, phase):
| __getitem__ | identifier_name |
blueberry_segmentation.py | """# Imports"""
import os
import matplotlib.pyplot as plt
from cv2 import imread, cvtColor, COLOR_BGR2RGB, COLOR_BGR2GRAY
from PIL import Image
import albumentations as A
from albumentations.pytorch.transforms import ToTensorV2
import torch
import torch.nn as nn
from torchvision import models
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
from torch import optim
from torch.optim import lr_scheduler
import time
import copy
"""# Constants"""
BASE_DIR = 'gdrive/MyDrive/datasets/Deep_BlueBerry_databases/instancesegmentation'
IMAGES_DIR = BASE_DIR + '/images/'
MASKS_DIR = BASE_DIR + '/masks/'
TEST_DIR = BASE_DIR + '/test'
IMAGE_HEIGHT = 512
IMAGE_WIDTH = 512
"""# Dataset"""
train_transform = A.Compose(
[
A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=5, val_shift_limit=5),
A.Rotate(),
A.GaussNoise(),
A.Resize(IMAGE_HEIGHT, IMAGE_WIDTH),
],
additional_targets={
'image' : 'image',
'mask' : 'image',
}
)
test_transform = A.Compose(
[
A.Resize(IMAGE_HEIGHT, IMAGE_WIDTH)
]
)
to_grayscale = A.Compose(
[
ToTensorV2()
]
)
class BlueberryDataset(Dataset):
def __init__(self, base_path, image_path, mask_path, transform=None):
self.images = []
self.masks = []
self.transform = transform
self.to_tensor = transforms.Compose([transforms.ToTensor()])
self.process_mask = transforms.Compose(
[
transforms.Grayscale(num_output_channels=1),
transforms.ToTensor(),
]
)
for image_file in os.listdir(image_path):
self.images.append(os.path.join(image_path, image_file))
mask_file = image_file[-12:-3] + 'png'
self.masks.append(os.path.join(mask_path, mask_file))
def __len__(self):
return len(self.images)
def __getitem__(self, index):
image = imread(self.images[index])
image = cvtColor(image, COLOR_BGR2RGB)
mask = imread(self.masks[index])
mask = cvtColor(mask, COLOR_BGR2RGB)
transformed = self.transform(image=image, mask=mask)
image = transformed['image']
mask = transformed['mask']
image = self.to_tensor(image)
mask = Image.fromarray(mask)
mask = self.process_mask(mask)
return image, mask
class BlueberryTestDataset(Dataset):
def __init__(self, base_path, image_path, transform=None):
self.images = []
self.transform = transform
self.to_tensor = transforms.Compose([transforms.ToTensor()])
for image_file in os.listdir(image_path):
self.images.append(os.path.join(image_path, image_file))
def __len__(self):
return len(self.images)
def __getitem__(self, index):
image = imread(self.images[index])
image = cvtColor(image, COLOR_BGR2RGB)
transformed = self.transform(image=image)
image = transformed['image']
image = self.to_tensor(image)
return image
dataset = BlueberryDataset(BASE_DIR, IMAGES_DIR, MASKS_DIR, transform=train_transform)
train_set, val_set = torch.utils.data.random_split(dataset, [6, 1])
test = BlueberryTestDataset(BASE_DIR, TEST_DIR, test_transform)
dataloaders = {
'train': DataLoader(train_set, batch_size=1, shuffle=True),
'val': DataLoader(val_set, batch_size=1, shuffle=True),
'test': DataLoader(test, batch_size=1, shuffle=True)
}
"""# Visualize Data"""
image1, mask1 = dataset[0]
image2, mask2 = dataset[2]
fig = plt.figure(figsize=(20,20))
ax1 = fig.add_subplot(2, 5, 1)
ax1.imshow(image1.permute(1, 2, 0))
ax2 = fig.add_subplot(2, 5, 2)
ax2.imshow(mask1.permute(1, 2, 0).squeeze(2))
ax3 = fig.add_subplot(2, 5, 3)
ax3.imshow(image2.permute(1, 2, 0))
ax4 = fig.add_subplot(2, 5, 4)
ax4.imshow(mask2.permute(1, 2, 0).squeeze(2))
plt.show()
"""# Build model"""
# Source: https://github.com/usuyama/pytorch-unet
from torch import nn
def convrelu(in_channels, out_channels, kernel, padding):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel, padding=padding),
nn.ReLU(inplace=True),
)
class ResNetUNet(nn.Module):
def __init__(self, n_class):
super().__init__()
self.base_model = models.resnet18(pretrained=True)
self.base_layers = list(self.base_model.children())
self.layer0 = nn.Sequential(*self.base_layers[:3]) # size=(N, 64, x.H/2, x.W/2)
self.layer0_1x1 = convrelu(64, 64, 1, 0)
self.layer1 = nn.Sequential(*self.base_layers[3:5]) # size=(N, 64, x.H/4, x.W/4)
self.layer1_1x1 = convrelu(64, 64, 1, 0)
self.layer2 = self.base_layers[5] # size=(N, 128, x.H/8, x.W/8)
self.layer2_1x1 = convrelu(128, 128, 1, 0)
self.layer3 = self.base_layers[6] # size=(N, 256, x.H/16, x.W/16)
self.layer3_1x1 = convrelu(256, 256, 1, 0)
self.layer4 = self.base_layers[7] # size=(N, 512, x.H/32, x.W/32)
self.layer4_1x1 = convrelu(512, 512, 1, 0)
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv_up3 = convrelu(256 + 512, 512, 3, 1)
self.conv_up2 = convrelu(128 + 512, 256, 3, 1)
self.conv_up1 = convrelu(64 + 256, 256, 3, 1)
self.conv_up0 = convrelu(64 + 256, 128, 3, 1)
self.conv_original_size0 = convrelu(3, 64, 3, 1)
self.conv_original_size1 = convrelu(64, 64, 3, 1)
self.conv_original_size2 = convrelu(64 + 128, 64, 3, 1)
self.conv_last = nn.Conv2d(64, n_class, 1)
def forward(self, input):
x_original = self.conv_original_size0(input)
x_original = self.conv_original_size1(x_original)
layer0 = self.layer0(input)
layer1 = self.layer1(layer0)
layer2 = self.layer2(layer1)
layer3 = self.layer3(layer2)
layer4 = self.layer4(layer3)
layer4 = self.layer4_1x1(layer4)
x = self.upsample(layer4)
layer3 = self.layer3_1x1(layer3)
x = torch.cat([x, layer3], dim=1)
x = self.conv_up3(x)
x = self.upsample(x)
layer2 = self.layer2_1x1(layer2)
x = torch.cat([x, layer2], dim=1)
x = self.conv_up2(x)
x = self.upsample(x)
layer1 = self.layer1_1x1(layer1)
x = torch.cat([x, layer1], dim=1)
x = self.conv_up1(x)
x = self.upsample(x)
layer0 = self.layer0_1x1(layer0)
x = torch.cat([x, layer0], dim=1)
x = self.conv_up0(x)
x = self.upsample(x)
x = torch.cat([x, x_original], dim=1)
x = self.conv_original_size2(x)
out = self.conv_last(x)
return out
unet = ResNetUNet(n_class=3)
"""# Model summary"""
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = ResNetUNet(n_class=1)
model = model.to(device)
from torchsummary import summary
summary(model, input_size=( |
!pip install albumentations==0.4.6
!pip install torch
!pip install torchvision
| random_line_split | |
blueberry_segmentation.py | self.transform = transform
self.to_tensor = transforms.Compose([transforms.ToTensor()])
for image_file in os.listdir(image_path):
self.images.append(os.path.join(image_path, image_file))
def __len__(self):
return len(self.images)
def __getitem__(self, index):
image = imread(self.images[index])
image = cvtColor(image, COLOR_BGR2RGB)
transformed = self.transform(image=image)
image = transformed['image']
image = self.to_tensor(image)
return image
dataset = BlueberryDataset(BASE_DIR, IMAGES_DIR, MASKS_DIR, transform=train_transform)
train_set, val_set = torch.utils.data.random_split(dataset, [6, 1])
test = BlueberryTestDataset(BASE_DIR, TEST_DIR, test_transform)
dataloaders = {
'train': DataLoader(train_set, batch_size=1, shuffle=True),
'val': DataLoader(val_set, batch_size=1, shuffle=True),
'test': DataLoader(test, batch_size=1, shuffle=True)
}
"""# Visualize Data"""
image1, mask1 = dataset[0]
image2, mask2 = dataset[2]
fig = plt.figure(figsize=(20,20))
ax1 = fig.add_subplot(2, 5, 1)
ax1.imshow(image1.permute(1, 2, 0))
ax2 = fig.add_subplot(2, 5, 2)
ax2.imshow(mask1.permute(1, 2, 0).squeeze(2))
ax3 = fig.add_subplot(2, 5, 3)
ax3.imshow(image2.permute(1, 2, 0))
ax4 = fig.add_subplot(2, 5, 4)
ax4.imshow(mask2.permute(1, 2, 0).squeeze(2))
plt.show()
"""# Build model"""
# Source: https://github.com/usuyama/pytorch-unet
from torch import nn
def convrelu(in_channels, out_channels, kernel, padding):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel, padding=padding),
nn.ReLU(inplace=True),
)
class ResNetUNet(nn.Module):
def __init__(self, n_class):
super().__init__()
self.base_model = models.resnet18(pretrained=True)
self.base_layers = list(self.base_model.children())
self.layer0 = nn.Sequential(*self.base_layers[:3]) # size=(N, 64, x.H/2, x.W/2)
self.layer0_1x1 = convrelu(64, 64, 1, 0)
self.layer1 = nn.Sequential(*self.base_layers[3:5]) # size=(N, 64, x.H/4, x.W/4)
self.layer1_1x1 = convrelu(64, 64, 1, 0)
self.layer2 = self.base_layers[5] # size=(N, 128, x.H/8, x.W/8)
self.layer2_1x1 = convrelu(128, 128, 1, 0)
self.layer3 = self.base_layers[6] # size=(N, 256, x.H/16, x.W/16)
self.layer3_1x1 = convrelu(256, 256, 1, 0)
self.layer4 = self.base_layers[7] # size=(N, 512, x.H/32, x.W/32)
self.layer4_1x1 = convrelu(512, 512, 1, 0)
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv_up3 = convrelu(256 + 512, 512, 3, 1)
self.conv_up2 = convrelu(128 + 512, 256, 3, 1)
self.conv_up1 = convrelu(64 + 256, 256, 3, 1)
self.conv_up0 = convrelu(64 + 256, 128, 3, 1)
self.conv_original_size0 = convrelu(3, 64, 3, 1)
self.conv_original_size1 = convrelu(64, 64, 3, 1)
self.conv_original_size2 = convrelu(64 + 128, 64, 3, 1)
self.conv_last = nn.Conv2d(64, n_class, 1)
def forward(self, input):
x_original = self.conv_original_size0(input)
x_original = self.conv_original_size1(x_original)
layer0 = self.layer0(input)
layer1 = self.layer1(layer0)
layer2 = self.layer2(layer1)
layer3 = self.layer3(layer2)
layer4 = self.layer4(layer3)
layer4 = self.layer4_1x1(layer4)
x = self.upsample(layer4)
layer3 = self.layer3_1x1(layer3)
x = torch.cat([x, layer3], dim=1)
x = self.conv_up3(x)
x = self.upsample(x)
layer2 = self.layer2_1x1(layer2)
x = torch.cat([x, layer2], dim=1)
x = self.conv_up2(x)
x = self.upsample(x)
layer1 = self.layer1_1x1(layer1)
x = torch.cat([x, layer1], dim=1)
x = self.conv_up1(x)
x = self.upsample(x)
layer0 = self.layer0_1x1(layer0)
x = torch.cat([x, layer0], dim=1)
x = self.conv_up0(x)
x = self.upsample(x)
x = torch.cat([x, x_original], dim=1)
x = self.conv_original_size2(x)
out = self.conv_last(x)
return out
unet = ResNetUNet(n_class=3)
"""# Model summary"""
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = ResNetUNet(n_class=1)
model = model.to(device)
from torchsummary import summary
summary(model, input_size=(3, 512, 512))
"""# Define training functions"""
# Source: https://github.com/usuyama/pytorch-unet
from collections import defaultdict
import torch.nn.functional as F
def dice_loss(pred, target, smooth = 1.):
pred = pred.contiguous()
target = target.contiguous()
intersection = (pred * target).sum(dim=2).sum(dim=2)
loss = (1 - ((2. * intersection + smooth) / (pred.sum(dim=2).sum(dim=2) + target.sum(dim=2).sum(dim=2) + smooth)))
return loss.mean()
def calc_loss(pred, target, metrics, bce_weight=0.5):
bce = F.binary_cross_entropy_with_logits(pred, target)
pred = F.sigmoid(pred)
dice = dice_loss(pred, target)
loss = bce * bce_weight + dice * (1 - bce_weight)
metrics['bce'] += bce.data.cpu().numpy() * target.size(0)
metrics['dice'] += dice.data.cpu().numpy() * target.size(0)
metrics['loss'] += loss.data.cpu().numpy() * target.size(0)
return loss
def print_metrics(metrics, epoch_samples, phase):
outputs = []
for k in metrics.keys():
outputs.append("{}: {:4f}".format(k, metrics[k] / epoch_samples))
print("{}: {}".format(phase, ", ".join(outputs)))
def train_model(model, optimizer, scheduler, num_epochs=25):
training_loss_array = []
validation_loss_array = []
best_model_wts = copy.deepcopy(model.state_dict())
best_loss = 1e10
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
since = time.time()
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
scheduler.step()
for param_group in optimizer.param_groups:
print("LR", param_group['lr'])
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
metrics = defaultdict(float)
epoch_samples = 0
for inputs, labels in dataloaders[phase]:
| inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
loss = calc_loss(outputs, labels, metrics)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
epoch_samples += inputs.size(0) | conditional_block | |
blueberry_segmentation.py | 'image',
}
)
test_transform = A.Compose(
[
A.Resize(IMAGE_HEIGHT, IMAGE_WIDTH)
]
)
to_grayscale = A.Compose(
[
ToTensorV2()
]
)
class BlueberryDataset(Dataset):
def __init__(self, base_path, image_path, mask_path, transform=None):
|
def __len__(self):
return len(self.images)
def __getitem__(self, index):
image = imread(self.images[index])
image = cvtColor(image, COLOR_BGR2RGB)
mask = imread(self.masks[index])
mask = cvtColor(mask, COLOR_BGR2RGB)
transformed = self.transform(image=image, mask=mask)
image = transformed['image']
mask = transformed['mask']
image = self.to_tensor(image)
mask = Image.fromarray(mask)
mask = self.process_mask(mask)
return image, mask
class BlueberryTestDataset(Dataset):
def __init__(self, base_path, image_path, transform=None):
self.images = []
self.transform = transform
self.to_tensor = transforms.Compose([transforms.ToTensor()])
for image_file in os.listdir(image_path):
self.images.append(os.path.join(image_path, image_file))
def __len__(self):
return len(self.images)
def __getitem__(self, index):
image = imread(self.images[index])
image = cvtColor(image, COLOR_BGR2RGB)
transformed = self.transform(image=image)
image = transformed['image']
image = self.to_tensor(image)
return image
dataset = BlueberryDataset(BASE_DIR, IMAGES_DIR, MASKS_DIR, transform=train_transform)
train_set, val_set = torch.utils.data.random_split(dataset, [6, 1])
test = BlueberryTestDataset(BASE_DIR, TEST_DIR, test_transform)
dataloaders = {
'train': DataLoader(train_set, batch_size=1, shuffle=True),
'val': DataLoader(val_set, batch_size=1, shuffle=True),
'test': DataLoader(test, batch_size=1, shuffle=True)
}
"""# Visualize Data"""
image1, mask1 = dataset[0]
image2, mask2 = dataset[2]
fig = plt.figure(figsize=(20,20))
ax1 = fig.add_subplot(2, 5, 1)
ax1.imshow(image1.permute(1, 2, 0))
ax2 = fig.add_subplot(2, 5, 2)
ax2.imshow(mask1.permute(1, 2, 0).squeeze(2))
ax3 = fig.add_subplot(2, 5, 3)
ax3.imshow(image2.permute(1, 2, 0))
ax4 = fig.add_subplot(2, 5, 4)
ax4.imshow(mask2.permute(1, 2, 0).squeeze(2))
plt.show()
"""# Build model"""
# Source: https://github.com/usuyama/pytorch-unet
from torch import nn
def convrelu(in_channels, out_channels, kernel, padding):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel, padding=padding),
nn.ReLU(inplace=True),
)
class ResNetUNet(nn.Module):
def __init__(self, n_class):
super().__init__()
self.base_model = models.resnet18(pretrained=True)
self.base_layers = list(self.base_model.children())
self.layer0 = nn.Sequential(*self.base_layers[:3]) # size=(N, 64, x.H/2, x.W/2)
self.layer0_1x1 = convrelu(64, 64, 1, 0)
self.layer1 = nn.Sequential(*self.base_layers[3:5]) # size=(N, 64, x.H/4, x.W/4)
self.layer1_1x1 = convrelu(64, 64, 1, 0)
self.layer2 = self.base_layers[5] # size=(N, 128, x.H/8, x.W/8)
self.layer2_1x1 = convrelu(128, 128, 1, 0)
self.layer3 = self.base_layers[6] # size=(N, 256, x.H/16, x.W/16)
self.layer3_1x1 = convrelu(256, 256, 1, 0)
self.layer4 = self.base_layers[7] # size=(N, 512, x.H/32, x.W/32)
self.layer4_1x1 = convrelu(512, 512, 1, 0)
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv_up3 = convrelu(256 + 512, 512, 3, 1)
self.conv_up2 = convrelu(128 + 512, 256, 3, 1)
self.conv_up1 = convrelu(64 + 256, 256, 3, 1)
self.conv_up0 = convrelu(64 + 256, 128, 3, 1)
self.conv_original_size0 = convrelu(3, 64, 3, 1)
self.conv_original_size1 = convrelu(64, 64, 3, 1)
self.conv_original_size2 = convrelu(64 + 128, 64, 3, 1)
self.conv_last = nn.Conv2d(64, n_class, 1)
def forward(self, input):
x_original = self.conv_original_size0(input)
x_original = self.conv_original_size1(x_original)
layer0 = self.layer0(input)
layer1 = self.layer1(layer0)
layer2 = self.layer2(layer1)
layer3 = self.layer3(layer2)
layer4 = self.layer4(layer3)
layer4 = self.layer4_1x1(layer4)
x = self.upsample(layer4)
layer3 = self.layer3_1x1(layer3)
x = torch.cat([x, layer3], dim=1)
x = self.conv_up3(x)
x = self.upsample(x)
layer2 = self.layer2_1x1(layer2)
x = torch.cat([x, layer2], dim=1)
x = self.conv_up2(x)
x = self.upsample(x)
layer1 = self.layer1_1x1(layer1)
x = torch.cat([x, layer1], dim=1)
x = self.conv_up1(x)
x = self.upsample(x)
layer0 = self.layer0_1x1(layer0)
x = torch.cat([x, layer0], dim=1)
x = self.conv_up0(x)
x = self.upsample(x)
x = torch.cat([x, x_original], dim=1)
x = self.conv_original_size2(x)
out = self.conv_last(x)
return out
unet = ResNetUNet(n_class=3)
"""# Model summary"""
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = ResNetUNet(n_class=1)
model = model.to(device)
from torchsummary import summary
summary(model, input_size=(3, 512, 512))
"""# Define training functions"""
# Source: https://github.com/usuyama/pytorch-unet
from collections import defaultdict
import torch.nn.functional as F
def dice_loss(pred, target, smooth = 1.):
pred = pred.contiguous()
target = target.contiguous()
intersection = (pred * target).sum(dim=2).sum(dim=2)
loss = (1 - ((2. * intersection + smooth) / (pred.sum(dim=2).sum(dim=2) + target.sum(dim=2).sum(dim=2) + smooth)))
return loss.mean()
def calc_loss(pred, target, metrics, bce_weight=0.5):
bce = F.binary_cross_entropy_with_logits(pred, target)
pred = F.sigmoid(pred)
dice = dice_loss(pred, target)
loss = bce * bce_weight + dice * (1 - bce_weight)
metrics['bce'] += bce.data.cpu().numpy() * target.size(0)
metrics['dice'] += dice.data.cpu().numpy() * target.size(0)
metrics['loss'] += loss.data.cpu().numpy() * target.size(0)
return loss
def print_metrics(metrics, epoch_samples, phase):
outputs | self.images = []
self.masks = []
self.transform = transform
self.to_tensor = transforms.Compose([transforms.ToTensor()])
self.process_mask = transforms.Compose(
[
transforms.Grayscale(num_output_channels=1),
transforms.ToTensor(),
]
)
for image_file in os.listdir(image_path):
self.images.append(os.path.join(image_path, image_file))
mask_file = image_file[-12:-3] + 'png'
self.masks.append(os.path.join(mask_path, mask_file)) | identifier_body |
post.page.ts | profile picture',
profile_picture_female: 'updated her profile picture',
profile_cover_male: 'updated his cover photo',
profile_cover_female: 'updated her cover photo',
page_picture: 'updated page picture',
page_cover: 'updated cover photo',
group_picture: 'updated group picture',
group_cover: 'updated group cover',
event_cover: 'updated event cover'
};
sub : any = '';
slidesPerView : number = 1;
public postElement = [];
public sharedInfo = [];
private pageCount = 2;
private arrayPosition = 0;
private isAndroid = false;
private mediapath = "https://followthebirds.com/content/uploads/";
usermayknow : any = [];
stories : any = [];
height : number = 300;
width : number = 300;
private user_picture = localStorage.getItem('user_picture');
slideOpts = {
initialSlide: 3,
speed: 400
};
constructor(
public navCtrl: NavController,
public toastCtrl: ToastController,
private camera: Camera,
public actionSheetCtrl: ActionSheetController,
public menu: MenuController,
public modalCtrl: ModalController,
private transfer: FileTransfer,
private file: File,
private platform: Platform,
private alertCtrl: AlertController,
private post: PostService,
public user: UserService,
public router: Router
) {
platform.ready().then((readySource) => {
this.width = platform.width();
this.height = platform.height();
});
}
ngOnInit(){
this.getStories();
this.isAndroid = this.platform.is("android");
this.postElement['handle'] = "me";
this.postElement['id'] = '';
this.post.getfeeds('newsfeed',localStorage.getItem('user_id'),localStorage.getItem('user_id'),{})
.then(data => {
this.postFeeds = [];
let item = data[0];
localStorage.setItem('last_post_live',item[0].post_id);
for (var key in item) {
if(item[key].post_type == 'photos'){
this.post_type.photos = "added "+item[key].photos_num+"photos";
}
this.postFeeds.push(item[key]);
}
});
}
doInfinite(event) {
setTimeout(() => {
this.post.getfeeds('newsfeed',localStorage.getItem('user_id'),localStorage.getItem('user_id'),{'page': this.pageCount})
.then(data => {
if(data[0].length > 0) {
let item = data[0];
for (var key in item) {
this.postFeeds.push(item[key]);
}
}
});
this.pageCount = this.pageCount + 1;
event.target.complete();
}, 500);
}
doRefresh(event) {
this.ngOnInit();
setTimeout(() => {
console.log('Async operation has ended');
event.target.complete();
}, 2000);
}
getPeopleYouMayKnow(){
this.user.getPeopleYouMayKnow('may_know',parseInt(localStorage.getItem('user_id')))
.then(data => {
this.usermayknow = data[0];
});
}
getStories(){
this.user.getStories({user_id:localStorage.getItem('user_id')})
.then(data => {
this.stories = data[0];
console.log("stories",data)
});
}
viewStory(story){
this.router.navigate(['/StoryPage',{story: story}]);
}
viewPost(post) {
if(post.photos_num == '1'){
this.router.navigate(['/view-photo',{photo: post.photos[0]}]);
} else {
this.router.navigate(['/view-post',{post: post}]);
}
}
viewProfile(post) {
if(post.user_type == 'user'){
this.router.navigate(['/profile',{user_name: post.user_name,user_id:post.user_id}]);
}
if(post.user_type == 'page'){
this.router.navigate(['/PageProfilePage',{pageProfile:post}]);
}
if(post.user_type == 'group'){
this.router.navigate(['/GroupProfilePage',{groupProfile:post}]);
}
if(post.user_type == 'event'){
this.router.navigate(['/EventProfilePage',{eventProfile:post}]);
}
}
downloadAttachment(filePath){
let arr = filePath.split('/');
var filename = arr.pop();
let url = encodeURI(filePath);
const fileTransfer: FileTransferObject = this.transfer.create();
fileTransfer.download(this.mediapath+filePath, this.file.dataDirectory + filename).then((entry) => {
let toast = this.toastCtrl.create({
message: "Attachment bas been download",
duration: 3000,
position: 'top'
});
}, (error) => {
// handle error
let toast = this.toastCtrl.create({
message: "Downloading failure! retry.",
duration: 3000,
position: 'top'
});
});
}
async viewComments(index,comments,post_id){
const modal = await this.modalCtrl.create({
component: CommentsPage,
componentProps: {
'comments': comments,
'post_id': post_id,
'handle': 'post'
}
});
await modal.present();
}
async sharePostCtrl(post_id)
{
let prompt = await this.alertCtrl.create({
message: 'Share this post',
inputs : [
{
type:'radio',
label:'Share post now ',
value:post_id
},
{
type:'radio',
label:'Write Post',
value:post_id
}],
buttons : [
{
text: "Cancel",
handler: data => {
console.log("cancel clicked");
}
},
{
text: "Share",
handler: data => {
this.sharePost('share',post_id);
}
}]});
await prompt.present();
}
async postActivity(event,post)
{
let buttons : any = [
{
icon: !this.platform.is('ios') ? 'ios-bookmark' : null,
text: 'Save Post',
handler: () => {
this.reactAction('save_post',post.post_id);
}
}
];
if(post.author_id != localStorage.getItem('user_id')){
let report : any = {
icon: !this.platform.is('ios') ? 'ios-flag' : null,
text: 'Report Post',
handler: () => {
this.reportAction("post",post.post_id)
}
};
let hide : any = {
icon: !this.platform.is('ios') ? 'ios-eye-off' : null,
text: 'Hide Post',
handler: () => {
event.target.parentNode.parentNode.parentNode.parentNode.remove();
this.reactAction("hide_post",post.post_id)
}
};
buttons.push(report);
buttons.push(hide);
}
if(post.author_id == localStorage.getItem('user_id')){
let btn : any = {
icon: !this.platform.is('ios') ? 'ios-trash' : null,
text: 'Delete Post',
handler: async () => {
const confirm = await this.alertCtrl.create({
header: 'Delete post?',
message: 'Once you delete you can not undo this step.',
buttons: [
{
text: 'Cancel',
handler: () => {
}
}
,{
text: 'Delete',
handler: () => {
event.target.parentNode.parentNode.parentNode.parentNode.remove();
this.reactAction("delete_post",post.post_id)
}
}
]
});
await confirm.present();
}
};
buttons.push(btn);
}
const actionSheet = await this.actionSheetCtrl.create({
buttons
});
await actionSheet.present();
}
getBackgroundStyle(url) {
if(!url){
return 'url(assets/followthebirdImgs/no-profile-img.jpeg)'
} else {
return 'url(' + this.mediapath+url + ')'
}
}
getStoryBackgroundStyle(media) {
if(media != 'null'){
console.log(media);
let obj = JSON.parse(media)
return 'url(' + this.mediapath+obj[0].src + ')'
} else {
return 'url(assets/followthebirdImgs/story_background.png)'
}
}
getMedia(media) {
let obj = JSON.parse(media)
return this.mediapath+obj[0].src;
}
sharePost(type,id) | {
this.post.sharePost({'do':type,id:id,my_id:localStorage.getItem('user_id')}).subscribe(async (resp) => {
const toast = await this.toastCtrl.create({
message: "Post has been shared successfully",
duration: 3000,
position: 'top'
});
toast.present();
}, async (err) => {
const toast = await this.toastCtrl.create({
message: "Unable to post. Retry",
duration: 3000,
position: 'top',
});
toast.present();
});
} | identifier_body | |
post.page.ts | badgeCount = 6;
postFeeds: any = [];
post_type: any = {
shared: 'shared',
link: 'shared a link',
poll: 'created a poll',
product: 'added new product for sell',
article: 'added new article',
video : 'added a video',
audio: 'added an audio',
file: 'added a file',
photos: 'added a photo',
profile_picture_male: 'updated his profile picture',
profile_picture_female: 'updated her profile picture',
profile_cover_male: 'updated his cover photo',
profile_cover_female: 'updated her cover photo',
page_picture: 'updated page picture',
page_cover: 'updated cover photo',
group_picture: 'updated group picture',
group_cover: 'updated group cover',
event_cover: 'updated event cover'
};
sub : any = '';
slidesPerView : number = 1;
public postElement = [];
public sharedInfo = [];
private pageCount = 2;
private arrayPosition = 0;
private isAndroid = false;
private mediapath = "https://followthebirds.com/content/uploads/";
usermayknow : any = [];
stories : any = [];
height : number = 300;
width : number = 300;
private user_picture = localStorage.getItem('user_picture');
slideOpts = {
initialSlide: 3,
speed: 400
};
constructor(
public navCtrl: NavController,
public toastCtrl: ToastController,
private camera: Camera,
public actionSheetCtrl: ActionSheetController,
public menu: MenuController,
public modalCtrl: ModalController,
private transfer: FileTransfer,
private file: File,
private platform: Platform,
private alertCtrl: AlertController,
private post: PostService,
public user: UserService,
public router: Router
) {
platform.ready().then((readySource) => {
this.width = platform.width();
this.height = platform.height();
});
}
ngOnInit(){
this.getStories();
this.isAndroid = this.platform.is("android");
this.postElement['handle'] = "me";
this.postElement['id'] = '';
this.post.getfeeds('newsfeed',localStorage.getItem('user_id'),localStorage.getItem('user_id'),{})
.then(data => {
this.postFeeds = [];
let item = data[0];
localStorage.setItem('last_post_live',item[0].post_id);
for (var key in item) {
if(item[key].post_type == 'photos'){
this.post_type.photos = "added "+item[key].photos_num+"photos";
}
this.postFeeds.push(item[key]);
}
});
}
doInfinite(event) {
setTimeout(() => {
this.post.getfeeds('newsfeed',localStorage.getItem('user_id'),localStorage.getItem('user_id'),{'page': this.pageCount})
.then(data => {
if(data[0].length > 0) {
let item = data[0];
for (var key in item) {
this.postFeeds.push(item[key]);
}
}
});
this.pageCount = this.pageCount + 1;
event.target.complete();
}, 500);
}
doRefresh(event) {
this.ngOnInit();
setTimeout(() => {
console.log('Async operation has ended');
event.target.complete();
}, 2000);
}
getPeopleYouMayKnow(){
this.user.getPeopleYouMayKnow('may_know',parseInt(localStorage.getItem('user_id')))
.then(data => {
this.usermayknow = data[0];
});
}
getStories(){
this.user.getStories({user_id:localStorage.getItem('user_id')})
.then(data => {
this.stories = data[0];
console.log("stories",data)
});
}
viewStory(story){
this.router.navigate(['/StoryPage',{story: story}]);
}
viewPost(post) {
if(post.photos_num == '1'){
this.router.navigate(['/view-photo',{photo: post.photos[0]}]);
} else {
this.router.navigate(['/view-post',{post: post}]);
}
}
viewProfile(post) {
if(post.user_type == 'user'){
this.router.navigate(['/profile',{user_name: post.user_name,user_id:post.user_id}]);
}
if(post.user_type == 'page'){
this.router.navigate(['/PageProfilePage',{pageProfile:post}]);
}
if(post.user_type == 'group'){
this.router.navigate(['/GroupProfilePage',{groupProfile:post}]);
}
if(post.user_type == 'event'){
this.router.navigate(['/EventProfilePage',{eventProfile:post}]);
}
}
downloadAttachment(filePath){
let arr = filePath.split('/');
var filename = arr.pop();
let url = encodeURI(filePath);
const fileTransfer: FileTransferObject = this.transfer.create();
fileTransfer.download(this.mediapath+filePath, this.file.dataDirectory + filename).then((entry) => {
let toast = this.toastCtrl.create({
message: "Attachment bas been download",
duration: 3000,
position: 'top'
});
}, (error) => {
// handle error
let toast = this.toastCtrl.create({
message: "Downloading failure! retry.",
duration: 3000,
position: 'top'
});
});
}
async viewComments(index,comments,post_id){
const modal = await this.modalCtrl.create({
component: CommentsPage,
componentProps: {
'comments': comments,
'post_id': post_id,
'handle': 'post'
}
});
await modal.present();
}
async sharePostCtrl(post_id)
{
let prompt = await this.alertCtrl.create({
message: 'Share this post',
inputs : [
{
type:'radio',
label:'Share post now ',
value:post_id
},
{
type:'radio',
label:'Write Post',
value:post_id
}],
buttons : [
{
text: "Cancel",
handler: data => {
console.log("cancel clicked");
}
},
{
text: "Share",
handler: data => {
this.sharePost('share',post_id);
}
}]});
await prompt.present();
}
async postActivity(event,post)
{
let buttons : any = [
{
icon: !this.platform.is('ios') ? 'ios-bookmark' : null,
text: 'Save Post',
handler: () => {
this.reactAction('save_post',post.post_id);
}
}
];
if(post.author_id != localStorage.getItem('user_id')){
let report : any = {
icon: !this.platform.is('ios') ? 'ios-flag' : null,
text: 'Report Post',
handler: () => {
this.reportAction("post",post.post_id)
}
};
let hide : any = {
icon: !this.platform.is('ios') ? 'ios-eye-off' : null,
text: 'Hide Post',
handler: () => {
event.target.parentNode.parentNode.parentNode.parentNode.remove();
this.reactAction("hide_post",post.post_id)
}
};
buttons.push(report);
buttons.push(hide);
}
if(post.author_id == localStorage.getItem('user_id')){
let btn : any = {
icon: !this.platform.is('ios') ? 'ios-trash' : null,
text: 'Delete Post',
handler: async () => {
const confirm = await this.alertCtrl.create({
header: 'Delete post?',
message: 'Once you delete you can not undo this step.',
buttons: [
{
text: 'Cancel',
handler: () => {
}
}
,{
text: 'Delete',
handler: () => {
event.target.parentNode.parentNode.parentNode.parentNode.remove();
this.reactAction("delete_post",post.post_id)
}
}
]
});
await confirm.present();
}
};
buttons.push(btn);
}
const actionSheet = await this.actionSheetCtrl.create({
buttons
});
await actionSheet.present();
}
getBackgroundStyle(url) {
if(!url){
return 'url(assets/followthebirdImgs/no-profile-img.jpeg)'
} else {
return 'url(' + this.mediapath+url + ')'
}
}
getStoryBackgroundStyle(media) {
if(media != 'null') | else {
return 'url(assets/followthebirdImgs/story_background.png)'
}
}
getMedia(media) {
let obj = JSON.parse(media)
return this.mediapath+obj[0].src;
}
sharePost(type,id){
this.post.sharePost({'do':type,id:id,my_id:localStorage.getItem('user_id')}).subscribe(async (resp) => {
const toast = | {
console.log(media);
let obj = JSON.parse(media)
return 'url(' + this.mediapath+obj[0].src + ')'
} | conditional_block |
post.page.ts | badgeCount = 6;
postFeeds: any = [];
post_type: any = {
shared: 'shared',
link: 'shared a link',
poll: 'created a poll',
product: 'added new product for sell',
article: 'added new article',
video : 'added a video',
audio: 'added an audio',
file: 'added a file',
photos: 'added a photo',
profile_picture_male: 'updated his profile picture',
profile_picture_female: 'updated her profile picture',
profile_cover_male: 'updated his cover photo',
profile_cover_female: 'updated her cover photo',
page_picture: 'updated page picture',
page_cover: 'updated cover photo',
group_picture: 'updated group picture',
group_cover: 'updated group cover',
event_cover: 'updated event cover'
};
sub : any = '';
slidesPerView : number = 1;
public postElement = [];
public sharedInfo = [];
private pageCount = 2;
private arrayPosition = 0;
private isAndroid = false;
private mediapath = "https://followthebirds.com/content/uploads/";
usermayknow : any = [];
stories : any = [];
height : number = 300;
width : number = 300;
private user_picture = localStorage.getItem('user_picture');
slideOpts = {
initialSlide: 3,
speed: 400
};
constructor(
public navCtrl: NavController,
public toastCtrl: ToastController,
private camera: Camera,
public actionSheetCtrl: ActionSheetController,
public menu: MenuController,
public modalCtrl: ModalController,
private transfer: FileTransfer,
private file: File,
private platform: Platform,
private alertCtrl: AlertController,
private post: PostService,
public user: UserService,
public router: Router
) {
platform.ready().then((readySource) => {
this.width = platform.width();
this.height = platform.height();
});
}
ngOnInit(){
this.getStories();
this.isAndroid = this.platform.is("android");
this.postElement['handle'] = "me"; | let item = data[0];
localStorage.setItem('last_post_live',item[0].post_id);
for (var key in item) {
if(item[key].post_type == 'photos'){
this.post_type.photos = "added "+item[key].photos_num+"photos";
}
this.postFeeds.push(item[key]);
}
});
}
doInfinite(event) {
setTimeout(() => {
this.post.getfeeds('newsfeed',localStorage.getItem('user_id'),localStorage.getItem('user_id'),{'page': this.pageCount})
.then(data => {
if(data[0].length > 0) {
let item = data[0];
for (var key in item) {
this.postFeeds.push(item[key]);
}
}
});
this.pageCount = this.pageCount + 1;
event.target.complete();
}, 500);
}
doRefresh(event) {
this.ngOnInit();
setTimeout(() => {
console.log('Async operation has ended');
event.target.complete();
}, 2000);
}
getPeopleYouMayKnow(){
this.user.getPeopleYouMayKnow('may_know',parseInt(localStorage.getItem('user_id')))
.then(data => {
this.usermayknow = data[0];
});
}
getStories(){
this.user.getStories({user_id:localStorage.getItem('user_id')})
.then(data => {
this.stories = data[0];
console.log("stories",data)
});
}
viewStory(story){
this.router.navigate(['/StoryPage',{story: story}]);
}
viewPost(post) {
if(post.photos_num == '1'){
this.router.navigate(['/view-photo',{photo: post.photos[0]}]);
} else {
this.router.navigate(['/view-post',{post: post}]);
}
}
viewProfile(post) {
if(post.user_type == 'user'){
this.router.navigate(['/profile',{user_name: post.user_name,user_id:post.user_id}]);
}
if(post.user_type == 'page'){
this.router.navigate(['/PageProfilePage',{pageProfile:post}]);
}
if(post.user_type == 'group'){
this.router.navigate(['/GroupProfilePage',{groupProfile:post}]);
}
if(post.user_type == 'event'){
this.router.navigate(['/EventProfilePage',{eventProfile:post}]);
}
}
downloadAttachment(filePath){
let arr = filePath.split('/');
var filename = arr.pop();
let url = encodeURI(filePath);
const fileTransfer: FileTransferObject = this.transfer.create();
fileTransfer.download(this.mediapath+filePath, this.file.dataDirectory + filename).then((entry) => {
let toast = this.toastCtrl.create({
message: "Attachment bas been download",
duration: 3000,
position: 'top'
});
}, (error) => {
// handle error
let toast = this.toastCtrl.create({
message: "Downloading failure! retry.",
duration: 3000,
position: 'top'
});
});
}
async viewComments(index,comments,post_id){
const modal = await this.modalCtrl.create({
component: CommentsPage,
componentProps: {
'comments': comments,
'post_id': post_id,
'handle': 'post'
}
});
await modal.present();
}
async sharePostCtrl(post_id)
{
let prompt = await this.alertCtrl.create({
message: 'Share this post',
inputs : [
{
type:'radio',
label:'Share post now ',
value:post_id
},
{
type:'radio',
label:'Write Post',
value:post_id
}],
buttons : [
{
text: "Cancel",
handler: data => {
console.log("cancel clicked");
}
},
{
text: "Share",
handler: data => {
this.sharePost('share',post_id);
}
}]});
await prompt.present();
}
async postActivity(event,post)
{
let buttons : any = [
{
icon: !this.platform.is('ios') ? 'ios-bookmark' : null,
text: 'Save Post',
handler: () => {
this.reactAction('save_post',post.post_id);
}
}
];
if(post.author_id != localStorage.getItem('user_id')){
let report : any = {
icon: !this.platform.is('ios') ? 'ios-flag' : null,
text: 'Report Post',
handler: () => {
this.reportAction("post",post.post_id)
}
};
let hide : any = {
icon: !this.platform.is('ios') ? 'ios-eye-off' : null,
text: 'Hide Post',
handler: () => {
event.target.parentNode.parentNode.parentNode.parentNode.remove();
this.reactAction("hide_post",post.post_id)
}
};
buttons.push(report);
buttons.push(hide);
}
if(post.author_id == localStorage.getItem('user_id')){
let btn : any = {
icon: !this.platform.is('ios') ? 'ios-trash' : null,
text: 'Delete Post',
handler: async () => {
const confirm = await this.alertCtrl.create({
header: 'Delete post?',
message: 'Once you delete you can not undo this step.',
buttons: [
{
text: 'Cancel',
handler: () => {
}
}
,{
text: 'Delete',
handler: () => {
event.target.parentNode.parentNode.parentNode.parentNode.remove();
this.reactAction("delete_post",post.post_id)
}
}
]
});
await confirm.present();
}
};
buttons.push(btn);
}
const actionSheet = await this.actionSheetCtrl.create({
buttons
});
await actionSheet.present();
}
getBackgroundStyle(url) {
if(!url){
return 'url(assets/followthebirdImgs/no-profile-img.jpeg)'
} else {
return 'url(' + this.mediapath+url + ')'
}
}
getStoryBackgroundStyle(media) {
if(media != 'null'){
console.log(media);
let obj = JSON.parse(media)
return 'url(' + this.mediapath+obj[0].src + ')'
} else {
return 'url(assets/followthebirdImgs/story_background.png)'
}
}
getMedia(media) {
let obj = JSON.parse(media)
return this.mediapath+obj[0].src;
}
sharePost(type,id){
this.post.sharePost({'do':type,id:id,my_id:localStorage.getItem('user_id')}).subscribe(async (resp) => {
const toast = await | this.postElement['id'] = '';
this.post.getfeeds('newsfeed',localStorage.getItem('user_id'),localStorage.getItem('user_id'),{})
.then(data => {
this.postFeeds = []; | random_line_split |
post.page.ts | badgeCount = 6;
postFeeds: any = [];
post_type: any = {
shared: 'shared',
link: 'shared a link',
poll: 'created a poll',
product: 'added new product for sell',
article: 'added new article',
video : 'added a video',
audio: 'added an audio',
file: 'added a file',
photos: 'added a photo',
profile_picture_male: 'updated his profile picture',
profile_picture_female: 'updated her profile picture',
profile_cover_male: 'updated his cover photo',
profile_cover_female: 'updated her cover photo',
page_picture: 'updated page picture',
page_cover: 'updated cover photo',
group_picture: 'updated group picture',
group_cover: 'updated group cover',
event_cover: 'updated event cover'
};
sub : any = '';
slidesPerView : number = 1;
public postElement = [];
public sharedInfo = [];
private pageCount = 2;
private arrayPosition = 0;
private isAndroid = false;
private mediapath = "https://followthebirds.com/content/uploads/";
usermayknow : any = [];
stories : any = [];
height : number = 300;
width : number = 300;
private user_picture = localStorage.getItem('user_picture');
slideOpts = {
initialSlide: 3,
speed: 400
};
constructor(
public navCtrl: NavController,
public toastCtrl: ToastController,
private camera: Camera,
public actionSheetCtrl: ActionSheetController,
public menu: MenuController,
public modalCtrl: ModalController,
private transfer: FileTransfer,
private file: File,
private platform: Platform,
private alertCtrl: AlertController,
private post: PostService,
public user: UserService,
public router: Router
) {
platform.ready().then((readySource) => {
this.width = platform.width();
this.height = platform.height();
});
}
ngOnInit(){
this.getStories();
this.isAndroid = this.platform.is("android");
this.postElement['handle'] = "me";
this.postElement['id'] = '';
this.post.getfeeds('newsfeed',localStorage.getItem('user_id'),localStorage.getItem('user_id'),{})
.then(data => {
this.postFeeds = [];
let item = data[0];
localStorage.setItem('last_post_live',item[0].post_id);
for (var key in item) {
if(item[key].post_type == 'photos'){
this.post_type.photos = "added "+item[key].photos_num+"photos";
}
this.postFeeds.push(item[key]);
}
});
}
doInfinite(event) {
setTimeout(() => {
this.post.getfeeds('newsfeed',localStorage.getItem('user_id'),localStorage.getItem('user_id'),{'page': this.pageCount})
.then(data => {
if(data[0].length > 0) {
let item = data[0];
for (var key in item) {
this.postFeeds.push(item[key]);
}
}
});
this.pageCount = this.pageCount + 1;
event.target.complete();
}, 500);
}
doRefresh(event) {
this.ngOnInit();
setTimeout(() => {
console.log('Async operation has ended');
event.target.complete();
}, 2000);
}
getPeopleYouMayKnow(){
this.user.getPeopleYouMayKnow('may_know',parseInt(localStorage.getItem('user_id')))
.then(data => {
this.usermayknow = data[0];
});
}
getStories(){
this.user.getStories({user_id:localStorage.getItem('user_id')})
.then(data => {
this.stories = data[0];
console.log("stories",data)
});
}
viewStory(story){
this.router.navigate(['/StoryPage',{story: story}]);
}
viewPost(post) {
if(post.photos_num == '1'){
this.router.navigate(['/view-photo',{photo: post.photos[0]}]);
} else {
this.router.navigate(['/view-post',{post: post}]);
}
}
viewProfile(post) {
if(post.user_type == 'user'){
this.router.navigate(['/profile',{user_name: post.user_name,user_id:post.user_id}]);
}
if(post.user_type == 'page'){
this.router.navigate(['/PageProfilePage',{pageProfile:post}]);
}
if(post.user_type == 'group'){
this.router.navigate(['/GroupProfilePage',{groupProfile:post}]);
}
if(post.user_type == 'event'){
this.router.navigate(['/EventProfilePage',{eventProfile:post}]);
}
}
| (filePath){
let arr = filePath.split('/');
var filename = arr.pop();
let url = encodeURI(filePath);
const fileTransfer: FileTransferObject = this.transfer.create();
fileTransfer.download(this.mediapath+filePath, this.file.dataDirectory + filename).then((entry) => {
let toast = this.toastCtrl.create({
message: "Attachment bas been download",
duration: 3000,
position: 'top'
});
}, (error) => {
// handle error
let toast = this.toastCtrl.create({
message: "Downloading failure! retry.",
duration: 3000,
position: 'top'
});
});
}
async viewComments(index,comments,post_id){
const modal = await this.modalCtrl.create({
component: CommentsPage,
componentProps: {
'comments': comments,
'post_id': post_id,
'handle': 'post'
}
});
await modal.present();
}
async sharePostCtrl(post_id)
{
let prompt = await this.alertCtrl.create({
message: 'Share this post',
inputs : [
{
type:'radio',
label:'Share post now ',
value:post_id
},
{
type:'radio',
label:'Write Post',
value:post_id
}],
buttons : [
{
text: "Cancel",
handler: data => {
console.log("cancel clicked");
}
},
{
text: "Share",
handler: data => {
this.sharePost('share',post_id);
}
}]});
await prompt.present();
}
async postActivity(event,post)
{
let buttons : any = [
{
icon: !this.platform.is('ios') ? 'ios-bookmark' : null,
text: 'Save Post',
handler: () => {
this.reactAction('save_post',post.post_id);
}
}
];
if(post.author_id != localStorage.getItem('user_id')){
let report : any = {
icon: !this.platform.is('ios') ? 'ios-flag' : null,
text: 'Report Post',
handler: () => {
this.reportAction("post",post.post_id)
}
};
let hide : any = {
icon: !this.platform.is('ios') ? 'ios-eye-off' : null,
text: 'Hide Post',
handler: () => {
event.target.parentNode.parentNode.parentNode.parentNode.remove();
this.reactAction("hide_post",post.post_id)
}
};
buttons.push(report);
buttons.push(hide);
}
if(post.author_id == localStorage.getItem('user_id')){
let btn : any = {
icon: !this.platform.is('ios') ? 'ios-trash' : null,
text: 'Delete Post',
handler: async () => {
const confirm = await this.alertCtrl.create({
header: 'Delete post?',
message: 'Once you delete you can not undo this step.',
buttons: [
{
text: 'Cancel',
handler: () => {
}
}
,{
text: 'Delete',
handler: () => {
event.target.parentNode.parentNode.parentNode.parentNode.remove();
this.reactAction("delete_post",post.post_id)
}
}
]
});
await confirm.present();
}
};
buttons.push(btn);
}
const actionSheet = await this.actionSheetCtrl.create({
buttons
});
await actionSheet.present();
}
getBackgroundStyle(url) {
if(!url){
return 'url(assets/followthebirdImgs/no-profile-img.jpeg)'
} else {
return 'url(' + this.mediapath+url + ')'
}
}
getStoryBackgroundStyle(media) {
if(media != 'null'){
console.log(media);
let obj = JSON.parse(media)
return 'url(' + this.mediapath+obj[0].src + ')'
} else {
return 'url(assets/followthebirdImgs/story_background.png)'
}
}
getMedia(media) {
let obj = JSON.parse(media)
return this.mediapath+obj[0].src;
}
sharePost(type,id){
this.post.sharePost({'do':type,id:id,my_id:localStorage.getItem('user_id')}).subscribe(async (resp) => {
const toast = await | downloadAttachment | identifier_name |
clarans.py | param[in] data (list): Input data that is presented as list of points (objects), each point should be
represented by list or tuple.
@param[in] number_clusters (uint): Amount of clusters that should be allocated.
@param[in] numlocal (uint): The number of local minima obtained (amount of iterations for solving the
problem). @param[in] maxneighbor (uint): The maximum number of neighbors examined.
"""
self.__pointer_data = data
self.__numlocal = numlocal
self.__maxneighbor = maxneighbor
self.__number_clusters = number_clusters
self.__clusters = []
self.__current = []
self.__belong = []
self.__optimal_medoids = []
self.__optimal_estimation = float("inf")
self.__verify_arguments()
def __verify_arguments(self):
"""!
@brief Verify input parameters for the algorithm and throw exception in case of incorrectness.
"""
if len(self.__pointer_data) == 0:
raise ValueError(
"Input data is empty (size: '%d')." % len(self.__pointer_data)
)
if self.__number_clusters <= 0:
raise ValueError(
"Amount of cluster (current value: '%d') for allocation should be greater than 0."
% self.__number_clusters
)
if self.__numlocal < 0:
raise ValueError(
"Local minima (current value: '%d') should be greater or equal to 0."
% self.__numlocal
)
if self.__maxneighbor < 0:
raise ValueError(
"Maximum number of neighbors (current value: '%d') should be greater or "
"equal to 0." % self.__maxneighbor
)
def process(self, plotting=False):
"""!
@brief Performs cluster analysis in line with rules of CLARANS algorithm.
@return (clarans) Returns itself (CLARANS instance).
@see get_clusters()
@see get_medoids()
"""
random.seed()
# loop for a numlocal number of times
for _ in range(0, self.__numlocal):
print("numlocal: ", _)
# set (current) random medoids
self.__current = random.sample(
range(0, len(self.__pointer_data)), self.__number_clusters
)
# update clusters in line with random allocated medoids
self.__update_clusters(self.__current)
# optimize configuration
self.__optimize_configuration()
# obtain cost of current cluster configuration and compare it with the best obtained
estimation = self.__calculate_estimation()
if estimation < self.__optimal_estimation:
print(
"Better configuration found with medoids: {0} and cost: {1}".format(
self.__current[:], estimation
)
)
self.__optimal_medoids = self.__current[:]
self.__optimal_estimation = estimation
if plotting is True:
self.__update_clusters(self.__optimal_medoids)
plot_pam(
self.__pointer_data,
dict(zip(self.__optimal_medoids, self.__clusters)),
)
else:
print(
"Configuration found does not improve current best one because its cost is {0}".format(
estimation
)
)
if plotting is True:
self.__update_clusters(self.__current[:])
plot_pam(
self.__pointer_data,
dict(zip(self.__current[:], self.__clusters)),
)
self.__update_clusters(self.__optimal_medoids)
if plotting is True:
print("FINAL RESULT:")
plot_pam(
self.__pointer_data,
dict(zip(self.__optimal_medoids, self.__clusters)),
)
return self
def get_clusters(self):
"""!
@brief Returns allocated clusters by the algorithm.
@remark Allocated clusters can be returned only after data processing (use method process()), otherwise empty
list is returned.
@return (list) List of allocated clusters, each cluster contains indexes of objects in list of data.
@see process()
@see get_medoids()
"""
return self.__clusters
def get_medoids(self):
"""!
@brief Returns list of medoids of allocated clusters.
@see process()
@see get_clusters()
"""
return self.__optimal_medoids
def get_cluster_encoding(self):
"""!
@brief Returns clustering result representation type that indicate how clusters are encoded.
@return (type_encoding) Clustering result representation.
@see get_clusters()
"""
return type_encoding.CLUSTER_INDEX_LIST_SEPARATION
def __update_clusters(self, medoids):
"""!
@brief Forms cluster in line with specified medoids by calculation distance from each point to medoids.
"""
self.__belong = [0] * len(self.__pointer_data)
self.__clusters = [[] for _ in range(len(medoids))]
for index_point in range(len(self.__pointer_data)):
index_optim = -1
dist_optim = 0.0
for index in range(len(medoids)):
dist = euclidean_distance_square(
self.__pointer_data[index_point],
self.__pointer_data[medoids[index]],
)
if (dist < dist_optim) or (index == 0):
index_optim = index
dist_optim = dist
self.__clusters[index_optim].append(index_point)
self.__belong[index_point] = index_optim
# If cluster is not able to capture object it should be removed
self.__clusters = [
cluster for cluster in self.__clusters if len(cluster) > 0
]
def __optimize_configuration(self):
"""!
@brief Finds quasi-optimal medoids and updates in line with them clusters in line with algorithm's rules.
"""
index_neighbor = 0
counter = 0
while index_neighbor < self.__maxneighbor:
# get random current medoid that is to be replaced
current_medoid_index = self.__current[
random.randint(0, self.__number_clusters - 1)
]
current_medoid_cluster_index = self.__belong[current_medoid_index]
# get new candidate to be medoid
candidate_medoid_index = random.randint(
0, len(self.__pointer_data) - 1
)
while candidate_medoid_index in self.__current:
candidate_medoid_index = random.randint(
0, len(self.__pointer_data) - 1
)
candidate_cost = 0.0
for point_index in range(0, len(self.__pointer_data)):
if point_index not in self.__current:
# get non-medoid point and its medoid
point_cluster_index = self.__belong[point_index]
point_medoid_index = self.__current[point_cluster_index]
# get other medoid that is nearest to the point (except current and candidate)
other_medoid_index = self.__find_another_nearest_medoid(
point_index, current_medoid_index
)
other_medoid_cluster_index = self.__belong[
other_medoid_index
]
# for optimization calculate all required distances
# from the point to current medoid
distance_current = euclidean_distance_square(
self.__pointer_data[point_index],
self.__pointer_data[current_medoid_index],
)
# from the point to candidate median
distance_candidate = euclidean_distance_square(
self.__pointer_data[point_index],
self.__pointer_data[candidate_medoid_index],
)
# from the point to nearest (own) medoid
distance_nearest = float("inf")
if (point_medoid_index != candidate_medoid_index) and (
point_medoid_index != current_medoid_cluster_index
):
distance_nearest = euclidean_distance_square(
self.__pointer_data[point_index],
self.__pointer_data[point_medoid_index],
)
# apply rules for cost calculation
if point_cluster_index == current_medoid_cluster_index:
# case 1:
if distance_candidate >= distance_nearest:
candidate_cost += (
distance_nearest - distance_current
)
# case 2:
else:
candidate_cost += (
distance_candidate - distance_current
)
elif point_cluster_index == other_medoid_cluster_index:
# case 3 ('nearest medoid' is the representative object of that cluster and object is more
# similar to 'nearest' than to 'candidate'):
if distance_candidate > distance_nearest:
|
# case 4:
else:
candidate_cost += (
distance_candidate - distance_nearest
)
if candidate_cost < 0:
counter += 1
# set candidate that has won
self.__current[
current_medoid_cluster_index
] = candidate_medoid_index
# recalculate clusters
self.__update_clusters(self.__current)
# reset iterations and starts investigation from the begining
index_neighbor = 0
else:
index_neighbor += 1
print("Medoid set changed {0} times".format(counter))
def __find_another_nearest_medoid(self, point_index, current_medoid_index):
"""!
@brief Finds the another nearest medoid for the specified point that is different from the specified medoid.
@param[in] point_index: index of point in dataspace for that searching of medoid in current list of medoids
is performed | pass | conditional_block |
clarans.py | .__numlocal < 0:
raise ValueError(
"Local minima (current value: '%d') should be greater or equal to 0."
% self.__numlocal
)
if self.__maxneighbor < 0:
raise ValueError(
"Maximum number of neighbors (current value: '%d') should be greater or "
"equal to 0." % self.__maxneighbor
)
def process(self, plotting=False):
"""!
@brief Performs cluster analysis in line with rules of CLARANS algorithm.
@return (clarans) Returns itself (CLARANS instance).
@see get_clusters()
@see get_medoids()
"""
random.seed()
# loop for a numlocal number of times
for _ in range(0, self.__numlocal):
print("numlocal: ", _)
# set (current) random medoids
self.__current = random.sample(
range(0, len(self.__pointer_data)), self.__number_clusters
)
# update clusters in line with random allocated medoids
self.__update_clusters(self.__current)
# optimize configuration
self.__optimize_configuration()
# obtain cost of current cluster configuration and compare it with the best obtained
estimation = self.__calculate_estimation()
if estimation < self.__optimal_estimation:
print(
"Better configuration found with medoids: {0} and cost: {1}".format(
self.__current[:], estimation
)
)
self.__optimal_medoids = self.__current[:]
self.__optimal_estimation = estimation
if plotting is True:
self.__update_clusters(self.__optimal_medoids)
plot_pam(
self.__pointer_data,
dict(zip(self.__optimal_medoids, self.__clusters)),
)
else:
print(
"Configuration found does not improve current best one because its cost is {0}".format(
estimation
)
)
if plotting is True:
self.__update_clusters(self.__current[:])
plot_pam(
self.__pointer_data,
dict(zip(self.__current[:], self.__clusters)),
)
self.__update_clusters(self.__optimal_medoids)
if plotting is True:
print("FINAL RESULT:")
plot_pam(
self.__pointer_data,
dict(zip(self.__optimal_medoids, self.__clusters)),
)
return self
def get_clusters(self):
"""!
@brief Returns allocated clusters by the algorithm.
@remark Allocated clusters can be returned only after data processing (use method process()), otherwise empty
list is returned.
@return (list) List of allocated clusters, each cluster contains indexes of objects in list of data.
@see process()
@see get_medoids()
"""
return self.__clusters
def get_medoids(self):
"""!
@brief Returns list of medoids of allocated clusters.
@see process()
@see get_clusters()
"""
return self.__optimal_medoids
def get_cluster_encoding(self):
"""!
@brief Returns clustering result representation type that indicate how clusters are encoded.
@return (type_encoding) Clustering result representation.
@see get_clusters()
"""
return type_encoding.CLUSTER_INDEX_LIST_SEPARATION
def __update_clusters(self, medoids):
"""!
@brief Forms cluster in line with specified medoids by calculation distance from each point to medoids.
"""
self.__belong = [0] * len(self.__pointer_data)
self.__clusters = [[] for _ in range(len(medoids))]
for index_point in range(len(self.__pointer_data)):
index_optim = -1
dist_optim = 0.0
for index in range(len(medoids)):
dist = euclidean_distance_square(
self.__pointer_data[index_point],
self.__pointer_data[medoids[index]],
)
if (dist < dist_optim) or (index == 0):
index_optim = index
dist_optim = dist
self.__clusters[index_optim].append(index_point)
self.__belong[index_point] = index_optim
# If cluster is not able to capture object it should be removed
self.__clusters = [
cluster for cluster in self.__clusters if len(cluster) > 0
]
def __optimize_configuration(self):
"""!
@brief Finds quasi-optimal medoids and updates in line with them clusters in line with algorithm's rules.
"""
index_neighbor = 0
counter = 0
while index_neighbor < self.__maxneighbor:
# get random current medoid that is to be replaced
current_medoid_index = self.__current[
random.randint(0, self.__number_clusters - 1)
]
current_medoid_cluster_index = self.__belong[current_medoid_index]
# get new candidate to be medoid
candidate_medoid_index = random.randint(
0, len(self.__pointer_data) - 1
)
while candidate_medoid_index in self.__current:
candidate_medoid_index = random.randint(
0, len(self.__pointer_data) - 1
)
candidate_cost = 0.0
for point_index in range(0, len(self.__pointer_data)):
if point_index not in self.__current:
# get non-medoid point and its medoid
point_cluster_index = self.__belong[point_index]
point_medoid_index = self.__current[point_cluster_index]
# get other medoid that is nearest to the point (except current and candidate)
other_medoid_index = self.__find_another_nearest_medoid(
point_index, current_medoid_index
)
other_medoid_cluster_index = self.__belong[
other_medoid_index
]
# for optimization calculate all required distances
# from the point to current medoid
distance_current = euclidean_distance_square(
self.__pointer_data[point_index],
self.__pointer_data[current_medoid_index],
)
# from the point to candidate median
distance_candidate = euclidean_distance_square(
self.__pointer_data[point_index],
self.__pointer_data[candidate_medoid_index],
)
# from the point to nearest (own) medoid
distance_nearest = float("inf")
if (point_medoid_index != candidate_medoid_index) and (
point_medoid_index != current_medoid_cluster_index
):
distance_nearest = euclidean_distance_square(
self.__pointer_data[point_index],
self.__pointer_data[point_medoid_index],
)
# apply rules for cost calculation
if point_cluster_index == current_medoid_cluster_index:
# case 1:
if distance_candidate >= distance_nearest:
candidate_cost += (
distance_nearest - distance_current
)
# case 2:
else:
candidate_cost += (
distance_candidate - distance_current
)
elif point_cluster_index == other_medoid_cluster_index:
# case 3 ('nearest medoid' is the representative object of that cluster and object is more
# similar to 'nearest' than to 'candidate'):
if distance_candidate > distance_nearest:
pass
# case 4:
else:
candidate_cost += (
distance_candidate - distance_nearest
)
if candidate_cost < 0:
counter += 1
# set candidate that has won
self.__current[
current_medoid_cluster_index
] = candidate_medoid_index
# recalculate clusters
self.__update_clusters(self.__current)
# reset iterations and starts investigation from the begining
index_neighbor = 0
else:
index_neighbor += 1
print("Medoid set changed {0} times".format(counter))
def __find_another_nearest_medoid(self, point_index, current_medoid_index):
"""!
@brief Finds the another nearest medoid for the specified point that is different from the specified medoid.
@param[in] point_index: index of point in dataspace for that searching of medoid in current list of medoids
is performed.
@param[in] current_medoid_index: index of medoid that shouldn't be considered as a nearest.
@return (uint) index of the another nearest medoid for the point.
"""
other_medoid_index = -1
other_distance_nearest = float("inf")
for index_medoid in self.__current:
if index_medoid != current_medoid_index:
other_distance_candidate = euclidean_distance_square(
self.__pointer_data[point_index],
self.__pointer_data[current_medoid_index],
)
if other_distance_candidate < other_distance_nearest:
other_distance_nearest = other_distance_candidate
other_medoid_index = index_medoid
return other_medoid_index
def __calculate_estimation(self):
"""!
@brief Calculates estimation (cost) of the current clusters. The lower the estimation,
the more optimally configuration of clusters.
@return (double) estimation of current clusters.
"""
estimation = 0.0
for index_cluster in range(0, len(self.__clusters)):
cluster = self.__clusters[index_cluster]
index_medoid = self.__current[index_cluster]
for index_point in cluster:
estimation += euclidean_distance_square(
self.__pointer_data[index_point],
self.__pointer_data[index_medoid],
)
return estimation
def | compute_cost_clarans | identifier_name | |
clarans.py | @brief Cluster analysis algorithm: CLARANS.
@details Implementation based on paper @cite article::clarans::1.
@authors Andrei Novikov (pyclustering@yandex.ru)
@date 2014-2019
@copyright GNU Public License
@cond GNU_PUBLIC_LICENSE
PyClustering is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PyClustering is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
@endcond
"""
import random
import scipy
import itertools
import graphviz
import numpy as np
from clustviz.pam import plot_pam
from pyclustering.cluster.encoder import type_encoding
from pyclustering.utils import euclidean_distance_square
class clarans:
"""!
@brief Class represents clustering algorithm CLARANS (a method for clustering objects for spatial data mining).
"""
def __init__(self, data, number_clusters, numlocal, maxneighbor):
"""! @brief Constructor of clustering algorithm CLARANS. @details The higher the value of maxneighbor,
the closer is CLARANS to K-Medoids, and the longer is each search of a local minima.
@param[in] data (list): Input data that is presented as list of points (objects), each point should be
represented by list or tuple.
@param[in] number_clusters (uint): Amount of clusters that should be allocated.
@param[in] numlocal (uint): The number of local minima obtained (amount of iterations for solving the
problem). @param[in] maxneighbor (uint): The maximum number of neighbors examined.
"""
self.__pointer_data = data
self.__numlocal = numlocal
self.__maxneighbor = maxneighbor
self.__number_clusters = number_clusters
self.__clusters = []
self.__current = []
self.__belong = []
self.__optimal_medoids = []
self.__optimal_estimation = float("inf")
self.__verify_arguments()
def __verify_arguments(self):
"""!
@brief Verify input parameters for the algorithm and throw exception in case of incorrectness.
"""
if len(self.__pointer_data) == 0:
raise ValueError(
"Input data is empty (size: '%d')." % len(self.__pointer_data)
)
if self.__number_clusters <= 0:
raise ValueError(
"Amount of cluster (current value: '%d') for allocation should be greater than 0."
% self.__number_clusters
)
if self.__numlocal < 0:
raise ValueError(
"Local minima (current value: '%d') should be greater or equal to 0."
% self.__numlocal
)
if self.__maxneighbor < 0:
raise ValueError(
"Maximum number of neighbors (current value: '%d') should be greater or "
"equal to 0." % self.__maxneighbor
)
def process(self, plotting=False):
"""!
@brief Performs cluster analysis in line with rules of CLARANS algorithm.
@return (clarans) Returns itself (CLARANS instance).
@see get_clusters()
@see get_medoids()
"""
random.seed()
# loop for a numlocal number of times
for _ in range(0, self.__numlocal):
print("numlocal: ", _)
# set (current) random medoids
self.__current = random.sample(
range(0, len(self.__pointer_data)), self.__number_clusters
)
# update clusters in line with random allocated medoids
self.__update_clusters(self.__current)
# optimize configuration
self.__optimize_configuration()
# obtain cost of current cluster configuration and compare it with the best obtained
estimation = self.__calculate_estimation()
if estimation < self.__optimal_estimation:
print(
"Better configuration found with medoids: {0} and cost: {1}".format(
self.__current[:], estimation
)
)
self.__optimal_medoids = self.__current[:]
self.__optimal_estimation = estimation
if plotting is True:
self.__update_clusters(self.__optimal_medoids)
plot_pam(
self.__pointer_data,
dict(zip(self.__optimal_medoids, self.__clusters)),
)
else:
print(
"Configuration found does not improve current best one because its cost is {0}".format(
estimation
)
)
if plotting is True:
self.__update_clusters(self.__current[:])
plot_pam(
self.__pointer_data,
dict(zip(self.__current[:], self.__clusters)),
)
self.__update_clusters(self.__optimal_medoids)
if plotting is True:
print("FINAL RESULT:")
plot_pam(
self.__pointer_data,
dict(zip(self.__optimal_medoids, self.__clusters)),
)
return self
def get_clusters(self):
"""!
@brief Returns allocated clusters by the algorithm.
@remark Allocated clusters can be returned only after data processing (use method process()), otherwise empty
list is returned.
@return (list) List of allocated clusters, each cluster contains indexes of objects in list of data.
@see process()
@see get_medoids()
"""
return self.__clusters
def get_medoids(self):
"""!
@brief Returns list of medoids of allocated clusters.
@see process()
@see get_clusters()
"""
return self.__optimal_medoids
def get_cluster_encoding(self):
"""!
@brief Returns clustering result representation type that indicate how clusters are encoded.
@return (type_encoding) Clustering result representation.
@see get_clusters()
"""
return type_encoding.CLUSTER_INDEX_LIST_SEPARATION
def __update_clusters(self, medoids):
"""!
@brief Forms cluster in line with specified medoids by calculation distance from each point to medoids.
"""
self.__belong = [0] * len(self.__pointer_data)
self.__clusters = [[] for _ in range(len(medoids))]
for index_point in range(len(self.__pointer_data)):
index_optim = -1
dist_optim = 0.0
for index in range(len(medoids)):
dist = euclidean_distance_square(
self.__pointer_data[index_point],
self.__pointer_data[medoids[index]],
)
if (dist < dist_optim) or (index == 0):
index_optim = index
dist_optim = dist
self.__clusters[index_optim].append(index_point)
self.__belong[index_point] = index_optim
# If cluster is not able to capture object it should be removed
self.__clusters = [
cluster for cluster in self.__clusters if len(cluster) > 0
]
def __optimize_configuration(self):
"""!
@brief Finds quasi-optimal medoids and updates in line with them clusters in line with algorithm's rules.
"""
index_neighbor = 0
counter = 0
while index_neighbor < self.__maxneighbor:
# get random current medoid that is to be replaced
current_medoid_index = self.__current[
random.randint(0, self.__number_clusters - 1)
]
current_medoid_cluster_index = self.__belong[current_medoid_index]
# get new candidate to be medoid
candidate_medoid_index = random.randint(
0, len(self.__pointer_data) - 1
)
while candidate_medoid_index in self.__current:
candidate_medoid_index = random.randint(
0, len(self.__pointer_data) - 1
)
candidate_cost = 0.0
for point_index in range(0, len(self.__pointer_data)):
if point_index not in self.__current:
# get non-medoid point and its medoid
point_cluster_index = self.__belong[point_index]
point_medoid_index = self.__current[point_cluster_index]
# get other medoid that is nearest to the point (except current and candidate)
other_medoid_index = self.__find_another_nearest_medoid(
point_index, current_medoid_index
)
other_medoid_cluster_index = self.__belong[
other_medoid_index
]
# for optimization calculate all required distances
# from the point to current medoid
distance_current = euclidean_distance_square(
self.__pointer_data[point_index],
self.__pointer_data[current_medoid_index],
)
# from the point to candidate median
distance_candidate = euclidean_distance_square(
self.__pointer_data[point_index],
self.__pointer_data[candidate_medoid_index],
)
# from the point to nearest (own) medoid
distance_nearest = float("inf")
if (point_medoid_index != candidate_medoid_index) and (
point_medoid_index != current_medoid_cluster_index
):
distance_nearest | """!
| random_line_split | |
clarans.py | [in] data (list): Input data that is presented as list of points (objects), each point should be
represented by list or tuple.
@param[in] number_clusters (uint): Amount of clusters that should be allocated.
@param[in] numlocal (uint): The number of local minima obtained (amount of iterations for solving the
problem). @param[in] maxneighbor (uint): The maximum number of neighbors examined.
"""
self.__pointer_data = data
self.__numlocal = numlocal
self.__maxneighbor = maxneighbor
self.__number_clusters = number_clusters
self.__clusters = []
self.__current = []
self.__belong = []
self.__optimal_medoids = []
self.__optimal_estimation = float("inf")
self.__verify_arguments()
def __verify_arguments(self):
"""!
@brief Verify input parameters for the algorithm and throw exception in case of incorrectness.
"""
if len(self.__pointer_data) == 0:
raise ValueError(
"Input data is empty (size: '%d')." % len(self.__pointer_data)
)
if self.__number_clusters <= 0:
raise ValueError(
"Amount of cluster (current value: '%d') for allocation should be greater than 0."
% self.__number_clusters
)
if self.__numlocal < 0:
raise ValueError(
"Local minima (current value: '%d') should be greater or equal to 0."
% self.__numlocal
)
if self.__maxneighbor < 0:
raise ValueError(
"Maximum number of neighbors (current value: '%d') should be greater or "
"equal to 0." % self.__maxneighbor
)
def process(self, plotting=False):
|
# update clusters in line with random allocated medoids
self.__update_clusters(self.__current)
# optimize configuration
self.__optimize_configuration()
# obtain cost of current cluster configuration and compare it with the best obtained
estimation = self.__calculate_estimation()
if estimation < self.__optimal_estimation:
print(
"Better configuration found with medoids: {0} and cost: {1}".format(
self.__current[:], estimation
)
)
self.__optimal_medoids = self.__current[:]
self.__optimal_estimation = estimation
if plotting is True:
self.__update_clusters(self.__optimal_medoids)
plot_pam(
self.__pointer_data,
dict(zip(self.__optimal_medoids, self.__clusters)),
)
else:
print(
"Configuration found does not improve current best one because its cost is {0}".format(
estimation
)
)
if plotting is True:
self.__update_clusters(self.__current[:])
plot_pam(
self.__pointer_data,
dict(zip(self.__current[:], self.__clusters)),
)
self.__update_clusters(self.__optimal_medoids)
if plotting is True:
print("FINAL RESULT:")
plot_pam(
self.__pointer_data,
dict(zip(self.__optimal_medoids, self.__clusters)),
)
return self
def get_clusters(self):
"""!
@brief Returns allocated clusters by the algorithm.
@remark Allocated clusters can be returned only after data processing (use method process()), otherwise empty
list is returned.
@return (list) List of allocated clusters, each cluster contains indexes of objects in list of data.
@see process()
@see get_medoids()
"""
return self.__clusters
def get_medoids(self):
"""!
@brief Returns list of medoids of allocated clusters.
@see process()
@see get_clusters()
"""
return self.__optimal_medoids
def get_cluster_encoding(self):
"""!
@brief Returns clustering result representation type that indicate how clusters are encoded.
@return (type_encoding) Clustering result representation.
@see get_clusters()
"""
return type_encoding.CLUSTER_INDEX_LIST_SEPARATION
def __update_clusters(self, medoids):
"""!
@brief Forms cluster in line with specified medoids by calculation distance from each point to medoids.
"""
self.__belong = [0] * len(self.__pointer_data)
self.__clusters = [[] for _ in range(len(medoids))]
for index_point in range(len(self.__pointer_data)):
index_optim = -1
dist_optim = 0.0
for index in range(len(medoids)):
dist = euclidean_distance_square(
self.__pointer_data[index_point],
self.__pointer_data[medoids[index]],
)
if (dist < dist_optim) or (index == 0):
index_optim = index
dist_optim = dist
self.__clusters[index_optim].append(index_point)
self.__belong[index_point] = index_optim
# If cluster is not able to capture object it should be removed
self.__clusters = [
cluster for cluster in self.__clusters if len(cluster) > 0
]
def __optimize_configuration(self):
"""!
@brief Finds quasi-optimal medoids and updates in line with them clusters in line with algorithm's rules.
"""
index_neighbor = 0
counter = 0
while index_neighbor < self.__maxneighbor:
# get random current medoid that is to be replaced
current_medoid_index = self.__current[
random.randint(0, self.__number_clusters - 1)
]
current_medoid_cluster_index = self.__belong[current_medoid_index]
# get new candidate to be medoid
candidate_medoid_index = random.randint(
0, len(self.__pointer_data) - 1
)
while candidate_medoid_index in self.__current:
candidate_medoid_index = random.randint(
0, len(self.__pointer_data) - 1
)
candidate_cost = 0.0
for point_index in range(0, len(self.__pointer_data)):
if point_index not in self.__current:
# get non-medoid point and its medoid
point_cluster_index = self.__belong[point_index]
point_medoid_index = self.__current[point_cluster_index]
# get other medoid that is nearest to the point (except current and candidate)
other_medoid_index = self.__find_another_nearest_medoid(
point_index, current_medoid_index
)
other_medoid_cluster_index = self.__belong[
other_medoid_index
]
# for optimization calculate all required distances
# from the point to current medoid
distance_current = euclidean_distance_square(
self.__pointer_data[point_index],
self.__pointer_data[current_medoid_index],
)
# from the point to candidate median
distance_candidate = euclidean_distance_square(
self.__pointer_data[point_index],
self.__pointer_data[candidate_medoid_index],
)
# from the point to nearest (own) medoid
distance_nearest = float("inf")
if (point_medoid_index != candidate_medoid_index) and (
point_medoid_index != current_medoid_cluster_index
):
distance_nearest = euclidean_distance_square(
self.__pointer_data[point_index],
self.__pointer_data[point_medoid_index],
)
# apply rules for cost calculation
if point_cluster_index == current_medoid_cluster_index:
# case 1:
if distance_candidate >= distance_nearest:
candidate_cost += (
distance_nearest - distance_current
)
# case 2:
else:
candidate_cost += (
distance_candidate - distance_current
)
elif point_cluster_index == other_medoid_cluster_index:
# case 3 ('nearest medoid' is the representative object of that cluster and object is more
# similar to 'nearest' than to 'candidate'):
if distance_candidate > distance_nearest:
pass
# case 4:
else:
candidate_cost += (
distance_candidate - distance_nearest
)
if candidate_cost < 0:
counter += 1
# set candidate that has won
self.__current[
current_medoid_cluster_index
] = candidate_medoid_index
# recalculate clusters
self.__update_clusters(self.__current)
# reset iterations and starts investigation from the begining
index_neighbor = 0
else:
index_neighbor += 1
print("Medoid set changed {0} times".format(counter))
def __find_another_nearest_medoid(self, point_index, current_medoid_index):
"""!
@brief Finds the another nearest medoid for the specified point that is different from the specified medoid.
@param[in] point_index: index of point in dataspace for that searching of medoid in current list of medoids
is performed | """!
@brief Performs cluster analysis in line with rules of CLARANS algorithm.
@return (clarans) Returns itself (CLARANS instance).
@see get_clusters()
@see get_medoids()
"""
random.seed()
# loop for a numlocal number of times
for _ in range(0, self.__numlocal):
print("numlocal: ", _)
# set (current) random medoids
self.__current = random.sample(
range(0, len(self.__pointer_data)), self.__number_clusters
) | identifier_body |
mod.rs | zero or more children.
pub trait Block: std::fmt::Debug {
/// The output of executing this block.
type Output;
/// The signatures on this block.
type Signature;
/// Whether consensus has decided to commit this block. This kind of blocks are expected to be
/// sent to storage very soon, unless execution is lagging behind.
fn is_committed(&self) -> bool;
/// Marks this block as committed.
fn set_committed(&mut self);
/// Whether this block has finished execution.
fn is_executed(&self) -> bool;
/// Sets the output of this block.
fn set_output(&mut self, output: Self::Output);
/// Sets the signatures for this block.
fn set_signature(&mut self, signature: Self::Signature);
/// The id of this block.
fn id(&self) -> HashValue;
/// The id of the parent block.
fn parent_id(&self) -> HashValue;
/// Adds a block as its child.
fn add_child(&mut self, child_id: HashValue);
/// The list of children of this block.
fn children(&self) -> &HashSet<HashValue>;
}
/// The `BlockTree` implementation.
#[derive(Debug)]
pub struct BlockTree<B> {
/// A map that keeps track of all existing blocks by their ids.
id_to_block: HashMap<HashValue, B>,
/// The blocks at the lowest height in the map. B5 and B5' in the following example.
/// ```text
/// Committed(B0..4) -> B5 -> B6 -> B7
/// |
/// └--> B5' -> B6' -> B7'
/// |
/// └----> B7"
/// ```
heads: HashSet<HashValue>,
/// Id of the last committed block. B4 in the above example.
last_committed_id: HashValue,
}
impl<B> BlockTree<B>
where
B: Block,
{
/// Constructs a new `BlockTree`.
pub fn new(last_committed_id: HashValue) -> Self {
BlockTree {
id_to_block: HashMap::new(),
heads: HashSet::new(),
last_committed_id,
}
}
/// Adds a new block to the tree.
pub fn add_block(&mut self, block: B) -> Result<(), AddBlockError<B>> {
| id,
);
}
hash_map::Entry::Vacant(_) => bail_err!(AddBlockError::ParentNotFound { block }),
}
Ok(())
}
//
/ Returns a reference to a specific block, if it exists in the tree.
pub fn get_block(&self, id: HashValue) -> Option<&B> {
self.id_to_block.get(&id)
}
/// Returns a mutable reference to a specific block, if it exists in the tree.
pub fn get_block_mut(&mut self, id: HashValue) -> Option<&mut B> {
self.id_to_block.get_mut(&id)
}
/// Returns id of a block that is ready to be sent to VM for execution (its parent has finished
/// execution), if such block exists in the tree.
pub fn get_block_to_execute(&mut self) -> Option<HashValue> {
let mut to_visit: Vec<HashValue> = self.heads.iter().cloned().collect();
while let Some(id) = to_visit.pop() {
let block = self
.id_to_block
.get(&id)
.expect("Missing block in id_to_block.");
if !block.is_executed() {
return Some(id);
}
to_visit.extend(block.children().iter().cloned());
}
None
}
/// Marks given block and all its uncommitted ancestors as committed. This does not cause these
/// blocks to be sent to storage immediately.
pub fn mark_as_committed(
&mut self,
id: HashValue,
signature: B::Signature,
) -> Result<(), CommitBlockError> {
// First put the signatures in the block. Note that if this causes multiple blocks to be
// marked as committed, only the last one will have the signatures.
match self.id_to_block.get_mut(&id) {
Some(block) => {
if block.is_committed() {
bail_err!(CommitBlockError::BlockAlreadyMarkedAsCommitted { id });
} else {
block.set_signature(signature);
}
}
None => bail_err!(CommitBlockError::BlockNotFound { id }),
}
// Mark the current block as committed. Go to parent block and repeat until a committed
// block is found, or no more blocks.
let mut current_id = id;
while let Some(block) = self.id_to_block.get_mut(¤t_id) {
if block.is_committed() {
break;
}
block.set_committed();
current_id = block.parent_id();
}
Ok(())
}
/// Removes all blocks in the tree that conflict with committed blocks. Returns a list of
/// blocks that are ready to be sent to storage (all the committed blocks that have been
/// executed).
pub fn prune(&mut self) -> Vec<B> {
let mut blocks_to_store = vec![];
// First find if there is a committed block in current heads. Since these blocks are at the
// same height, at most one of them can be committed. If all of them are pending we have
// nothing to do here. Otherwise, one of the branches is committed. Throw away the rest of
// them and advance to the next height.
let mut current_heads = self.heads.clone();
while let Some(committed_head) = self.get_committed_head(¤t_heads) {
assert!(
current_heads.remove(&committed_head),
"committed_head should exist.",
);
for id in current_heads {
self.remove_branch(id);
}
match self.id_to_block.entry(committed_head) {
hash_map::Entry::Occupied(entry) => {
current_heads = entry.get().children().clone();
let current_id = *entry.key();
let parent_id = entry.get().parent_id();
if entry.get().is_executed() {
// If this block has been executed, all its proper ancestors must have
// finished execution and present in `blocks_to_store`.
self.heads = current_heads.clone();
self.last_committed_id = current_id;
blocks_to_store.push(entry.remove());
} else {
// The current block has not finished execution. If the parent block does
// not exist in the map, that means parent block (also committed) has been
// executed and removed. Otherwise self.heads does not need to be changed.
if !self.id_to_block.contains_key(&parent_id) {
self.heads = HashSet::new();
self.heads.insert(current_id);
}
}
}
hash_map::Entry::Vacant(_) => unreachable!("committed_head_id should exist."),
}
}
blocks_to_store
}
/// Given a list of heads, returns the committed one if it exists.
fn get_committed_head(&self, heads: &HashSet<HashValue>) -> Option<HashValue> {
let mut committed_head = None;
for head in heads {
let block = self
.id_to_block
.get(head)
.expect("Head should exist in id_to_block.");
if block.is_committed() {
assert!(
committed_head.is_none(),
"Conflicting blocks are both committed.",
);
committed_head = Some(*head);
}
}
committed_head
}
/// Removes a branch at block `head`.
fn remove_branch(&mut self, head: HashValue) {
let mut remaining = vec![head];
while let Some(current_block_id) = remaining.pop() {
let block = self
.id_to_block
.remove(¤t_block_id)
.unwrap_or_else(|| {
panic!(
"Trying to remove a non-existing block {:x}.",
current_block_id,
)
});
assert!(
!block.is_committed(),
"Trying to remove a committed block {:x}.",
current_block_id,
);
remaining.extend(block.children().iter());
}
}
/// Removes the entire subtree at block `id`.
pub fn remove_subtree(&mut self, id: HashValue) {
self.heads.remove(&id);
self.remove_branch(id);
}
/// Resets the block tree with a new `last_committed_id`. This removes all the in-memory
/// blocks.
pub fn reset(&mut self, last_committed_id: HashValue) {
let mut | assert!(!self.id_to_block.contains_key(&self.last_committed_id));
let id = block.id();
if self.id_to_block.contains_key(&id) {
bail_err!(AddBlockError::BlockAlreadyExists { block });
}
let parent_id = block.parent_id();
if parent_id == self.last_committed_id {
assert!(self.heads.insert(id), "Block already existed in heads.");
self.id_to_block.insert(id, block);
return Ok(());
}
match self.id_to_block.entry(parent_id) {
hash_map::Entry::Occupied(mut entry) => {
entry.get_mut().add_child(id);
assert!(
self.id_to_block.insert(id, block).is_none(),
"Block {:x} already existed.", | identifier_body |
mod.rs | zero or more children.
pub trait Block: std::fmt::Debug {
/// The output of executing this block.
type Output;
/// The signatures on this block.
type Signature;
/// Whether consensus has decided to commit this block. This kind of blocks are expected to be
/// sent to storage very soon, unless execution is lagging behind.
fn is_committed(&self) -> bool;
/// Marks this block as committed.
fn set_committed(&mut self);
/// Whether this block has finished execution.
fn is_executed(&self) -> bool;
/// Sets the output of this block.
fn set_output(&mut self, output: Self::Output);
/// Sets the signatures for this block.
fn set_signature(&mut self, signature: Self::Signature);
/// The id of this block.
fn id(&self) -> HashValue;
/// The id of the parent block.
fn parent_id(&self) -> HashValue;
/// Adds a block as its child.
fn add_child(&mut self, child_id: HashValue);
/// The list of children of this block.
fn children(&self) -> &HashSet<HashValue>;
}
/// The `BlockTree` implementation.
#[derive(Debug)]
pub struct BlockTree<B> {
/// A map that keeps track of all existing blocks by their ids.
id_to_block: HashMap<HashValue, B>,
/// The blocks at the lowest height in the map. B5 and B5' in the following example.
/// ```text
/// Committed(B0..4) -> B5 -> B6 -> B7
/// |
/// └--> B5' -> B6' -> B7'
/// |
/// └----> B7"
/// ```
heads: HashSet<HashValue>,
/// Id of the last committed block. B4 in the above example.
last_committed_id: HashValue,
}
impl<B> BlockTree<B>
where
B: Block,
{
/// Constructs a new `BlockTree`.
pub fn new(last_committed_id: HashValue) -> Self {
BlockTree {
id_to_block: HashMap::new(),
heads: HashSet::new(),
last_committed_id,
}
}
/// Adds a new block to the tree.
pub fn add_block(&mut self, block: B) -> Result<(), AddBlockError<B>> {
assert!(!self.id_to_block.contains_key(&self.last_committed_id));
let id = block.id();
if self.id_to_block.contains_key(&id) {
bail_err!(AddBlockError::BlockAlreadyExists { block });
}
let parent_id = block.parent_id();
if parent_id == self.last_committed_id {
assert!(self.heads.insert(id), "Block already existed in heads.");
self.id_to_block.insert(id, block);
return Ok(());
}
match self.id_to_block.entry(parent_id) {
hash_map::Entry::Occupied(mut entry) => {
entry.get_mut().add_child(id);
assert!(
self.id_to_block.insert(id, block).is_none(),
"Block {:x} already existed.",
id,
);
}
hash_map::Entry::Vacant(_) => bail_err!(AddBlockError::ParentNotFound { block }),
}
Ok(())
}
/// Returns a reference to a specific block, if it exists in the tree.
pub fn get_block(&self, id: HashValue) -> Option<&B> {
self.id_to_block.get(&id)
}
/// Returns a mutable reference to a specific block, if it exists in the tree.
pub fn get_block_mut(&mut self, id: HashValue) -> Option<&mut B> {
self.id_to_block.get_mut(&id)
}
/// Returns id of a block that is ready to be sent to VM for execution (its parent has finished
/// execution), if such block exists in the tree.
pub fn get_block_to_execute(&mut self) -> Option<HashValue> {
let mut to_visit: Vec<HashValue> = self.heads.iter().cloned().collect();
while let Some(id) = to_visit.pop() {
let block = self
.id_to_block
.get(&id)
.expect("Missing block in id_to_block.");
if !block.is_executed() {
return Some(id);
}
to_visit.extend(block.children().iter().cloned());
}
None
}
/// Marks given block and all its uncommitted ancestors as committed. This does not cause these
/// blocks to be sent to storage immediately.
pub fn mark_as_committed(
&mut self,
id: HashValue,
signature: B::Signature,
) -> Result<(), CommitBlockError> {
// First put the signatures in the block. Note that if this causes multiple blocks to be
// marked as committed, only the last one will have the signatures.
match self.id_to_block.get_mut(&id) {
Some(block) => {
if block.is_committed() {
bail_err!(CommitBlockError::BlockAlreadyMarkedAsCommitted { id });
} else {
block.set_signature(signature);
}
}
None => bail_err!(CommitBlockError::BlockNotFound { id }),
}
// Mark the current block as committed. Go to parent block and repeat until a committed
// block is found, or no more blocks.
let mut current_id = id;
while let Some(block) = self.id_to_block.get_mut(¤t_id) {
if block.is_committed() {
break;
}
block.set_committed();
current_id = block.parent_id();
}
Ok(())
}
/// Removes all blocks in the tree that conflict with committed blocks. Returns a list of
/// blocks that are ready to be sent to storage (all the committed blocks that have been
/// executed).
pub fn prune(&mut self) -> Vec<B> {
let mut blocks_to_store = vec![];
// First find if there is a committed block in current heads. Since these blocks are at the
// same height, at most one of them can be committed. If all of them are pending we have
// nothing to do here. Otherwise, one of the branches is committed. Throw away the rest of
// them and advance to the next height.
let mut current_heads = self.heads.clone();
while let Some(committed_head) = self.get_committed_head(¤t_heads) {
assert!(
current_heads.remove(&committed_head),
"committed_head should exist.",
);
for id in current_heads {
self.remove_branch(id);
}
match self.id_to_block.entry(committed_head) {
hash_map::Entry::Occupied(entry) => {
current_heads = entry.get().children().clone();
let current_id = *entry.key();
let parent_id = entry.get().parent_id();
if entry.get().is_executed() {
// If this block has been executed, all its proper ancestors must have
// finished execution and present in `blocks_to_store`.
self.heads = current_heads.clone();
self.last_committed_id = current_id;
blocks_to_store.push(entry.remove());
} else {
// The current block has not finished execution. If the parent block does
// not exist in the map, that means parent block (also committed) has been
// executed and removed. Otherwise self.heads does not need to be changed.
if !self.id_to_block.contains_key(&parent_id) {
self.heads = HashSet::new();
self.heads.insert(current_id);
}
}
}
hash_map::Entry::Vacant(_) => unreachable!("committed_head_id should exist."),
}
}
blocks_to_store
}
/// Given a list of heads, returns the committed one if it exists.
fn get_committed_head(&self, heads: &HashSet<HashValue>) -> Option<HashValue> {
let mut committed_head = None;
for head in heads {
let block = self
.id_to_block
.get(head)
.expect("Head should exist in id_to_block.");
if block.is_committed() {
assert!(
committed_head.is_none(),
"Conflicting blocks are both committed.",
);
committed_head = Some(*head);
}
}
committed_head
}
/// Removes a branch at block `head`.
fn remove_branch(&mut self, head: HashValue) {
let mut remaining = vec![head];
while let Some(current_block_id) = remaining.pop() {
let block = self
.id_to_block
.remove(¤t_block_id)
.unwrap_or_else(|| {
panic!(
"Trying to remove a non-existing block {:x}.",
current_block_id,
)
});
assert!(
!block.is_committed(),
"Trying to remove a committed block {:x}.",
current_block_id,
);
remaining.extend(block.children().iter());
}
}
/// Removes the entire subtree at block `id`.
pub fn remove_subtree(&mut self, id: HashValue) {
self.heads.remove(&id);
self.remove_branch(id);
}
/// Resets the block tree with a new `last_committed_id`. This removes all the in-memory
/// blocks.
pub fn reset(&m | lf, last_committed_id: HashValue) {
let mut | ut se | identifier_name |
mod.rs | zero or more children.
pub trait Block: std::fmt::Debug {
/// The output of executing this block.
type Output;
/// The signatures on this block.
type Signature;
/// Whether consensus has decided to commit this block. This kind of blocks are expected to be
/// sent to storage very soon, unless execution is lagging behind.
fn is_committed(&self) -> bool;
/// Marks this block as committed.
fn set_committed(&mut self);
/// Whether this block has finished execution.
fn is_executed(&self) -> bool;
/// Sets the output of this block.
fn set_output(&mut self, output: Self::Output);
/// Sets the signatures for this block.
fn set_signature(&mut self, signature: Self::Signature);
/// The id of this block.
fn id(&self) -> HashValue;
/// The id of the parent block.
fn parent_id(&self) -> HashValue;
/// Adds a block as its child.
fn add_child(&mut self, child_id: HashValue);
/// The list of children of this block.
fn children(&self) -> &HashSet<HashValue>;
}
/// The `BlockTree` implementation.
#[derive(Debug)]
pub struct BlockTree<B> {
/// A map that keeps track of all existing blocks by their ids.
id_to_block: HashMap<HashValue, B>,
/// The blocks at the lowest height in the map. B5 and B5' in the following example.
/// ```text
/// Committed(B0..4) -> B5 -> B6 -> B7
/// |
/// └--> B5' -> B6' -> B7'
/// |
/// └----> B7"
/// ```
heads: HashSet<HashValue>,
/// Id of the last committed block. B4 in the above example.
last_committed_id: HashValue,
}
impl<B> BlockTree<B>
where
B: Block,
{
/// Constructs a new `BlockTree`.
pub fn new(last_committed_id: HashValue) -> Self {
BlockTree {
id_to_block: HashMap::new(),
heads: HashSet::new(),
last_committed_id,
}
}
/// Adds a new block to the tree.
pub fn add_block(&mut self, block: B) -> Result<(), AddBlockError<B>> {
assert!(!self.id_to_block.contains_key(&self.last_committed_id));
let id = block.id();
if self.id_to_block.contains_key(&id) {
bail_err!(AddBlockError::BlockAlreadyExists { block });
}
let parent_id = block.parent_id();
if parent_id == self.last_committed_id {
assert!(self.heads.insert(id), "Block already existed in heads.");
self.id_to_block.insert(id, block);
return Ok(());
}
match self.id_to_block.entry(parent_id) {
hash_map::Entry::Occupied(mut entry) => {
entry.get_mut().add_child(id);
assert!(
self.id_to_block.insert(id, block).is_none(),
"Block {:x} already existed.",
id,
);
}
hash_map::Entry::Vacant(_) => bail_err!(AddBlockError::ParentNotFound { block }),
}
Ok(())
}
/// Returns a reference to a specific block, if it exists in the tree.
pub fn get_block(&self, id: HashValue) -> Option<&B> {
self.id_to_block.get(&id)
}
/// Returns a mutable reference to a specific block, if it exists in the tree.
pub fn get_block_mut(&mut self, id: HashValue) -> Option<&mut B> {
self.id_to_block.get_mut(&id)
}
/// Returns id of a block that is ready to be sent to VM for execution (its parent has finished
/// execution), if such block exists in the tree.
pub fn get_block_to_execute(&mut self) -> Option<HashValue> {
let mut to_visit: Vec<HashValue> = self.heads.iter().cloned().collect();
while let Some(id) = to_visit.pop() {
let block = self
.id_to_block
.get(&id)
.expect("Missing block in id_to_block.");
if !block.is_executed() {
return Some(id);
}
to_visit.extend(block.children().iter().cloned());
}
None
}
/// Marks given block and all its uncommitted ancestors as committed. This does not cause these
/// blocks to be sent to storage immediately.
pub fn mark_as_committed(
&mut self,
id: HashValue,
signature: B::Signature,
) -> Result<(), CommitBlockError> {
// First put the signatures in the block. Note that if this causes multiple blocks to be
// marked as committed, only the last one will have the signatures.
match self.id_to_block.get_mut(&id) {
Some(block) => {
if block.is_committed() {
bail_err!(CommitBlockError::BlockAlreadyMarkedAsCommitted { id });
} else {
block.set_signature(signature);
}
}
None => bail_err!(CommitBlockError::BlockNotFound { id }),
}
// Mark the current block as committed. Go to parent block and repeat until a committed
// block is found, or no more blocks.
let mut current_id = id;
while let Some(block) = self.id_to_block.get_mut(¤t_id) {
if block.is_committed() {
break;
}
block.set_committed();
current_id = block.parent_id();
}
Ok(())
}
/// Removes all blocks in the tree that conflict with committed blocks. Returns a list of
/// blocks that are ready to be sent to storage (all the committed blocks that have been
/// executed).
pub fn prune(&mut self) -> Vec<B> {
let mut blocks_to_store = vec![]; | let mut current_heads = self.heads.clone();
while let Some(committed_head) = self.get_committed_head(¤t_heads) {
assert!(
current_heads.remove(&committed_head),
"committed_head should exist.",
);
for id in current_heads {
self.remove_branch(id);
}
match self.id_to_block.entry(committed_head) {
hash_map::Entry::Occupied(entry) => {
current_heads = entry.get().children().clone();
let current_id = *entry.key();
let parent_id = entry.get().parent_id();
if entry.get().is_executed() {
// If this block has been executed, all its proper ancestors must have
// finished execution and present in `blocks_to_store`.
self.heads = current_heads.clone();
self.last_committed_id = current_id;
blocks_to_store.push(entry.remove());
} else {
// The current block has not finished execution. If the parent block does
// not exist in the map, that means parent block (also committed) has been
// executed and removed. Otherwise self.heads does not need to be changed.
if !self.id_to_block.contains_key(&parent_id) {
self.heads = HashSet::new();
self.heads.insert(current_id);
}
}
}
hash_map::Entry::Vacant(_) => unreachable!("committed_head_id should exist."),
}
}
blocks_to_store
}
/// Given a list of heads, returns the committed one if it exists.
fn get_committed_head(&self, heads: &HashSet<HashValue>) -> Option<HashValue> {
let mut committed_head = None;
for head in heads {
let block = self
.id_to_block
.get(head)
.expect("Head should exist in id_to_block.");
if block.is_committed() {
assert!(
committed_head.is_none(),
"Conflicting blocks are both committed.",
);
committed_head = Some(*head);
}
}
committed_head
}
/// Removes a branch at block `head`.
fn remove_branch(&mut self, head: HashValue) {
let mut remaining = vec![head];
while let Some(current_block_id) = remaining.pop() {
let block = self
.id_to_block
.remove(¤t_block_id)
.unwrap_or_else(|| {
panic!(
"Trying to remove a non-existing block {:x}.",
current_block_id,
)
});
assert!(
!block.is_committed(),
"Trying to remove a committed block {:x}.",
current_block_id,
);
remaining.extend(block.children().iter());
}
}
/// Removes the entire subtree at block `id`.
pub fn remove_subtree(&mut self, id: HashValue) {
self.heads.remove(&id);
self.remove_branch(id);
}
/// Resets the block tree with a new `last_committed_id`. This removes all the in-memory
/// blocks.
pub fn reset(&mut self, last_committed_id: HashValue) {
let mut |
// First find if there is a committed block in current heads. Since these blocks are at the
// same height, at most one of them can be committed. If all of them are pending we have
// nothing to do here. Otherwise, one of the branches is committed. Throw away the rest of
// them and advance to the next height. | random_line_split |
mod.rs | ) -> Result<libc::tm, Error> {
let mut result = new_libc_tm();
unsafe {
if libc::gmtime_r(&epoch, &mut result).is_null() {
bail!("libc::gmtime failed for '{}'", epoch);
}
}
Ok(result)
}
/// Returns Unix Epoch (now)
///
/// Note: This panics if the SystemTime::now() returns values not
/// repesentable as i64 (should never happen).
pub fn epoch_i64() -> i64 {
use std::convert::TryFrom;
use std::time::{SystemTime, UNIX_EPOCH};
let now = SystemTime::now();
if now > UNIX_EPOCH {
i64::try_from(now.duration_since(UNIX_EPOCH).unwrap().as_secs())
.expect("epoch_i64: now is too large")
} else {
-i64::try_from(UNIX_EPOCH.duration_since(now).unwrap().as_secs())
.expect("epoch_i64: now is too small")
}
}
/// Returns Unix Epoch (now) as f64 with subseconds resolution
///
/// Note: This can be inacurrate for values greater the 2^53. But this
/// should never happen.
pub fn epoch_f64() -> f64 {
use std::time::{SystemTime, UNIX_EPOCH};
let now = SystemTime::now();
if now > UNIX_EPOCH {
now.duration_since(UNIX_EPOCH).unwrap().as_secs_f64()
} else {
-UNIX_EPOCH.duration_since(now).unwrap().as_secs_f64()
}
}
// rust libc bindings do not include strftime
#[link(name = "c")]
extern "C" {
#[link_name = "strftime"]
fn libc_strftime(
s: *mut libc::c_char,
max: libc::size_t,
format: *const libc::c_char,
time: *const libc::tm,
) -> libc::size_t;
}
/// Safe bindings to libc strftime
pub fn strftime(format: &str, t: &libc::tm) -> Result<String, Error> {
let format = CString::new(format)?;
let mut buf = vec![0u8; 8192];
let res = unsafe {
libc_strftime(
buf.as_mut_ptr() as *mut libc::c_char,
buf.len() as libc::size_t,
format.as_ptr(),
t as *const libc::tm,
)
};
let len = nix::errno::Errno::result(res).map(|r| r as usize)?;
if len == 0 {
bail!("strftime: result len is 0 (string too large)");
};
let c_str = CStr::from_bytes_with_nul(&buf[..len + 1])?;
let str_slice: &str = c_str.to_str().unwrap();
Ok(str_slice.to_owned())
}
/// Format epoch as local time
pub fn strftime_local(format: &str, epoch: i64) -> Result<String, Error> {
let localtime = localtime(epoch)?;
strftime(format, &localtime)
}
/// Format epoch as utc time
pub fn strftime_utc(format: &str, epoch: i64) -> Result<String, Error> {
let gmtime = gmtime(epoch)?;
strftime(format, &gmtime)
}
/// Convert Unix epoch into RFC3339 UTC string
pub fn epoch_to_rfc3339_utc(epoch: i64) -> Result<String, Error> {
let gmtime = gmtime(epoch)?;
let year = gmtime.tm_year + 1900;
if year < 0 || year > 9999 {
bail!("epoch_to_rfc3339_utc: wrong year '{}'", year);
}
strftime("%010FT%TZ", &gmtime)
}
/// Convert Unix epoch into RFC3339 local time with TZ
pub fn epoch_to_rfc3339(epoch: i64) -> Result<String, Error> {
let localtime = localtime(epoch)?;
let year = localtime.tm_year + 1900;
if year < 0 || year > 9999 {
bail!("epoch_to_rfc3339: wrong year '{}'", year);
}
// Note: We cannot use strftime %z because of missing collon
let mut offset = localtime.tm_gmtoff;
let prefix = if offset < 0 {
offset = -offset;
'-'
} else {
'+'
};
let mins = offset / 60;
let hours = mins / 60;
let mins = mins % 60;
let mut s = strftime("%10FT%T", &localtime)?;
s.push(prefix);
s.push_str(&format!("{:02}:{:02}", hours, mins));
Ok(s)
}
/// Parse RFC3339 into Unix epoch
pub fn parse_rfc3339(input_str: &str) -> Result<i64, Error> {
let input = input_str.as_bytes();
let expect = |pos: usize, c: u8| {
if input[pos] != c {
bail!("unexpected char at pos {}", pos);
}
Ok(())
};
let digit = |pos: usize| -> Result<i32, Error> {
let digit = input[pos] as i32;
if digit < 48 || digit > 57 |
Ok(digit - 48)
};
let check_max = |i: i32, max: i32| {
if i > max {
bail!("value too large ({} > {})", i, max);
}
Ok(i)
};
crate::try_block!({
if input.len() < 20 || input.len() > 25 {
bail!("timestamp of unexpected length");
}
let tz = input[19];
match tz {
b'Z' => {
if input.len() != 20 {
bail!("unexpected length in UTC timestamp");
}
}
b'+' | b'-' => {
if input.len() != 25 {
bail!("unexpected length in timestamp");
}
}
_ => bail!("unexpected timezone indicator"),
}
let mut tm = TmEditor::new(true);
tm.set_year(digit(0)? * 1000 + digit(1)? * 100 + digit(2)? * 10 + digit(3)?)?;
expect(4, b'-')?;
tm.set_mon(check_max(digit(5)? * 10 + digit(6)?, 12)?)?;
expect(7, b'-')?;
tm.set_mday(check_max(digit(8)? * 10 + digit(9)?, 31)?)?;
expect(10, b'T')?;
tm.set_hour(check_max(digit(11)? * 10 + digit(12)?, 23)?)?;
expect(13, b':')?;
tm.set_min(check_max(digit(14)? * 10 + digit(15)?, 59)?)?;
expect(16, b':')?;
tm.set_sec(check_max(digit(17)? * 10 + digit(18)?, 60)?)?;
let epoch = tm.into_epoch()?;
if tz == b'Z' {
return Ok(epoch);
}
let hours = check_max(digit(20)? * 10 + digit(21)?, 23)?;
expect(22, b':')?;
let mins = check_max(digit(23)? * 10 + digit(24)?, 59)?;
let offset = (hours * 3600 + mins * 60) as i64;
let epoch = match tz {
b'+' => epoch - offset,
b'-' => epoch + offset,
_ => unreachable!(), // already checked above
};
Ok(epoch)
})
.map_err(|err| {
format_err!(
"failed to parse rfc3339 timestamp ({:?}) - {}",
input_str,
err
)
})
}
#[test]
fn test_leap_seconds() {
let convert_reconvert = |epoch| {
let rfc3339 =
epoch_to_rfc3339_utc(epoch).expect("leap second epoch to rfc3339 should work");
let parsed =
parse_rfc3339(&rfc3339).expect("parsing converted leap second epoch should work");
assert_eq!(epoch, parsed);
};
// 2005-12-31T23:59:59Z was followed by a leap second
let epoch = 1136073599;
convert_reconvert(epoch);
convert_reconvert(epoch + 1);
convert_reconvert(epoch + 2);
let parsed = parse_rfc3339("2005-12-31T23:59:60Z").expect("parsing leap second should work");
assert_eq!(parsed | {
bail!("unexpected char at pos {}", pos);
} | conditional_block |
mod.rs | ) -> Result<libc::tm, Error> {
let mut result = new_libc_tm();
unsafe {
if libc::gmtime_r(&epoch, &mut result).is_null() {
bail!("libc::gmtime failed for '{}'", epoch);
}
}
Ok(result)
}
/// Returns Unix Epoch (now)
///
/// Note: This panics if the SystemTime::now() returns values not
/// repesentable as i64 (should never happen).
pub fn epoch_i64() -> i64 {
use std::convert::TryFrom;
use std::time::{SystemTime, UNIX_EPOCH};
let now = SystemTime::now();
if now > UNIX_EPOCH {
i64::try_from(now.duration_since(UNIX_EPOCH).unwrap().as_secs())
.expect("epoch_i64: now is too large")
} else {
-i64::try_from(UNIX_EPOCH.duration_since(now).unwrap().as_secs())
.expect("epoch_i64: now is too small")
}
}
/// Returns Unix Epoch (now) as f64 with subseconds resolution
///
/// Note: This can be inacurrate for values greater the 2^53. But this
/// should never happen.
pub fn epoch_f64() -> f64 {
use std::time::{SystemTime, UNIX_EPOCH};
let now = SystemTime::now();
if now > UNIX_EPOCH {
now.duration_since(UNIX_EPOCH).unwrap().as_secs_f64()
} else {
-UNIX_EPOCH.duration_since(now).unwrap().as_secs_f64()
}
}
// rust libc bindings do not include strftime
#[link(name = "c")]
extern "C" {
#[link_name = "strftime"]
fn libc_strftime(
s: *mut libc::c_char,
max: libc::size_t,
format: *const libc::c_char,
time: *const libc::tm,
) -> libc::size_t;
}
/// Safe bindings to libc strftime
pub fn strftime(format: &str, t: &libc::tm) -> Result<String, Error> {
let format = CString::new(format)?;
let mut buf = vec![0u8; 8192];
let res = unsafe {
libc_strftime(
buf.as_mut_ptr() as *mut libc::c_char,
buf.len() as libc::size_t,
format.as_ptr(),
t as *const libc::tm,
)
};
let len = nix::errno::Errno::result(res).map(|r| r as usize)?;
if len == 0 {
bail!("strftime: result len is 0 (string too large)");
};
let c_str = CStr::from_bytes_with_nul(&buf[..len + 1])?;
let str_slice: &str = c_str.to_str().unwrap();
Ok(str_slice.to_owned())
}
/// Format epoch as local time
pub fn | (format: &str, epoch: i64) -> Result<String, Error> {
let localtime = localtime(epoch)?;
strftime(format, &localtime)
}
/// Format epoch as utc time
pub fn strftime_utc(format: &str, epoch: i64) -> Result<String, Error> {
let gmtime = gmtime(epoch)?;
strftime(format, &gmtime)
}
/// Convert Unix epoch into RFC3339 UTC string
pub fn epoch_to_rfc3339_utc(epoch: i64) -> Result<String, Error> {
let gmtime = gmtime(epoch)?;
let year = gmtime.tm_year + 1900;
if year < 0 || year > 9999 {
bail!("epoch_to_rfc3339_utc: wrong year '{}'", year);
}
strftime("%010FT%TZ", &gmtime)
}
/// Convert Unix epoch into RFC3339 local time with TZ
pub fn epoch_to_rfc3339(epoch: i64) -> Result<String, Error> {
let localtime = localtime(epoch)?;
let year = localtime.tm_year + 1900;
if year < 0 || year > 9999 {
bail!("epoch_to_rfc3339: wrong year '{}'", year);
}
// Note: We cannot use strftime %z because of missing collon
let mut offset = localtime.tm_gmtoff;
let prefix = if offset < 0 {
offset = -offset;
'-'
} else {
'+'
};
let mins = offset / 60;
let hours = mins / 60;
let mins = mins % 60;
let mut s = strftime("%10FT%T", &localtime)?;
s.push(prefix);
s.push_str(&format!("{:02}:{:02}", hours, mins));
Ok(s)
}
/// Parse RFC3339 into Unix epoch
pub fn parse_rfc3339(input_str: &str) -> Result<i64, Error> {
let input = input_str.as_bytes();
let expect = |pos: usize, c: u8| {
if input[pos] != c {
bail!("unexpected char at pos {}", pos);
}
Ok(())
};
let digit = |pos: usize| -> Result<i32, Error> {
let digit = input[pos] as i32;
if digit < 48 || digit > 57 {
bail!("unexpected char at pos {}", pos);
}
Ok(digit - 48)
};
let check_max = |i: i32, max: i32| {
if i > max {
bail!("value too large ({} > {})", i, max);
}
Ok(i)
};
crate::try_block!({
if input.len() < 20 || input.len() > 25 {
bail!("timestamp of unexpected length");
}
let tz = input[19];
match tz {
b'Z' => {
if input.len() != 20 {
bail!("unexpected length in UTC timestamp");
}
}
b'+' | b'-' => {
if input.len() != 25 {
bail!("unexpected length in timestamp");
}
}
_ => bail!("unexpected timezone indicator"),
}
let mut tm = TmEditor::new(true);
tm.set_year(digit(0)? * 1000 + digit(1)? * 100 + digit(2)? * 10 + digit(3)?)?;
expect(4, b'-')?;
tm.set_mon(check_max(digit(5)? * 10 + digit(6)?, 12)?)?;
expect(7, b'-')?;
tm.set_mday(check_max(digit(8)? * 10 + digit(9)?, 31)?)?;
expect(10, b'T')?;
tm.set_hour(check_max(digit(11)? * 10 + digit(12)?, 23)?)?;
expect(13, b':')?;
tm.set_min(check_max(digit(14)? * 10 + digit(15)?, 59)?)?;
expect(16, b':')?;
tm.set_sec(check_max(digit(17)? * 10 + digit(18)?, 60)?)?;
let epoch = tm.into_epoch()?;
if tz == b'Z' {
return Ok(epoch);
}
let hours = check_max(digit(20)? * 10 + digit(21)?, 23)?;
expect(22, b':')?;
let mins = check_max(digit(23)? * 10 + digit(24)?, 59)?;
let offset = (hours * 3600 + mins * 60) as i64;
let epoch = match tz {
b'+' => epoch - offset,
b'-' => epoch + offset,
_ => unreachable!(), // already checked above
};
Ok(epoch)
})
.map_err(|err| {
format_err!(
"failed to parse rfc3339 timestamp ({:?}) - {}",
input_str,
err
)
})
}
#[test]
fn test_leap_seconds() {
let convert_reconvert = |epoch| {
let rfc3339 =
epoch_to_rfc3339_utc(epoch).expect("leap second epoch to rfc3339 should work");
let parsed =
parse_rfc3339(&rfc3339).expect("parsing converted leap second epoch should work");
assert_eq!(epoch, parsed);
};
// 2005-12-31T23:59:59Z was followed by a leap second
let epoch = 1136073599;
convert_reconvert(epoch);
convert_reconvert(epoch + 1);
convert_reconvert(epoch + 2);
let parsed = parse_rfc3339("2005-12-31T23:59:60Z").expect("parsing leap second should work");
assert_eq!(parsed, | strftime_local | identifier_name |
mod.rs | ) -> Result<libc::tm, Error> {
let mut result = new_libc_tm();
unsafe {
if libc::gmtime_r(&epoch, &mut result).is_null() {
bail!("libc::gmtime failed for '{}'", epoch);
}
}
Ok(result)
}
/// Returns Unix Epoch (now)
///
/// Note: This panics if the SystemTime::now() returns values not
/// repesentable as i64 (should never happen).
pub fn epoch_i64() -> i64 {
use std::convert::TryFrom;
use std::time::{SystemTime, UNIX_EPOCH};
let now = SystemTime::now();
if now > UNIX_EPOCH {
i64::try_from(now.duration_since(UNIX_EPOCH).unwrap().as_secs())
.expect("epoch_i64: now is too large")
} else {
-i64::try_from(UNIX_EPOCH.duration_since(now).unwrap().as_secs())
.expect("epoch_i64: now is too small")
}
}
/// Returns Unix Epoch (now) as f64 with subseconds resolution
///
/// Note: This can be inacurrate for values greater the 2^53. But this
/// should never happen.
pub fn epoch_f64() -> f64 {
use std::time::{SystemTime, UNIX_EPOCH};
let now = SystemTime::now();
if now > UNIX_EPOCH {
now.duration_since(UNIX_EPOCH).unwrap().as_secs_f64()
} else {
-UNIX_EPOCH.duration_since(now).unwrap().as_secs_f64()
}
}
// rust libc bindings do not include strftime
#[link(name = "c")]
extern "C" {
#[link_name = "strftime"]
fn libc_strftime(
s: *mut libc::c_char,
max: libc::size_t,
format: *const libc::c_char,
time: *const libc::tm,
) -> libc::size_t;
}
/// Safe bindings to libc strftime
pub fn strftime(format: &str, t: &libc::tm) -> Result<String, Error> {
let format = CString::new(format)?;
let mut buf = vec![0u8; 8192];
let res = unsafe {
libc_strftime(
buf.as_mut_ptr() as *mut libc::c_char,
buf.len() as libc::size_t,
format.as_ptr(),
t as *const libc::tm,
)
};
let len = nix::errno::Errno::result(res).map(|r| r as usize)?;
if len == 0 {
bail!("strftime: result len is 0 (string too large)");
};
let c_str = CStr::from_bytes_with_nul(&buf[..len + 1])?;
let str_slice: &str = c_str.to_str().unwrap();
Ok(str_slice.to_owned())
}
/// Format epoch as local time
pub fn strftime_local(format: &str, epoch: i64) -> Result<String, Error> {
let localtime = localtime(epoch)?;
strftime(format, &localtime)
}
/// Format epoch as utc time
pub fn strftime_utc(format: &str, epoch: i64) -> Result<String, Error> |
/// Convert Unix epoch into RFC3339 UTC string
pub fn epoch_to_rfc3339_utc(epoch: i64) -> Result<String, Error> {
let gmtime = gmtime(epoch)?;
let year = gmtime.tm_year + 1900;
if year < 0 || year > 9999 {
bail!("epoch_to_rfc3339_utc: wrong year '{}'", year);
}
strftime("%010FT%TZ", &gmtime)
}
/// Convert Unix epoch into RFC3339 local time with TZ
pub fn epoch_to_rfc3339(epoch: i64) -> Result<String, Error> {
let localtime = localtime(epoch)?;
let year = localtime.tm_year + 1900;
if year < 0 || year > 9999 {
bail!("epoch_to_rfc3339: wrong year '{}'", year);
}
// Note: We cannot use strftime %z because of missing collon
let mut offset = localtime.tm_gmtoff;
let prefix = if offset < 0 {
offset = -offset;
'-'
} else {
'+'
};
let mins = offset / 60;
let hours = mins / 60;
let mins = mins % 60;
let mut s = strftime("%10FT%T", &localtime)?;
s.push(prefix);
s.push_str(&format!("{:02}:{:02}", hours, mins));
Ok(s)
}
/// Parse RFC3339 into Unix epoch
pub fn parse_rfc3339(input_str: &str) -> Result<i64, Error> {
let input = input_str.as_bytes();
let expect = |pos: usize, c: u8| {
if input[pos] != c {
bail!("unexpected char at pos {}", pos);
}
Ok(())
};
let digit = |pos: usize| -> Result<i32, Error> {
let digit = input[pos] as i32;
if digit < 48 || digit > 57 {
bail!("unexpected char at pos {}", pos);
}
Ok(digit - 48)
};
let check_max = |i: i32, max: i32| {
if i > max {
bail!("value too large ({} > {})", i, max);
}
Ok(i)
};
crate::try_block!({
if input.len() < 20 || input.len() > 25 {
bail!("timestamp of unexpected length");
}
let tz = input[19];
match tz {
b'Z' => {
if input.len() != 20 {
bail!("unexpected length in UTC timestamp");
}
}
b'+' | b'-' => {
if input.len() != 25 {
bail!("unexpected length in timestamp");
}
}
_ => bail!("unexpected timezone indicator"),
}
let mut tm = TmEditor::new(true);
tm.set_year(digit(0)? * 1000 + digit(1)? * 100 + digit(2)? * 10 + digit(3)?)?;
expect(4, b'-')?;
tm.set_mon(check_max(digit(5)? * 10 + digit(6)?, 12)?)?;
expect(7, b'-')?;
tm.set_mday(check_max(digit(8)? * 10 + digit(9)?, 31)?)?;
expect(10, b'T')?;
tm.set_hour(check_max(digit(11)? * 10 + digit(12)?, 23)?)?;
expect(13, b':')?;
tm.set_min(check_max(digit(14)? * 10 + digit(15)?, 59)?)?;
expect(16, b':')?;
tm.set_sec(check_max(digit(17)? * 10 + digit(18)?, 60)?)?;
let epoch = tm.into_epoch()?;
if tz == b'Z' {
return Ok(epoch);
}
let hours = check_max(digit(20)? * 10 + digit(21)?, 23)?;
expect(22, b':')?;
let mins = check_max(digit(23)? * 10 + digit(24)?, 59)?;
let offset = (hours * 3600 + mins * 60) as i64;
let epoch = match tz {
b'+' => epoch - offset,
b'-' => epoch + offset,
_ => unreachable!(), // already checked above
};
Ok(epoch)
})
.map_err(|err| {
format_err!(
"failed to parse rfc3339 timestamp ({:?}) - {}",
input_str,
err
)
})
}
#[test]
fn test_leap_seconds() {
let convert_reconvert = |epoch| {
let rfc3339 =
epoch_to_rfc3339_utc(epoch).expect("leap second epoch to rfc3339 should work");
let parsed =
parse_rfc3339(&rfc3339).expect("parsing converted leap second epoch should work");
assert_eq!(epoch, parsed);
};
// 2005-12-31T23:59:59Z was followed by a leap second
let epoch = 1136073599;
convert_reconvert(epoch);
convert_reconvert(epoch + 1);
convert_reconvert(epoch + 2);
let parsed = parse_rfc3339("2005-12-31T23:59:60Z").expect("parsing leap second should work");
assert_eq!(parsed | {
let gmtime = gmtime(epoch)?;
strftime(format, &gmtime)
} | identifier_body |
mod.rs | POCH.duration_since(now).unwrap().as_secs())
.expect("epoch_i64: now is too small")
}
}
/// Returns Unix Epoch (now) as f64 with subseconds resolution
///
/// Note: This can be inacurrate for values greater the 2^53. But this
/// should never happen.
pub fn epoch_f64() -> f64 {
use std::time::{SystemTime, UNIX_EPOCH};
let now = SystemTime::now();
if now > UNIX_EPOCH {
now.duration_since(UNIX_EPOCH).unwrap().as_secs_f64()
} else {
-UNIX_EPOCH.duration_since(now).unwrap().as_secs_f64()
}
}
// rust libc bindings do not include strftime
#[link(name = "c")]
extern "C" {
#[link_name = "strftime"]
fn libc_strftime(
s: *mut libc::c_char,
max: libc::size_t,
format: *const libc::c_char,
time: *const libc::tm,
) -> libc::size_t;
}
/// Safe bindings to libc strftime
pub fn strftime(format: &str, t: &libc::tm) -> Result<String, Error> {
let format = CString::new(format)?;
let mut buf = vec![0u8; 8192];
let res = unsafe {
libc_strftime(
buf.as_mut_ptr() as *mut libc::c_char,
buf.len() as libc::size_t,
format.as_ptr(),
t as *const libc::tm,
)
};
let len = nix::errno::Errno::result(res).map(|r| r as usize)?;
if len == 0 {
bail!("strftime: result len is 0 (string too large)");
};
let c_str = CStr::from_bytes_with_nul(&buf[..len + 1])?;
let str_slice: &str = c_str.to_str().unwrap();
Ok(str_slice.to_owned())
}
/// Format epoch as local time
pub fn strftime_local(format: &str, epoch: i64) -> Result<String, Error> {
let localtime = localtime(epoch)?;
strftime(format, &localtime)
}
/// Format epoch as utc time
pub fn strftime_utc(format: &str, epoch: i64) -> Result<String, Error> {
let gmtime = gmtime(epoch)?;
strftime(format, &gmtime)
}
/// Convert Unix epoch into RFC3339 UTC string
pub fn epoch_to_rfc3339_utc(epoch: i64) -> Result<String, Error> {
let gmtime = gmtime(epoch)?;
let year = gmtime.tm_year + 1900;
if year < 0 || year > 9999 {
bail!("epoch_to_rfc3339_utc: wrong year '{}'", year);
}
strftime("%010FT%TZ", &gmtime)
}
/// Convert Unix epoch into RFC3339 local time with TZ
pub fn epoch_to_rfc3339(epoch: i64) -> Result<String, Error> {
let localtime = localtime(epoch)?;
let year = localtime.tm_year + 1900;
if year < 0 || year > 9999 {
bail!("epoch_to_rfc3339: wrong year '{}'", year);
}
// Note: We cannot use strftime %z because of missing collon
let mut offset = localtime.tm_gmtoff;
let prefix = if offset < 0 {
offset = -offset;
'-'
} else {
'+'
};
let mins = offset / 60;
let hours = mins / 60;
let mins = mins % 60;
let mut s = strftime("%10FT%T", &localtime)?;
s.push(prefix);
s.push_str(&format!("{:02}:{:02}", hours, mins));
Ok(s)
}
/// Parse RFC3339 into Unix epoch
pub fn parse_rfc3339(input_str: &str) -> Result<i64, Error> {
let input = input_str.as_bytes();
let expect = |pos: usize, c: u8| {
if input[pos] != c {
bail!("unexpected char at pos {}", pos);
}
Ok(())
};
let digit = |pos: usize| -> Result<i32, Error> {
let digit = input[pos] as i32;
if digit < 48 || digit > 57 {
bail!("unexpected char at pos {}", pos);
}
Ok(digit - 48)
};
let check_max = |i: i32, max: i32| {
if i > max {
bail!("value too large ({} > {})", i, max);
}
Ok(i)
};
crate::try_block!({
if input.len() < 20 || input.len() > 25 {
bail!("timestamp of unexpected length");
}
let tz = input[19];
match tz {
b'Z' => {
if input.len() != 20 {
bail!("unexpected length in UTC timestamp");
}
}
b'+' | b'-' => {
if input.len() != 25 {
bail!("unexpected length in timestamp");
}
}
_ => bail!("unexpected timezone indicator"),
}
let mut tm = TmEditor::new(true);
tm.set_year(digit(0)? * 1000 + digit(1)? * 100 + digit(2)? * 10 + digit(3)?)?;
expect(4, b'-')?;
tm.set_mon(check_max(digit(5)? * 10 + digit(6)?, 12)?)?;
expect(7, b'-')?;
tm.set_mday(check_max(digit(8)? * 10 + digit(9)?, 31)?)?;
expect(10, b'T')?;
tm.set_hour(check_max(digit(11)? * 10 + digit(12)?, 23)?)?;
expect(13, b':')?;
tm.set_min(check_max(digit(14)? * 10 + digit(15)?, 59)?)?;
expect(16, b':')?;
tm.set_sec(check_max(digit(17)? * 10 + digit(18)?, 60)?)?;
let epoch = tm.into_epoch()?;
if tz == b'Z' {
return Ok(epoch);
}
let hours = check_max(digit(20)? * 10 + digit(21)?, 23)?;
expect(22, b':')?;
let mins = check_max(digit(23)? * 10 + digit(24)?, 59)?;
let offset = (hours * 3600 + mins * 60) as i64;
let epoch = match tz {
b'+' => epoch - offset,
b'-' => epoch + offset,
_ => unreachable!(), // already checked above
};
Ok(epoch)
})
.map_err(|err| {
format_err!(
"failed to parse rfc3339 timestamp ({:?}) - {}",
input_str,
err
)
})
}
#[test]
fn test_leap_seconds() {
let convert_reconvert = |epoch| {
let rfc3339 =
epoch_to_rfc3339_utc(epoch).expect("leap second epoch to rfc3339 should work");
let parsed =
parse_rfc3339(&rfc3339).expect("parsing converted leap second epoch should work");
assert_eq!(epoch, parsed);
};
// 2005-12-31T23:59:59Z was followed by a leap second
let epoch = 1136073599;
convert_reconvert(epoch);
convert_reconvert(epoch + 1);
convert_reconvert(epoch + 2);
let parsed = parse_rfc3339("2005-12-31T23:59:60Z").expect("parsing leap second should work");
assert_eq!(parsed, epoch + 1);
}
#[test]
fn test_rfc3339_range() {
// also tests single-digit years/first decade values
let lower = -62167219200;
let lower_str = "0000-01-01T00:00:00Z";
let upper = 253402300799;
let upper_str = "9999-12-31T23:59:59Z";
let converted =
epoch_to_rfc3339_utc(lower).expect("converting lower bound of RFC3339 range should work");
assert_eq!(converted, lower_str);
| let converted =
epoch_to_rfc3339_utc(upper).expect("converting upper bound of RFC3339 range should work");
assert_eq!(converted, upper_str);
| random_line_split | |
GP.py | :
self._Xmean = X.mean(0)[None,:]
self._Xstd = X.std(0)[None,:]
self.X = (X.copy() - self._Xmean) / self._Xstd
if hasattr(self,'Z'):
self.Z = (self.Z - self._Xmean) / self._Xstd
else:
self._Xmean = np.zeros((1,self.X.shape[1]))
self._Xstd = np.ones((1,self.X.shape[1]))
self.likelihood = likelihood
#assert self.X.shape[0] == self.likelihood.Y.shape[0]
#self.N, self.D = self.likelihood.Y.shape
assert self.X.shape[0] == self.likelihood.data.shape[0]
self.N, self.D = self.likelihood.data.shape
model.__init__(self)
def dL_dZ(self):
"""
TODO: one day we might like to learn Z by gradient methods?
"""
return np.zeros_like(self.Z)
def _set_params(self,p):
self.kern._set_params_transformed(p[:self.kern.Nparam])
#self.likelihood._set_params(p[self.kern.Nparam:]) # test by Nicolas
self.likelihood._set_params(p[self.kern.Nparam_transformed():]) # test by Nicolas
self.K = self.kern.K(self.X,slices1=self.Xslices,slices2=self.Xslices)
self.K += self.likelihood.covariance_matrix
self.Ki, self.L, self.Li, self.K_logdet = pdinv(self.K)
#the gradient of the likelihood wrt the covariance matrix
if self.likelihood.YYT is None:
alpha = np.dot(self.Ki,self.likelihood.Y)
self.dL_dK = 0.5*(np.dot(alpha,alpha.T)-self.D*self.Ki)
else:
tmp = mdot(self.Ki, self.likelihood.YYT, self.Ki)
self.dL_dK = 0.5*(tmp - self.D*self.Ki)
def | (self):
return np.hstack((self.kern._get_params_transformed(), self.likelihood._get_params()))
def _get_param_names(self):
return self.kern._get_param_names_transformed() + self.likelihood._get_param_names()
def update_likelihood_approximation(self):
"""
Approximates a non-gaussian likelihood using Expectation Propagation
For a Gaussian (or direct: TODO) likelihood, no iteration is required:
this function does nothing
"""
self.likelihood.fit_full(self.kern.K(self.X))
self._set_params(self._get_params()) # update the GP
def _model_fit_term(self):
"""
Computes the model fit using YYT if it's available
"""
if self.likelihood.YYT is None:
return -0.5*np.sum(np.square(np.dot(self.Li,self.likelihood.Y)))
else:
return -0.5*np.sum(np.multiply(self.Ki, self.likelihood.YYT))
def log_likelihood(self):
"""
The log marginal likelihood of the GP.
For an EP model, can be written as the log likelihood of a regression
model for a new variable Y* = v_tilde/tau_tilde, with a covariance
matrix K* = K + diag(1./tau_tilde) plus a normalization term.
"""
return -0.5*self.D*self.K_logdet + self._model_fit_term() + self.likelihood.Z
def _log_likelihood_gradients(self):
"""
The gradient of all parameters.
For the kernel parameters, use the chain rule via dL_dK
For the likelihood parameters, pass in alpha = K^-1 y
"""
return np.hstack((self.kern.dK_dtheta(dL_dK=self.dL_dK,X=self.X,slices1=self.Xslices,slices2=self.Xslices), self.likelihood._gradients(partial=np.diag(self.dL_dK))))
def _raw_predict(self,_Xnew,slices=None, full_cov=False):
"""
Internal helper function for making predictions, does not account
for normalization or likelihood
"""
Kx = self.kern.K(self.X,_Xnew, slices1=self.Xslices,slices2=slices)
mu = np.dot(np.dot(Kx.T,self.Ki),self.likelihood.Y)
KiKx = np.dot(self.Ki,Kx)
if full_cov:
Kxx = self.kern.K(_Xnew, slices1=slices,slices2=slices)
var = Kxx - np.dot(KiKx.T,Kx)
else:
Kxx = self.kern.Kdiag(_Xnew, slices=slices)
var = Kxx - np.sum(np.multiply(KiKx,Kx),0)
var = var[:,None]
return mu, var
def predict(self,Xnew, slices=None, full_cov=False):
"""
Predict the function(s) at the new point(s) Xnew.
Arguments
---------
:param Xnew: The points at which to make a prediction
:type Xnew: np.ndarray, Nnew x self.Q
:param slices: specifies which outputs kernel(s) the Xnew correspond to (see below)
:type slices: (None, list of slice objects, list of ints)
:param full_cov: whether to return the folll covariance matrix, or just the diagonal
:type full_cov: bool
:rtype: posterior mean, a Numpy array, Nnew x self.D
:rtype: posterior variance, a Numpy array, Nnew x 1 if full_cov=False, Nnew x Nnew otherwise
:rtype: lower and upper boundaries of the 95% confidence intervals, Numpy arrays, Nnew x self.D
.. Note:: "slices" specifies how the the points X_new co-vary wich the training points.
- If None, the new points covary throigh every kernel part (default)
- If a list of slices, the i^th slice specifies which data are affected by the i^th kernel part
- If a list of booleans, specifying which kernel parts are active
If full_cov and self.D > 1, the return shape of var is Nnew x Nnew x self.D. If self.D == 1, the return shape is Nnew x Nnew.
This is to allow for different normalizations of the output dimensions.
"""
#normalize X values
Xnew = (Xnew.copy() - self._Xmean) / self._Xstd
mu, var = self._raw_predict(Xnew, slices, full_cov)
#now push through likelihood TODO
mean, var, _025pm, _975pm = self.likelihood.predictive_values(mu, var, full_cov)
return mean, var, _025pm, _975pm
def plot_f(self, samples=0, plot_limits=None, which_data='all', which_functions='all', resolution=None, full_cov=False):
"""
Plot the GP's view of the world, where the data is normalized and the likelihood is Gaussian
:param samples: the number of a posteriori samples to plot
:param which_data: which if the training data to plot (default all)
:type which_data: 'all' or a slice object to slice self.X, self.Y
:param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaluts to data limits
:param which_functions: which of the kernel functions to plot (additively)
:type which_functions: list of bools
:param resolution: the number of intervals to sample the GP on. Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D
Plot the posterior of the GP.
- In one dimension, the function is plotted with a shaded region identifying two standard deviations.
- In two dimsensions, a contour-plot shows the mean predicted function
- In higher dimensions, we've no implemented this yet !TODO!
Can plot only part of the data and part of the posterior functions using which_data and which_functions
Plot the data's view of the world, with non-normalized values and GP predictions passed through the likelihood
"""
if which_functions=='all':
which_functions = [True]*self.kern.Nparts
if which_data=='all':
which_data = slice(None)
if self.X.shape[1] == 1:
Xnew, xmin, xmax = x_frame1D(self.X, plot_limits=plot_limits)
if samples == 0:
m,v = self._raw_predict(Xnew, slices=which_functions)
gpplot(Xnew,m,m-2*np.sqrt(v),m+2*np.sqrt(v))
pb.plot(self.X[which_data],self.likelihood.Y[which_data],'kx',mew=1.5)
else:
m,v = self._raw_predict(Xnew, slices=which_functions,full_cov=True)
Ysim = np.random.multivariate_normal(m.flatten(),v,samples)
gpplot(Xnew,m,m-2*np.sqrt(np.diag(v)[:,None]),m+2*np.sqrt(np.diag(v))[:,None])
for | _get_params | identifier_name |
GP.py |
# parse arguments
self.Xslices = Xslices
self.X = X
assert len(self.X.shape)==2
self.N, self.Q = self.X.shape
assert isinstance(kernel, kern.kern)
self.kern = kernel
#here's some simple normalization for the inputs
if normalize_X:
self._Xmean = X.mean(0)[None,:]
self._Xstd = X.std(0)[None,:]
self.X = (X.copy() - self._Xmean) / self._Xstd
if hasattr(self,'Z'):
self.Z = (self.Z - self._Xmean) / self._Xstd
else:
self._Xmean = np.zeros((1,self.X.shape[1]))
self._Xstd = np.ones((1,self.X.shape[1]))
self.likelihood = likelihood
#assert self.X.shape[0] == self.likelihood.Y.shape[0]
#self.N, self.D = self.likelihood.Y.shape
assert self.X.shape[0] == self.likelihood.data.shape[0]
self.N, self.D = self.likelihood.data.shape
model.__init__(self)
def dL_dZ(self):
"""
TODO: one day we might like to learn Z by gradient methods?
"""
return np.zeros_like(self.Z)
def _set_params(self,p):
self.kern._set_params_transformed(p[:self.kern.Nparam])
#self.likelihood._set_params(p[self.kern.Nparam:]) # test by Nicolas
self.likelihood._set_params(p[self.kern.Nparam_transformed():]) # test by Nicolas
self.K = self.kern.K(self.X,slices1=self.Xslices,slices2=self.Xslices)
self.K += self.likelihood.covariance_matrix
self.Ki, self.L, self.Li, self.K_logdet = pdinv(self.K)
#the gradient of the likelihood wrt the covariance matrix
if self.likelihood.YYT is None:
alpha = np.dot(self.Ki,self.likelihood.Y)
self.dL_dK = 0.5*(np.dot(alpha,alpha.T)-self.D*self.Ki)
else:
tmp = mdot(self.Ki, self.likelihood.YYT, self.Ki)
self.dL_dK = 0.5*(tmp - self.D*self.Ki)
def _get_params(self):
return np.hstack((self.kern._get_params_transformed(), self.likelihood._get_params()))
def _get_param_names(self):
return self.kern._get_param_names_transformed() + self.likelihood._get_param_names()
def update_likelihood_approximation(self):
"""
Approximates a non-gaussian likelihood using Expectation Propagation
For a Gaussian (or direct: TODO) likelihood, no iteration is required:
this function does nothing
"""
self.likelihood.fit_full(self.kern.K(self.X))
self._set_params(self._get_params()) # update the GP
def _model_fit_term(self):
"""
Computes the model fit using YYT if it's available
"""
if self.likelihood.YYT is None:
return -0.5*np.sum(np.square(np.dot(self.Li,self.likelihood.Y)))
else:
return -0.5*np.sum(np.multiply(self.Ki, self.likelihood.YYT))
def log_likelihood(self):
"""
The log marginal likelihood of the GP.
For an EP model, can be written as the log likelihood of a regression
model for a new variable Y* = v_tilde/tau_tilde, with a covariance
matrix K* = K + diag(1./tau_tilde) plus a normalization term.
"""
return -0.5*self.D*self.K_logdet + self._model_fit_term() + self.likelihood.Z
def _log_likelihood_gradients(self):
"""
The gradient of all parameters.
For the kernel parameters, use the chain rule via dL_dK
For the likelihood parameters, pass in alpha = K^-1 y
"""
return np.hstack((self.kern.dK_dtheta(dL_dK=self.dL_dK,X=self.X,slices1=self.Xslices,slices2=self.Xslices), self.likelihood._gradients(partial=np.diag(self.dL_dK))))
def _raw_predict(self,_Xnew,slices=None, full_cov=False):
"""
Internal helper function for making predictions, does not account
for normalization or likelihood
"""
Kx = self.kern.K(self.X,_Xnew, slices1=self.Xslices,slices2=slices)
mu = np.dot(np.dot(Kx.T,self.Ki),self.likelihood.Y)
KiKx = np.dot(self.Ki,Kx)
if full_cov:
Kxx = self.kern.K(_Xnew, slices1=slices,slices2=slices)
var = Kxx - np.dot(KiKx.T,Kx)
else:
Kxx = self.kern.Kdiag(_Xnew, slices=slices)
var = Kxx - np.sum(np.multiply(KiKx,Kx),0)
var = var[:,None]
return mu, var
def predict(self,Xnew, slices=None, full_cov=False):
"""
Predict the function(s) at the new point(s) Xnew.
Arguments
---------
:param Xnew: The points at which to make a prediction
:type Xnew: np.ndarray, Nnew x self.Q
:param slices: specifies which outputs kernel(s) the Xnew correspond to (see below)
:type slices: (None, list of slice objects, list of ints)
:param full_cov: whether to return the folll covariance matrix, or just the diagonal
:type full_cov: bool
:rtype: posterior mean, a Numpy array, Nnew x self.D
:rtype: posterior variance, a Numpy array, Nnew x 1 if full_cov=False, Nnew x Nnew otherwise
:rtype: lower and upper boundaries of the 95% confidence intervals, Numpy arrays, Nnew x self.D
.. Note:: "slices" specifies how the the points X_new co-vary wich the training points.
- If None, the new points covary throigh every kernel part (default)
- If a list of slices, the i^th slice specifies which data are affected by the i^th kernel part
- If a list of booleans, specifying which kernel parts are active
If full_cov and self.D > 1, the return shape of var is Nnew x Nnew x self.D. If self.D == 1, the return shape is Nnew x Nnew.
This is to allow for different normalizations of the output dimensions.
"""
#normalize X values
Xnew = (Xnew.copy() - self._Xmean) / self._Xstd
mu, var = self._raw_predict(Xnew, slices, full_cov)
#now push through likelihood TODO
mean, var, _025pm, _975pm = self.likelihood.predictive_values(mu, var, full_cov)
return mean, var, _025pm, _975pm
def plot_f(self, samples=0, plot_limits=None, which_data='all', which_functions='all', resolution=None, full_cov=False):
"""
Plot the GP's view of the world, where the data is normalized and the likelihood is Gaussian
:param samples: the number of a posteriori samples to plot
:param which_data: which if the training data to plot (default all)
:type which_data: 'all' or a slice object to slice self.X, self.Y
:param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaluts to data limits
:param which_functions: which of the kernel functions to plot (additively)
:type which_functions: list of bools
:param resolution: the number of intervals to sample the GP on. Defaults to 200 in 1D and 50 (a | """
Gaussian Process model for regression and EP
:param X: input observations
:param kernel: a GPy kernel, defaults to rbf+white
:parm likelihood: a GPy likelihood
:param normalize_X: whether to normalize the input data before computing (predictions will be in original scales)
:type normalize_X: False|True
:param normalize_Y: whether to normalize the input data before computing (predictions will be in original scales)
:type normalize_Y: False|True
:param Xslices: how the X,Y data co-vary in the kernel (i.e. which "outputs" they correspond to). See (link:slicing)
:rtype: model object
:param epsilon_ep: convergence criterion for the Expectation Propagation algorithm, defaults to 0.1
:param powerep: power-EP parameters [$\eta$,$\delta$], defaults to [1.,1.]
:type powerep: list
.. Note:: Multiple independent outputs are allowed using columns of Y
"""
def __init__(self, X, likelihood, kernel, normalize_X=False, Xslices=None): | identifier_body | |
GP.py | :
self._Xmean = X.mean(0)[None,:]
self._Xstd = X.std(0)[None,:]
self.X = (X.copy() - self._Xmean) / self._Xstd
if hasattr(self,'Z'):
self.Z = (self.Z - self._Xmean) / self._Xstd
else:
self._Xmean = np.zeros((1,self.X.shape[1]))
self._Xstd = np.ones((1,self.X.shape[1]))
self.likelihood = likelihood
#assert self.X.shape[0] == self.likelihood.Y.shape[0]
#self.N, self.D = self.likelihood.Y.shape
assert self.X.shape[0] == self.likelihood.data.shape[0]
self.N, self.D = self.likelihood.data.shape
model.__init__(self)
def dL_dZ(self):
"""
TODO: one day we might like to learn Z by gradient methods?
"""
return np.zeros_like(self.Z)
def _set_params(self,p):
self.kern._set_params_transformed(p[:self.kern.Nparam])
#self.likelihood._set_params(p[self.kern.Nparam:]) # test by Nicolas
self.likelihood._set_params(p[self.kern.Nparam_transformed():]) # test by Nicolas
self.K = self.kern.K(self.X,slices1=self.Xslices,slices2=self.Xslices)
self.K += self.likelihood.covariance_matrix
self.Ki, self.L, self.Li, self.K_logdet = pdinv(self.K)
#the gradient of the likelihood wrt the covariance matrix
if self.likelihood.YYT is None:
alpha = np.dot(self.Ki,self.likelihood.Y)
self.dL_dK = 0.5*(np.dot(alpha,alpha.T)-self.D*self.Ki)
else:
tmp = mdot(self.Ki, self.likelihood.YYT, self.Ki)
self.dL_dK = 0.5*(tmp - self.D*self.Ki)
def _get_params(self):
return np.hstack((self.kern._get_params_transformed(), self.likelihood._get_params()))
def _get_param_names(self):
return self.kern._get_param_names_transformed() + self.likelihood._get_param_names()
def update_likelihood_approximation(self):
"""
Approximates a non-gaussian likelihood using Expectation Propagation
For a Gaussian (or direct: TODO) likelihood, no iteration is required:
this function does nothing
"""
self.likelihood.fit_full(self.kern.K(self.X))
self._set_params(self._get_params()) # update the GP
def _model_fit_term(self):
"""
Computes the model fit using YYT if it's available
"""
if self.likelihood.YYT is None:
return -0.5*np.sum(np.square(np.dot(self.Li,self.likelihood.Y)))
else:
return -0.5*np.sum(np.multiply(self.Ki, self.likelihood.YYT))
def log_likelihood(self):
"""
The log marginal likelihood of the GP.
For an EP model, can be written as the log likelihood of a regression
model for a new variable Y* = v_tilde/tau_tilde, with a covariance
matrix K* = K + diag(1./tau_tilde) plus a normalization term.
"""
return -0.5*self.D*self.K_logdet + self._model_fit_term() + self.likelihood.Z
def _log_likelihood_gradients(self):
"""
The gradient of all parameters.
For the kernel parameters, use the chain rule via dL_dK
For the likelihood parameters, pass in alpha = K^-1 y
"""
return np.hstack((self.kern.dK_dtheta(dL_dK=self.dL_dK,X=self.X,slices1=self.Xslices,slices2=self.Xslices), self.likelihood._gradients(partial=np.diag(self.dL_dK))))
def _raw_predict(self,_Xnew,slices=None, full_cov=False):
"""
Internal helper function for making predictions, does not account
for normalization or likelihood
"""
Kx = self.kern.K(self.X,_Xnew, slices1=self.Xslices,slices2=slices)
mu = np.dot(np.dot(Kx.T,self.Ki),self.likelihood.Y)
KiKx = np.dot(self.Ki,Kx)
if full_cov:
Kxx = self.kern.K(_Xnew, slices1=slices,slices2=slices)
var = Kxx - np.dot(KiKx.T,Kx)
else:
Kxx = self.kern.Kdiag(_Xnew, slices=slices)
var = Kxx - np.sum(np.multiply(KiKx,Kx),0)
var = var[:,None]
return mu, var
def predict(self,Xnew, slices=None, full_cov=False):
"""
Predict the function(s) at the new point(s) Xnew.
Arguments
---------
:param Xnew: The points at which to make a prediction
:type Xnew: np.ndarray, Nnew x self.Q
:param slices: specifies which outputs kernel(s) the Xnew correspond to (see below)
:type slices: (None, list of slice objects, list of ints)
:param full_cov: whether to return the folll covariance matrix, or just the diagonal
:type full_cov: bool
:rtype: posterior mean, a Numpy array, Nnew x self.D
:rtype: posterior variance, a Numpy array, Nnew x 1 if full_cov=False, Nnew x Nnew otherwise
:rtype: lower and upper boundaries of the 95% confidence intervals, Numpy arrays, Nnew x self.D
.. Note:: "slices" specifies how the the points X_new co-vary wich the training points.
- If None, the new points covary throigh every kernel part (default)
- If a list of slices, the i^th slice specifies which data are affected by the i^th kernel part
- If a list of booleans, specifying which kernel parts are active
If full_cov and self.D > 1, the return shape of var is Nnew x Nnew x self.D. If self.D == 1, the return shape is Nnew x Nnew.
This is to allow for different normalizations of the output dimensions.
"""
#normalize X values
Xnew = (Xnew.copy() - self._Xmean) / self._Xstd
mu, var = self._raw_predict(Xnew, slices, full_cov)
#now push through likelihood TODO
mean, var, _025pm, _975pm = self.likelihood.predictive_values(mu, var, full_cov)
return mean, var, _025pm, _975pm
def plot_f(self, samples=0, plot_limits=None, which_data='all', which_functions='all', resolution=None, full_cov=False):
""" |
:param samples: the number of a posteriori samples to plot
:param which_data: which if the training data to plot (default all)
:type which_data: 'all' or a slice object to slice self.X, self.Y
:param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaluts to data limits
:param which_functions: which of the kernel functions to plot (additively)
:type which_functions: list of bools
:param resolution: the number of intervals to sample the GP on. Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D
Plot the posterior of the GP.
- In one dimension, the function is plotted with a shaded region identifying two standard deviations.
- In two dimsensions, a contour-plot shows the mean predicted function
- In higher dimensions, we've no implemented this yet !TODO!
Can plot only part of the data and part of the posterior functions using which_data and which_functions
Plot the data's view of the world, with non-normalized values and GP predictions passed through the likelihood
"""
if which_functions=='all':
which_functions = [True]*self.kern.Nparts
if which_data=='all':
which_data = slice(None)
if self.X.shape[1] == 1:
Xnew, xmin, xmax = x_frame1D(self.X, plot_limits=plot_limits)
if samples == 0:
m,v = self._raw_predict(Xnew, slices=which_functions)
gpplot(Xnew,m,m-2*np.sqrt(v),m+2*np.sqrt(v))
pb.plot(self.X[which_data],self.likelihood.Y[which_data],'kx',mew=1.5)
else:
m,v = self._raw_predict(Xnew, slices=which_functions,full_cov=True)
Ysim = np.random.multivariate_normal(m.flatten(),v,samples)
gpplot(Xnew,m,m-2*np.sqrt(np.diag(v)[:,None]),m+2*np.sqrt(np.diag(v))[:,None])
for | Plot the GP's view of the world, where the data is normalized and the likelihood is Gaussian | random_line_split |
GP.py | :
self._Xmean = X.mean(0)[None,:]
self._Xstd = X.std(0)[None,:]
self.X = (X.copy() - self._Xmean) / self._Xstd
if hasattr(self,'Z'):
self.Z = (self.Z - self._Xmean) / self._Xstd
else:
self._Xmean = np.zeros((1,self.X.shape[1]))
self._Xstd = np.ones((1,self.X.shape[1]))
self.likelihood = likelihood
#assert self.X.shape[0] == self.likelihood.Y.shape[0]
#self.N, self.D = self.likelihood.Y.shape
assert self.X.shape[0] == self.likelihood.data.shape[0]
self.N, self.D = self.likelihood.data.shape
model.__init__(self)
def dL_dZ(self):
"""
TODO: one day we might like to learn Z by gradient methods?
"""
return np.zeros_like(self.Z)
def _set_params(self,p):
self.kern._set_params_transformed(p[:self.kern.Nparam])
#self.likelihood._set_params(p[self.kern.Nparam:]) # test by Nicolas
self.likelihood._set_params(p[self.kern.Nparam_transformed():]) # test by Nicolas
self.K = self.kern.K(self.X,slices1=self.Xslices,slices2=self.Xslices)
self.K += self.likelihood.covariance_matrix
self.Ki, self.L, self.Li, self.K_logdet = pdinv(self.K)
#the gradient of the likelihood wrt the covariance matrix
if self.likelihood.YYT is None:
alpha = np.dot(self.Ki,self.likelihood.Y)
self.dL_dK = 0.5*(np.dot(alpha,alpha.T)-self.D*self.Ki)
else:
tmp = mdot(self.Ki, self.likelihood.YYT, self.Ki)
self.dL_dK = 0.5*(tmp - self.D*self.Ki)
def _get_params(self):
return np.hstack((self.kern._get_params_transformed(), self.likelihood._get_params()))
def _get_param_names(self):
return self.kern._get_param_names_transformed() + self.likelihood._get_param_names()
def update_likelihood_approximation(self):
"""
Approximates a non-gaussian likelihood using Expectation Propagation
For a Gaussian (or direct: TODO) likelihood, no iteration is required:
this function does nothing
"""
self.likelihood.fit_full(self.kern.K(self.X))
self._set_params(self._get_params()) # update the GP
def _model_fit_term(self):
"""
Computes the model fit using YYT if it's available
"""
if self.likelihood.YYT is None:
return -0.5*np.sum(np.square(np.dot(self.Li,self.likelihood.Y)))
else:
return -0.5*np.sum(np.multiply(self.Ki, self.likelihood.YYT))
def log_likelihood(self):
"""
The log marginal likelihood of the GP.
For an EP model, can be written as the log likelihood of a regression
model for a new variable Y* = v_tilde/tau_tilde, with a covariance
matrix K* = K + diag(1./tau_tilde) plus a normalization term.
"""
return -0.5*self.D*self.K_logdet + self._model_fit_term() + self.likelihood.Z
def _log_likelihood_gradients(self):
"""
The gradient of all parameters.
For the kernel parameters, use the chain rule via dL_dK
For the likelihood parameters, pass in alpha = K^-1 y
"""
return np.hstack((self.kern.dK_dtheta(dL_dK=self.dL_dK,X=self.X,slices1=self.Xslices,slices2=self.Xslices), self.likelihood._gradients(partial=np.diag(self.dL_dK))))
def _raw_predict(self,_Xnew,slices=None, full_cov=False):
"""
Internal helper function for making predictions, does not account
for normalization or likelihood
"""
Kx = self.kern.K(self.X,_Xnew, slices1=self.Xslices,slices2=slices)
mu = np.dot(np.dot(Kx.T,self.Ki),self.likelihood.Y)
KiKx = np.dot(self.Ki,Kx)
if full_cov:
Kxx = self.kern.K(_Xnew, slices1=slices,slices2=slices)
var = Kxx - np.dot(KiKx.T,Kx)
else:
Kxx = self.kern.Kdiag(_Xnew, slices=slices)
var = Kxx - np.sum(np.multiply(KiKx,Kx),0)
var = var[:,None]
return mu, var
def predict(self,Xnew, slices=None, full_cov=False):
"""
Predict the function(s) at the new point(s) Xnew.
Arguments
---------
:param Xnew: The points at which to make a prediction
:type Xnew: np.ndarray, Nnew x self.Q
:param slices: specifies which outputs kernel(s) the Xnew correspond to (see below)
:type slices: (None, list of slice objects, list of ints)
:param full_cov: whether to return the folll covariance matrix, or just the diagonal
:type full_cov: bool
:rtype: posterior mean, a Numpy array, Nnew x self.D
:rtype: posterior variance, a Numpy array, Nnew x 1 if full_cov=False, Nnew x Nnew otherwise
:rtype: lower and upper boundaries of the 95% confidence intervals, Numpy arrays, Nnew x self.D
.. Note:: "slices" specifies how the the points X_new co-vary wich the training points.
- If None, the new points covary throigh every kernel part (default)
- If a list of slices, the i^th slice specifies which data are affected by the i^th kernel part
- If a list of booleans, specifying which kernel parts are active
If full_cov and self.D > 1, the return shape of var is Nnew x Nnew x self.D. If self.D == 1, the return shape is Nnew x Nnew.
This is to allow for different normalizations of the output dimensions.
"""
#normalize X values
Xnew = (Xnew.copy() - self._Xmean) / self._Xstd
mu, var = self._raw_predict(Xnew, slices, full_cov)
#now push through likelihood TODO
mean, var, _025pm, _975pm = self.likelihood.predictive_values(mu, var, full_cov)
return mean, var, _025pm, _975pm
def plot_f(self, samples=0, plot_limits=None, which_data='all', which_functions='all', resolution=None, full_cov=False):
"""
Plot the GP's view of the world, where the data is normalized and the likelihood is Gaussian
:param samples: the number of a posteriori samples to plot
:param which_data: which if the training data to plot (default all)
:type which_data: 'all' or a slice object to slice self.X, self.Y
:param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaluts to data limits
:param which_functions: which of the kernel functions to plot (additively)
:type which_functions: list of bools
:param resolution: the number of intervals to sample the GP on. Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D
Plot the posterior of the GP.
- In one dimension, the function is plotted with a shaded region identifying two standard deviations.
- In two dimsensions, a contour-plot shows the mean predicted function
- In higher dimensions, we've no implemented this yet !TODO!
Can plot only part of the data and part of the posterior functions using which_data and which_functions
Plot the data's view of the world, with non-normalized values and GP predictions passed through the likelihood
"""
if which_functions=='all':
which_functions = [True]*self.kern.Nparts
if which_data=='all':
which_data = slice(None)
if self.X.shape[1] == 1:
Xnew, xmin, xmax = x_frame1D(self.X, plot_limits=plot_limits)
if samples == 0:
|
else:
m,v = self._raw_predict(Xnew, slices=which_functions,full_cov=True)
Ysim = np.random.multivariate_normal(m.flatten(),v,samples)
gpplot(Xnew,m,m-2*np.sqrt(np.diag(v)[:,None]),m+2*np.sqrt(np.diag(v))[:,None])
| m,v = self._raw_predict(Xnew, slices=which_functions)
gpplot(Xnew,m,m-2*np.sqrt(v),m+2*np.sqrt(v))
pb.plot(self.X[which_data],self.likelihood.Y[which_data],'kx',mew=1.5) | conditional_block |
vision.py | 2([
[-5, -1. * -105], #22
[90, -1. * -100], #27
[90, -1. * 110], #26
[0, -1. * 107] #25
])#*self.IMG_SCALE + self.IMG_OFFSET'''
# Swap x-y coordinates (WTF!)
'''self.worldpts = np.float32([
[-105,-5], #22
[-100, 90], #27
[110, 90], #26
[107, 0] #25
])#*self.IMG_SCALE + self.IMG_OFFSET'''
self.worldpts = np.float32([
[-104,-2], #22
[-104,85], #27
[115,84], #26
[115,3] #25
])
self.worldpts = vu.toImageCoordinates(self.worldpts)
testPts = vu.toWaypointCoordinates(self.worldpts)
print 'TestWorldPts', str(testPts)
# ===== *************** ===== #
### Camera initialization ###
print 'Opening Camera ' + str(camera)
self.vidcap = cv2.VideoCapture(camera)# Open up specified camera
# Check if camera is opened and exit if not
if self.vidcap.isOpened():
print 'Camera ' + str(camera) + ' opened successfully'
else:
print 'ERROR: Camera ' + str(camera) + ' not opened'
return False
# Set camera autoexposure
uvc.set(self.camera, uvc.EXPOSURE_AUTO, 1)
uvc.set(self.camera, uvc.EXPOSURE_AUTO_PRIORITY, 0)
### Initialize UI elements ###
# Filter Controls Window
ctlWindow = cv2.namedWindow(self.CTL_NAME)
cv2.createTrackbar('Blue', self.CTL_NAME, 88, 180, self.trackbarChangeHandler)
cv2.createTrackbar('Green', self.CTL_NAME, 41, 180, self.trackbarChangeHandler)
cv2.createTrackbar('Red', self.CTL_NAME, 172, 180, self.trackbarChangeHandler)
cv2.createTrackbar('B Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler)
cv2.createTrackbar('G Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler)
cv2.createTrackbar('R Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler)
cv2.createTrackbar('Sat Cutoff', self.CTL_NAME, 100, 255, self.trackbarChangeHandler)
cv2.createTrackbar('Show Background', self.CTL_NAME, 1, 1, self.trackbarChangeHandler)
# Camera input window
camWindow = cv2.namedWindow(self.CAM_FEED_NAME)
cv2.createTrackbar('Gain', self.CAM_FEED_NAME, 128, 255, self.gainChanged)
cv2.createTrackbar('Exposure', self.CAM_FEED_NAME, 1600, 2000, self.exposureChanged)
cv2.createTrackbar('Saturation', self.CAM_FEED_NAME, 128, 255, self.saturationChanged)
cv2.setMouseCallback(self.CAM_FEED_NAME, self.mouseClickHandler) # Set mouse callbacks for calibration
# Rectified/Calibrated Image window
#calWindow = cv2.namedWindow(self.CAL_NAME)
#cv2.setMouseCallback(self.CAL_NAME, self.colorClickHandler)
# Image processing Window 2
procWindow = cv2.namedWindow(self.PROC_NAME)
# History for filter bank
self.xHistory = deque(self.EMPTY_KERNEL)
self.yHistory = deque(self.EMPTY_KERNEL)
self.thetaHistory = deque(self.EMPTY_KERNEL)
# Run vision on a frame
def processFrame(self):
### Main processing loop ###
#while(True):
frameRet, self.camImg = self.vidcap.read()
#Img = self.drawCalMarkers()
cv2.imshow(self.CAM_FEED_NAME, self.drawCalMarkers())
if(self.calstate == CalState.CALIBRATED):
self.remapImage() # Apply perspective warp
bl = cv2.getTrackbarPos('Blue', self.CTL_NAME)
gr = cv2.getTrackbarPos('Green', self.CTL_NAME)
rd = cv2.getTrackbarPos('Red', self.CTL_NAME)
bvmin = cv2.getTrackbarPos('B Cutoff', self.CTL_NAME)
gvmin = cv2.getTrackbarPos('G Cutoff', self.CTL_NAME) | rCentroid, self.rTagImg = self.findMarker(self.warpImg, rd, 10, smin, rvmin)
#vu.printCentroids(gCentroid, rCentroid)
if(bgroundFlag):
self.rgbImg = vu.comboImage(self.bTagImg, self.gTagImg, self.rTagImg, self.warpImg)
else:
self.rgbImg = vu.comboImage(self.bTagImg, self.gTagImg, self.rTagImg)
ctr, theta, bCtr, gCtr, rCtr = vu.localizeRobot(bCentroid, gCentroid, rCentroid)
if((ctr != None) and (theta != None)):
fctr, ftheta = self.filterPoints(ctr, theta)
self.x_est = ctr[0]
self.y_est = ctr[1]
# print 'Theta IN:', theta
self.theta_est = theta#ftheta
self.tagLoc = vu.computeTagLocation(ctr, bCtr) # Compute tag location
vu.drawSquareMarker(self.rgbImg, int(fctr[0]), int(fctr[1]), 5, (255,0,255))
if(gCentroid != None):
vu.drawSquareMarker(self.rgbImg, int(gCentroid[0]), int(gCentroid[1]), 5, (0,0,255))
if(rCentroid != None):
vu.drawSquareMarker(self.rgbImg, int(rCentroid[0]), int(rCentroid[1]), 5, (255,0,0))
if(bCentroid != None):
vu.drawSquareMarker(self.rgbImg, int(bCentroid[0]), int(bCentroid[1]), 5, (255,255,0))
wpIndex = 0
for wp in self.waypointEst:
wpIndex = wpIndex + 1
if(wpIndex == 1):
wpcolor = (0,0,255)
else:
wpcolor = (0,255,255)
vu.drawFilledCircleMarker(self.rgbImg, wp[0], wp[1], 10, wpcolor) #
vu.drawTextIndex(self.rgbImg, wp[0], wp[1], str(wpIndex)) # Draw waypoint index
if(self.tagLoc[0] != None):
vu.drawFilledCircleMarker(self.rgbImg, self.tagLoc[0], self.tagLoc[1], 5, (0,0,160))
#vu.drawVector(self.rgbImg, self.fVectorStart, self.fVectorEnd, (255,128,255))
#cv2.imshow(self.CAL_NAME, self.warpImg)
cv2.imshow(self.PROC_NAME, self.rgbImg)
#if cv2.waitKey(20) & 0xFF == ord('q'):
# break
# Use current perspective transform to remap image
def remapImage(self):
if(self.calstate == CalState.CALIBRATED):
self.warpImg = cv2.warpPerspective(self.camImg, self.warp,(int(300*vu.IMG_SCALE),int(300*vu.IMG_SCALE)))
self.warpImg = cv2.GaussianBlur(self.warpImg, (9,9), 1)
self.warpImg = cv2.medianBlur(self.warpImg, 5)
else:
print 'Transform not calibrated'
# Draws calibration markers on the camera image
def drawCalMarkers(self):
markedImg = self | rvmin = cv2.getTrackbarPos('R Cutoff', self.CTL_NAME)
smin = cv2.getTrackbarPos('Sat Cutoff', self.CTL_NAME)
bgroundFlag = cv2.getTrackbarPos('Show Background', self.CTL_NAME)
bCentroid, self.bTagImg = self.findMarker(self.warpImg, bl, 10, smin, bvmin)
gCentroid, self.gTagImg = self.findMarker(self.warpImg, gr, 10, smin, gvmin) | random_line_split |
vision.py | cv2.imshow(self.CAM_FEED_NAME, self.drawCalMarkers())
if(self.calstate == CalState.CALIBRATED):
self.remapImage() # Apply perspective warp
bl = cv2.getTrackbarPos('Blue', self.CTL_NAME)
gr = cv2.getTrackbarPos('Green', self.CTL_NAME)
rd = cv2.getTrackbarPos('Red', self.CTL_NAME)
bvmin = cv2.getTrackbarPos('B Cutoff', self.CTL_NAME)
gvmin = cv2.getTrackbarPos('G Cutoff', self.CTL_NAME)
rvmin = cv2.getTrackbarPos('R Cutoff', self.CTL_NAME)
smin = cv2.getTrackbarPos('Sat Cutoff', self.CTL_NAME)
bgroundFlag = cv2.getTrackbarPos('Show Background', self.CTL_NAME)
bCentroid, self.bTagImg = self.findMarker(self.warpImg, bl, 10, smin, bvmin)
gCentroid, self.gTagImg = self.findMarker(self.warpImg, gr, 10, smin, gvmin)
rCentroid, self.rTagImg = self.findMarker(self.warpImg, rd, 10, smin, rvmin)
#vu.printCentroids(gCentroid, rCentroid)
if(bgroundFlag):
self.rgbImg = vu.comboImage(self.bTagImg, self.gTagImg, self.rTagImg, self.warpImg)
else:
self.rgbImg = vu.comboImage(self.bTagImg, self.gTagImg, self.rTagImg)
ctr, theta, bCtr, gCtr, rCtr = vu.localizeRobot(bCentroid, gCentroid, rCentroid)
if((ctr != None) and (theta != None)):
fctr, ftheta = self.filterPoints(ctr, theta)
self.x_est = ctr[0]
self.y_est = ctr[1]
# print 'Theta IN:', theta
self.theta_est = theta#ftheta
self.tagLoc = vu.computeTagLocation(ctr, bCtr) # Compute tag location
vu.drawSquareMarker(self.rgbImg, int(fctr[0]), int(fctr[1]), 5, (255,0,255))
if(gCentroid != None):
vu.drawSquareMarker(self.rgbImg, int(gCentroid[0]), int(gCentroid[1]), 5, (0,0,255))
if(rCentroid != None):
vu.drawSquareMarker(self.rgbImg, int(rCentroid[0]), int(rCentroid[1]), 5, (255,0,0))
if(bCentroid != None):
vu.drawSquareMarker(self.rgbImg, int(bCentroid[0]), int(bCentroid[1]), 5, (255,255,0))
wpIndex = 0
for wp in self.waypointEst:
wpIndex = wpIndex + 1
if(wpIndex == 1):
wpcolor = (0,0,255)
else:
wpcolor = (0,255,255)
vu.drawFilledCircleMarker(self.rgbImg, wp[0], wp[1], 10, wpcolor) #
vu.drawTextIndex(self.rgbImg, wp[0], wp[1], str(wpIndex)) # Draw waypoint index
if(self.tagLoc[0] != None):
vu.drawFilledCircleMarker(self.rgbImg, self.tagLoc[0], self.tagLoc[1], 5, (0,0,160))
#vu.drawVector(self.rgbImg, self.fVectorStart, self.fVectorEnd, (255,128,255))
#cv2.imshow(self.CAL_NAME, self.warpImg)
cv2.imshow(self.PROC_NAME, self.rgbImg)
#if cv2.waitKey(20) & 0xFF == ord('q'):
# break
# Use current perspective transform to remap image
def remapImage(self):
if(self.calstate == CalState.CALIBRATED):
self.warpImg = cv2.warpPerspective(self.camImg, self.warp,(int(300*vu.IMG_SCALE),int(300*vu.IMG_SCALE)))
self.warpImg = cv2.GaussianBlur(self.warpImg, (9,9), 1)
self.warpImg = cv2.medianBlur(self.warpImg, 5)
else:
print 'Transform not calibrated'
# Draws calibration markers on the camera image
def drawCalMarkers(self):
markedImg = self.camImg.copy()
for pt in self.calpts:
vu.drawSquareMarker(markedImg, pt[0], pt[1], 5, (255,0,255))
return markedImg
# Finds a marker's central moment
def findMarker(self, image, hueCenter, hueWidth, satMin, valMin):
hsvImg = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
markerImg = cv2.inRange(hsvImg, np.array([hueCenter-hueWidth/2, satMin, valMin]), np.array([hueCenter+hueWidth/2, 255, 255]))
cleanElement = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3))
markerImg = cv2.erode(markerImg, cleanElement) # Clean up marker image w/ erode-dilate-median
markerImg = cv2.dilate(markerImg, cleanElement)
markerImg = cv2.medianBlur(markerImg, 3)
mMoments = cv2.moments(markerImg) # Compute moments
m00 = mMoments['m00']
if(m00 > 0.1):
return (mMoments['m10']/m00, mMoments['m01']/m00), markerImg
return None, markerImg
# FIR on centers and angles
def filterPoints(self, ctr, theta):
if((ctr != None) and (theta != None)):
if(len(self.xHistory) == len(self.FIR_KERNEL)):
self.xHistory.popleft()
if(len(self.yHistory) == len(self.FIR_KERNEL)):
self.yHistory.popleft()
if(len(self.thetaHistory) == len(self.FIR_KERNEL)):
self.thetaHistory.popleft()
self.xHistory.append(ctr[0])
self.yHistory.append(ctr[1])
self.thetaHistory.append(theta)
xFilter = np.linalg.norm(np.multiply(self.FIR_KERNEL, np.array(self.xHistory)),1)
yFilter = np.linalg.norm(np.multiply(self.FIR_KERNEL, np.array(self.yHistory)),1)
thetaFilter = np.linalg.norm(np.multiply(self.FIR_KERNEL, np.array(self.thetaHistory)),1)
#print 'Filtered Phi:', phiFilter, ' Raw Theta:', theta
return (xFilter, yFilter), thetaFilter
# Interface to get current state estimates
def getState(self):
# Give estimated [x,y,theta]
if(self.tagLoc != None):
tx = self.tagLoc[0]
ty = self.tagLoc[1]
else:
tx = None
ty = None
return [self.x_est, self.y_est, self.theta_est, tx, ty]
### Event Handlers ###
# Camera input mouseclick handler
def mouseClickHandler(self, event, x, y, flags, param):
if event == cv2.EVENT_RBUTTONDOWN:
print 'Recalibration requested'
self.calstate = CalState.CAL_PROG
self.calpts = [] # Reset calibration points
if event == cv2.EVENT_LBUTTONDOWN:
print 'Mouse left click event at ' + str(x) + ',' + str(y)
if(self.calstate == CalState.UNCAL):
self.calstate = CalState.CAL_PROG
print 'Adding calibration point at (' + str(x) + ',' + str(y) + ')'
self.calpts.append([x,y])
elif(self.calstate == CalState.CAL_PROG):
if(len(self.calpts) < 4):
print 'Adding calibration point at (' + str(x) + ',' + str(y) + ')'
self.calpts.append([x,y])
# Finish
if(len(self.calpts) == 4):
print 'Calibrated'
self.warp = cv2.getPerspectiveTransform(np.float32(self.calpts), self.worldpts)
print str(self.calpts)
self.calstate = CalState.CALIBRATED
elif(self.calstate == CalState.CALIBRATED):
print 'Already calibrated'
# Color click handler for cal window
def colorClickHandler(self, event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
print 'Checking marker 1 color at ', str(x), ',', str(y)
pass # Get color at point
if event == cv2.EVENT_RBUTTONDOWN:
print 'Checking marker 2 color at ', str(x), ',', str(y)
pass # Get color at point
# Generic do-nothing slider handler (for )
def | trackbarChangeHandler | identifier_name | |
vision.py | [90, -1. * 110], #26
[0, -1. * 107] #25
])#*self.IMG_SCALE + self.IMG_OFFSET'''
# Swap x-y coordinates (WTF!)
'''self.worldpts = np.float32([
[-105,-5], #22
[-100, 90], #27
[110, 90], #26
[107, 0] #25
])#*self.IMG_SCALE + self.IMG_OFFSET'''
self.worldpts = np.float32([
[-104,-2], #22
[-104,85], #27
[115,84], #26
[115,3] #25
])
self.worldpts = vu.toImageCoordinates(self.worldpts)
testPts = vu.toWaypointCoordinates(self.worldpts)
print 'TestWorldPts', str(testPts)
# ===== *************** ===== #
### Camera initialization ###
print 'Opening Camera ' + str(camera)
self.vidcap = cv2.VideoCapture(camera)# Open up specified camera
# Check if camera is opened and exit if not
if self.vidcap.isOpened():
print 'Camera ' + str(camera) + ' opened successfully'
else:
print 'ERROR: Camera ' + str(camera) + ' not opened'
return False
# Set camera autoexposure
uvc.set(self.camera, uvc.EXPOSURE_AUTO, 1)
uvc.set(self.camera, uvc.EXPOSURE_AUTO_PRIORITY, 0)
### Initialize UI elements ###
# Filter Controls Window
ctlWindow = cv2.namedWindow(self.CTL_NAME)
cv2.createTrackbar('Blue', self.CTL_NAME, 88, 180, self.trackbarChangeHandler)
cv2.createTrackbar('Green', self.CTL_NAME, 41, 180, self.trackbarChangeHandler)
cv2.createTrackbar('Red', self.CTL_NAME, 172, 180, self.trackbarChangeHandler)
cv2.createTrackbar('B Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler)
cv2.createTrackbar('G Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler)
cv2.createTrackbar('R Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler)
cv2.createTrackbar('Sat Cutoff', self.CTL_NAME, 100, 255, self.trackbarChangeHandler)
cv2.createTrackbar('Show Background', self.CTL_NAME, 1, 1, self.trackbarChangeHandler)
# Camera input window
camWindow = cv2.namedWindow(self.CAM_FEED_NAME)
cv2.createTrackbar('Gain', self.CAM_FEED_NAME, 128, 255, self.gainChanged)
cv2.createTrackbar('Exposure', self.CAM_FEED_NAME, 1600, 2000, self.exposureChanged)
cv2.createTrackbar('Saturation', self.CAM_FEED_NAME, 128, 255, self.saturationChanged)
cv2.setMouseCallback(self.CAM_FEED_NAME, self.mouseClickHandler) # Set mouse callbacks for calibration
# Rectified/Calibrated Image window
#calWindow = cv2.namedWindow(self.CAL_NAME)
#cv2.setMouseCallback(self.CAL_NAME, self.colorClickHandler)
# Image processing Window 2
procWindow = cv2.namedWindow(self.PROC_NAME)
# History for filter bank
self.xHistory = deque(self.EMPTY_KERNEL)
self.yHistory = deque(self.EMPTY_KERNEL)
self.thetaHistory = deque(self.EMPTY_KERNEL)
# Run vision on a frame
def processFrame(self):
### Main processing loop ###
#while(True):
frameRet, self.camImg = self.vidcap.read()
#Img = self.drawCalMarkers()
cv2.imshow(self.CAM_FEED_NAME, self.drawCalMarkers())
if(self.calstate == CalState.CALIBRATED):
self.remapImage() # Apply perspective warp
bl = cv2.getTrackbarPos('Blue', self.CTL_NAME)
gr = cv2.getTrackbarPos('Green', self.CTL_NAME)
rd = cv2.getTrackbarPos('Red', self.CTL_NAME)
bvmin = cv2.getTrackbarPos('B Cutoff', self.CTL_NAME)
gvmin = cv2.getTrackbarPos('G Cutoff', self.CTL_NAME)
rvmin = cv2.getTrackbarPos('R Cutoff', self.CTL_NAME)
smin = cv2.getTrackbarPos('Sat Cutoff', self.CTL_NAME)
bgroundFlag = cv2.getTrackbarPos('Show Background', self.CTL_NAME)
bCentroid, self.bTagImg = self.findMarker(self.warpImg, bl, 10, smin, bvmin)
gCentroid, self.gTagImg = self.findMarker(self.warpImg, gr, 10, smin, gvmin)
rCentroid, self.rTagImg = self.findMarker(self.warpImg, rd, 10, smin, rvmin)
#vu.printCentroids(gCentroid, rCentroid)
if(bgroundFlag):
self.rgbImg = vu.comboImage(self.bTagImg, self.gTagImg, self.rTagImg, self.warpImg)
else:
self.rgbImg = vu.comboImage(self.bTagImg, self.gTagImg, self.rTagImg)
ctr, theta, bCtr, gCtr, rCtr = vu.localizeRobot(bCentroid, gCentroid, rCentroid)
if((ctr != None) and (theta != None)):
fctr, ftheta = self.filterPoints(ctr, theta)
self.x_est = ctr[0]
self.y_est = ctr[1]
# print 'Theta IN:', theta
self.theta_est = theta#ftheta
self.tagLoc = vu.computeTagLocation(ctr, bCtr) # Compute tag location
vu.drawSquareMarker(self.rgbImg, int(fctr[0]), int(fctr[1]), 5, (255,0,255))
if(gCentroid != None):
vu.drawSquareMarker(self.rgbImg, int(gCentroid[0]), int(gCentroid[1]), 5, (0,0,255))
if(rCentroid != None):
vu.drawSquareMarker(self.rgbImg, int(rCentroid[0]), int(rCentroid[1]), 5, (255,0,0))
if(bCentroid != None):
vu.drawSquareMarker(self.rgbImg, int(bCentroid[0]), int(bCentroid[1]), 5, (255,255,0))
wpIndex = 0
for wp in self.waypointEst:
wpIndex = wpIndex + 1
if(wpIndex == 1):
wpcolor = (0,0,255)
else:
wpcolor = (0,255,255)
vu.drawFilledCircleMarker(self.rgbImg, wp[0], wp[1], 10, wpcolor) #
vu.drawTextIndex(self.rgbImg, wp[0], wp[1], str(wpIndex)) # Draw waypoint index
if(self.tagLoc[0] != None):
vu.drawFilledCircleMarker(self.rgbImg, self.tagLoc[0], self.tagLoc[1], 5, (0,0,160))
#vu.drawVector(self.rgbImg, self.fVectorStart, self.fVectorEnd, (255,128,255))
#cv2.imshow(self.CAL_NAME, self.warpImg)
cv2.imshow(self.PROC_NAME, self.rgbImg)
#if cv2.waitKey(20) & 0xFF == ord('q'):
# break
# Use current perspective transform to remap image
def remapImage(self):
if(self.calstate == CalState.CALIBRATED):
self.warpImg = cv2.warpPerspective(self.camImg, self.warp,(int(300*vu.IMG_SCALE),int(300*vu.IMG_SCALE)))
self.warpImg = cv2.GaussianBlur(self.warpImg, (9,9), 1)
self.warpImg = cv2.medianBlur(self.warpImg, 5)
else:
print 'Transform not calibrated'
# Draws calibration markers on the camera image
def drawCalMarkers(self):
markedImg = self.camImg.copy()
for pt in self.calpts:
| vu.drawSquareMarker(markedImg, pt[0], pt[1], 5, (255,0,255)) | conditional_block | |
vision.py | # [self.XSIZE,self.YSIZE/2]
# ])
# ===== ***** Calibration points from world *****===== #
'''self.worldpts = np.float32([
[-5, -1. * -105], #22
[90, -1. * -100], #27
[90, -1. * 110], #26
[0, -1. * 107] #25
])#*self.IMG_SCALE + self.IMG_OFFSET'''
# Swap x-y coordinates (WTF!)
'''self.worldpts = np.float32([
[-105,-5], #22
[-100, 90], #27
[110, 90], #26
[107, 0] #25
])#*self.IMG_SCALE + self.IMG_OFFSET'''
self.worldpts = np.float32([
[-104,-2], #22
[-104,85], #27
[115,84], #26
[115,3] #25
])
self.worldpts = vu.toImageCoordinates(self.worldpts)
testPts = vu.toWaypointCoordinates(self.worldpts)
print 'TestWorldPts', str(testPts)
# ===== *************** ===== #
### Camera initialization ###
print 'Opening Camera ' + str(camera)
self.vidcap = cv2.VideoCapture(camera)# Open up specified camera
# Check if camera is opened and exit if not
if self.vidcap.isOpened():
print 'Camera ' + str(camera) + ' opened successfully'
else:
print 'ERROR: Camera ' + str(camera) + ' not opened'
return False
# Set camera autoexposure
uvc.set(self.camera, uvc.EXPOSURE_AUTO, 1)
uvc.set(self.camera, uvc.EXPOSURE_AUTO_PRIORITY, 0)
### Initialize UI elements ###
# Filter Controls Window
ctlWindow = cv2.namedWindow(self.CTL_NAME)
cv2.createTrackbar('Blue', self.CTL_NAME, 88, 180, self.trackbarChangeHandler)
cv2.createTrackbar('Green', self.CTL_NAME, 41, 180, self.trackbarChangeHandler)
cv2.createTrackbar('Red', self.CTL_NAME, 172, 180, self.trackbarChangeHandler)
cv2.createTrackbar('B Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler)
cv2.createTrackbar('G Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler)
cv2.createTrackbar('R Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler)
cv2.createTrackbar('Sat Cutoff', self.CTL_NAME, 100, 255, self.trackbarChangeHandler)
cv2.createTrackbar('Show Background', self.CTL_NAME, 1, 1, self.trackbarChangeHandler)
# Camera input window
camWindow = cv2.namedWindow(self.CAM_FEED_NAME)
cv2.createTrackbar('Gain', self.CAM_FEED_NAME, 128, 255, self.gainChanged)
cv2.createTrackbar('Exposure', self.CAM_FEED_NAME, 1600, 2000, self.exposureChanged)
cv2.createTrackbar('Saturation', self.CAM_FEED_NAME, 128, 255, self.saturationChanged)
cv2.setMouseCallback(self.CAM_FEED_NAME, self.mouseClickHandler) # Set mouse callbacks for calibration
# Rectified/Calibrated Image window
#calWindow = cv2.namedWindow(self.CAL_NAME)
#cv2.setMouseCallback(self.CAL_NAME, self.colorClickHandler)
# Image processing Window 2
procWindow = cv2.namedWindow(self.PROC_NAME)
# History for filter bank
self.xHistory = deque(self.EMPTY_KERNEL)
self.yHistory = deque(self.EMPTY_KERNEL)
self.thetaHistory = deque(self.EMPTY_KERNEL)
# Run vision on a frame
def processFrame(self):
### Main processing loop ###
#while(True):
frameRet, self.camImg = self.vidcap.read()
#Img = self.drawCalMarkers()
cv2.imshow(self.CAM_FEED_NAME, self.drawCalMarkers())
if(self.calstate == CalState.CALIBRATED):
self.remapImage() # Apply perspective warp
bl = cv2.getTrackbarPos('Blue', self.CTL_NAME)
gr = cv2.getTrackbarPos('Green', self.CTL_NAME)
rd = cv2.getTrackbarPos('Red', self.CTL_NAME)
bvmin = cv2.getTrackbarPos('B Cutoff', self.CTL_NAME)
gvmin = cv2.getTrackbarPos('G Cutoff', self.CTL_NAME)
rvmin = cv2.getTrackbarPos('R Cutoff', self.CTL_NAME)
smin = cv2.getTrackbarPos('Sat Cutoff', self.CTL_NAME)
bgroundFlag = cv2.getTrackbarPos('Show Background', self.CTL_NAME)
bCentroid, self.bTagImg = self.findMarker(self.warpImg, bl, 10, smin, bvmin)
gCentroid, self.gTagImg = self.findMarker(self.warpImg, gr, 10, smin, gvmin)
rCentroid, self.rTagImg = self.findMarker(self.warpImg, rd, 10, smin, rvmin)
#vu.printCentroids(gCentroid, rCentroid)
if(bgroundFlag):
self.rgbImg = vu.comboImage(self.bTagImg, self.gTagImg, self.rTagImg, self.warpImg)
else:
self.rgbImg = vu.comboImage(self.bTagImg, self.gTagImg, self.rTagImg)
ctr, theta, bCtr, gCtr, rCtr = vu.localizeRobot(bCentroid, gCentroid, rCentroid)
if((ctr != None) and (theta != None)):
fctr, ftheta = self.filterPoints(ctr, theta)
self.x_est = ctr[0]
self.y_est = ctr[1]
# print 'Theta IN:', theta
self.theta_est = theta#ftheta
self.tagLoc = vu.computeTagLocation(ctr, bCtr) # Compute tag location
vu.drawSquareMarker(self.rgbImg, int(fctr[0]), int(fctr[1]), 5, (255,0,255))
if(gCentroid != None):
vu.drawSquareMarker(self.rgbImg, int(gCentroid[0]), int(gCentroid[1]), 5, (0,0,255))
if(rCentroid != None):
vu.drawSquareMarker(self.rgbImg, int(rCentroid[0]), int(rCentroid[1]), 5, (255,0,0))
if(bCentroid != None):
vu.drawSquareMarker(self.rgbImg, int(bCentroid[0]), int(bCentroid[1]), 5, (255,255,0))
wpIndex = 0
for wp in self.waypointEst:
wpIndex = wpIndex + 1
if(wpIndex == 1):
wpcolor = (0,0,255)
else:
wpcolor = (0,255,255)
vu.drawFilledCircleMarker(self.rgbImg, wp[0], wp[1], 10, wpcolor) #
vu.drawTextIndex(self.rgbImg, wp[0], wp[1], str(wpIndex)) # Draw waypoint index
if(self.tagLoc[0] != None):
vu.drawFilledCircleMarker(self.rgbImg, self.tagLoc[0], self.tagLoc[1], 5, (0,0,160))
#vu.drawVector(self.rgbImg, self | self.camera = camera
self.calstate = CalState.UNCAL
self.calpts = []
self.XSIZE = 1000
self.YSIZE = 1000
self.x_est = -1
self.y_est = -1
self.theta_est = -1
# Drawing storage
self.waypointEst = [(300,300)] # Waypoint estimates for UI
self.tagLoc = (10,10) # Tag location estimate
self.fVectorStart = (0,0)
self.fVectorEnd = (0,0)
#self.worldpts = np.float32([
# [0,self.YSIZE/2],
# [0,0],
# [self.XSIZE,0], | identifier_body | |
theoretical_tools.py | fi*(Ti*Ui)**2/2./(Ti+Tm))
fe, fi = fe+1e-9, fi+1e-9 # just to insure a non zero division,
Tv = ( fe*(Ue*Te)**2 + fi*(Ti*Ui)**2 ) /( fe*(Ue*Te)**2/(Te+Tm) + fi*(Ti*Ui)**2/(Ti+Tm) )
TvN = Tv*Gl/Cm
return muV, sV+1e-12, muGn, TvN
def mean_and_var_conductance(Fe, Fi, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10):
# here TOTAL (sum over synapses) excitatory and inhibitory input
fe = Fe*(1.-gei)*pconnec*Ntot # default is 1 !!
fi = Fi*gei*pconnec*Ntot
return Qe*Te*fe, Qi*Ti*fi, Qe*np.sqrt(Te*fe/2.), Qi*np.sqrt(Ti*fi/2.)
### FUNCTION, INVERSE FUNCTION
# @numba.jit()
def erfc_func(muV, sV, TvN, Vthre, Gl, Cm):
return .5/TvN*Gl/Cm*(sp_spec.erfc((Vthre-muV)/np.sqrt(2)/sV))
# @numba.jit()
def effective_Vthre(Y, muV, sV, TvN, Gl, Cm):
Vthre_eff = muV+np.sqrt(2)*sV*sp_spec.erfcinv(\
Y*2.*TvN*Cm/Gl) # effective threshold
return Vthre_eff
# @numba.jit()
def threshold_func(muV, sV, TvN, muGn, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10):
"""
setting by default to True the square
because when use by external modules, coeff[5:]=np.zeros(3)
in the case of a linear threshold
"""
muV0, DmuV0 = -60e-3,10e-3
sV0, DsV0 =4e-3, 6e-3
TvN0, DTvN0 = 0.5, 1.
return P0+P1*(muV-muV0)/DmuV0+\
P2*(sV-sV0)/DsV0+P3*(TvN-TvN0)/DTvN0+\
0*P4*np.log(muGn)+P5*((muV-muV0)/DmuV0)**2+\
P6*((sV-sV0)/DsV0)**2+P7*((TvN-TvN0)/DTvN0)**2+\
P8*(muV-muV0)/DmuV0*(sV-sV0)/DsV0+\
P9*(muV-muV0)/DmuV0*(TvN-TvN0)/DTvN0+\
P10*(sV-sV0)/DsV0*(TvN-TvN0)/DTvN0
# final transfer function template :
# @numba.jit()
def TF_my_templateup(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10):
# here TOTAL (sum over synapses) excitatory and inhibitory input
if(hasattr(fe, "__len__")):
fe[fe<1e-8]=1e-8
else:
if(fe<1e-8):
fe=1e-8
if(hasattr(fi, "__len__")):
fi[fi<1e-8]=1e-8
else:
if(fi<1e-8):
fi=1e-8
muV, sV, muGn, TvN = get_fluct_regime_varsup(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10)
Vthre = threshold_func(muV, sV, TvN, muGn, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10)
if(hasattr(muV, "__len__")):
#print("ttt",isinstance(muV, list), hasattr(muV, "__len__"))
|
else:
if(sV<1e-4):
sV=1e-4
Fout_th = erfc_func(muV, sV, TvN, Vthre, Gl, Cm)
if(hasattr(Fout_th, "__len__")):
#print("ttt",isinstance(muV, list), hasattr(muV, "__len__"))
Fout_th[Fout_th<1e-8]=1e-8
else:
if(Fout_th<1e-8):
Fout_th=1e-8
'''
if(El<-0.063):
if(hasattr(Fout_th, "__len__")):
#print("ttt",isinstance(muV, list), hasattr(muV, "__len__"))
Fout_th[Fout_th>80.]=175
else:
if(Fout_th>80.):
print("Done")
Fout_th=175
'''
#print 'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy',fe,fi,muV, sV, TvN,Fout_th
return Fout_th
def gaussian(x, mu, sig):
return (1/(sig*np.sqrt(2*3.1415)))*np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.)))
def TF_my_templateup_heterogeneity(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10):
# here TOTAL (sum over synapses) excitatory and inhibitory input
def Phet(k):
locale=gaussian(k,1.,0.2)*TF_my_templateup(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El*k, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10)
return locale
outhet, err = quad(Phet, 0.1, 5)
return outhet
# @numba.jit()
def make_loop(t, nu, vm, nu_aff_exc, nu_aff_inh, BIN,\
Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10):
dt = t[1]-t[0]
# constructing the Euler method for the activity rate
for i_t in range(len(t)-1): # loop over time
fe = (nu_aff_exc[i_t]+nu[i_t]+Fdrive) # afferent+recurrent excitation
fi = nu[i_t]+nu_aff_inh[i_t] # recurrent inhibition
W[i_t+1] = W[i_t] + dt/Tw*(b*nu[i_t]*Tw - W[i_t])
nu[i_t+1] = nu[i_t] +\
dt/BIN*(\
TF_my_template(fe, fi, W[i_t], Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P | sV[sV<1e-4]=1e-4 | conditional_block |
theoretical_tools.py | fi*(Ti*Ui)**2/2./(Ti+Tm))
fe, fi = fe+1e-9, fi+1e-9 # just to insure a non zero division,
Tv = ( fe*(Ue*Te)**2 + fi*(Ti*Ui)**2 ) /( fe*(Ue*Te)**2/(Te+Tm) + fi*(Ti*Ui)**2/(Ti+Tm) )
TvN = Tv*Gl/Cm
return muV, sV+1e-12, muGn, TvN
def mean_and_var_conductance(Fe, Fi, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10):
# here TOTAL (sum over synapses) excitatory and inhibitory input
fe = Fe*(1.-gei)*pconnec*Ntot # default is 1 !!
fi = Fi*gei*pconnec*Ntot
return Qe*Te*fe, Qi*Ti*fi, Qe*np.sqrt(Te*fe/2.), Qi*np.sqrt(Ti*fi/2.)
### FUNCTION, INVERSE FUNCTION
# @numba.jit()
def erfc_func(muV, sV, TvN, Vthre, Gl, Cm):
return .5/TvN*Gl/Cm*(sp_spec.erfc((Vthre-muV)/np.sqrt(2)/sV))
# @numba.jit()
def effective_Vthre(Y, muV, sV, TvN, Gl, Cm):
Vthre_eff = muV+np.sqrt(2)*sV*sp_spec.erfcinv(\
Y*2.*TvN*Cm/Gl) # effective threshold
return Vthre_eff
# @numba.jit()
def threshold_func(muV, sV, TvN, muGn, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10):
"""
setting by default to True the square
because when use by external modules, coeff[5:]=np.zeros(3)
in the case of a linear threshold
"""
muV0, DmuV0 = -60e-3,10e-3
sV0, DsV0 =4e-3, 6e-3
TvN0, DTvN0 = 0.5, 1.
return P0+P1*(muV-muV0)/DmuV0+\
P2*(sV-sV0)/DsV0+P3*(TvN-TvN0)/DTvN0+\
0*P4*np.log(muGn)+P5*((muV-muV0)/DmuV0)**2+\
P6*((sV-sV0)/DsV0)**2+P7*((TvN-TvN0)/DTvN0)**2+\
P8*(muV-muV0)/DmuV0*(sV-sV0)/DsV0+\
P9*(muV-muV0)/DmuV0*(TvN-TvN0)/DTvN0+\
P10*(sV-sV0)/DsV0*(TvN-TvN0)/DTvN0
# final transfer function template :
# @numba.jit()
def TF_my_templateup(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10):
# here TOTAL (sum over synapses) excitatory and inhibitory input
if(hasattr(fe, "__len__")):
fe[fe<1e-8]=1e-8
else:
if(fe<1e-8):
fe=1e-8
if(hasattr(fi, "__len__")):
fi[fi<1e-8]=1e-8
else:
if(fi<1e-8):
fi=1e-8
muV, sV, muGn, TvN = get_fluct_regime_varsup(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10)
Vthre = threshold_func(muV, sV, TvN, muGn, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10)
if(hasattr(muV, "__len__")):
#print("ttt",isinstance(muV, list), hasattr(muV, "__len__"))
sV[sV<1e-4]=1e-4
else:
if(sV<1e-4):
sV=1e-4
Fout_th = erfc_func(muV, sV, TvN, Vthre, Gl, Cm)
if(hasattr(Fout_th, "__len__")):
#print("ttt",isinstance(muV, list), hasattr(muV, "__len__"))
Fout_th[Fout_th<1e-8]=1e-8
else:
if(Fout_th<1e-8):
Fout_th=1e-8
'''
if(El<-0.063):
if(hasattr(Fout_th, "__len__")):
#print("ttt",isinstance(muV, list), hasattr(muV, "__len__"))
Fout_th[Fout_th>80.]=175
else:
if(Fout_th>80.):
print("Done")
Fout_th=175
'''
#print 'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy',fe,fi,muV, sV, TvN,Fout_th
return Fout_th
def gaussian(x, mu, sig):
return (1/(sig*np.sqrt(2*3.1415)))*np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.)))
def TF_my_templateup_heterogeneity(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10):
# here TOTAL (sum over synapses) excitatory and inhibitory input
def Phet(k):
|
outhet, err = quad(Phet, 0.1, 5)
return outhet
# @numba.jit()
def make_loop(t, nu, vm, nu_aff_exc, nu_aff_inh, BIN,\
Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10):
dt = t[1]-t[0]
# constructing the Euler method for the activity rate
for i_t in range(len(t)-1): # loop over time
fe = (nu_aff_exc[i_t]+nu[i_t]+Fdrive) # afferent+recurrent excitation
fi = nu[i_t]+nu_aff_inh[i_t] # recurrent inhibition
W[i_t+1] = W[i_t] + dt/Tw*(b*nu[i_t]*Tw - W[i_t])
nu[i_t+1] = nu[i_t] +\
dt/BIN*(\
TF_my_template(fe, fi, W[i_t], Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P | locale=gaussian(k,1.,0.2)*TF_my_templateup(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El*k, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10)
return locale | identifier_body |
theoretical_tools.py | fi*(Ti*Ui)**2/2./(Ti+Tm))
fe, fi = fe+1e-9, fi+1e-9 # just to insure a non zero division,
Tv = ( fe*(Ue*Te)**2 + fi*(Ti*Ui)**2 ) /( fe*(Ue*Te)**2/(Te+Tm) + fi*(Ti*Ui)**2/(Ti+Tm) )
TvN = Tv*Gl/Cm
| return muV, sV+1e-12, muGn, TvN
def mean_and_var_conductance(Fe, Fi, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10):
# here TOTAL (sum over synapses) excitatory and inhibitory input
fe = Fe*(1.-gei)*pconnec*Ntot # default is 1 !!
fi = Fi*gei*pconnec*Ntot
return Qe*Te*fe, Qi*Ti*fi, Qe*np.sqrt(Te*fe/2.), Qi*np.sqrt(Ti*fi/2.)
### FUNCTION, INVERSE FUNCTION
# @numba.jit()
def erfc_func(muV, sV, TvN, Vthre, Gl, Cm):
return .5/TvN*Gl/Cm*(sp_spec.erfc((Vthre-muV)/np.sqrt(2)/sV))
# @numba.jit()
def effective_Vthre(Y, muV, sV, TvN, Gl, Cm):
Vthre_eff = muV+np.sqrt(2)*sV*sp_spec.erfcinv(\
Y*2.*TvN*Cm/Gl) # effective threshold
return Vthre_eff
# @numba.jit()
def threshold_func(muV, sV, TvN, muGn, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10):
"""
setting by default to True the square
because when use by external modules, coeff[5:]=np.zeros(3)
in the case of a linear threshold
"""
muV0, DmuV0 = -60e-3,10e-3
sV0, DsV0 =4e-3, 6e-3
TvN0, DTvN0 = 0.5, 1.
return P0+P1*(muV-muV0)/DmuV0+\
P2*(sV-sV0)/DsV0+P3*(TvN-TvN0)/DTvN0+\
0*P4*np.log(muGn)+P5*((muV-muV0)/DmuV0)**2+\
P6*((sV-sV0)/DsV0)**2+P7*((TvN-TvN0)/DTvN0)**2+\
P8*(muV-muV0)/DmuV0*(sV-sV0)/DsV0+\
P9*(muV-muV0)/DmuV0*(TvN-TvN0)/DTvN0+\
P10*(sV-sV0)/DsV0*(TvN-TvN0)/DTvN0
# final transfer function template :
# @numba.jit()
def TF_my_templateup(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10):
# here TOTAL (sum over synapses) excitatory and inhibitory input
if(hasattr(fe, "__len__")):
fe[fe<1e-8]=1e-8
else:
if(fe<1e-8):
fe=1e-8
if(hasattr(fi, "__len__")):
fi[fi<1e-8]=1e-8
else:
if(fi<1e-8):
fi=1e-8
muV, sV, muGn, TvN = get_fluct_regime_varsup(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10)
Vthre = threshold_func(muV, sV, TvN, muGn, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10)
if(hasattr(muV, "__len__")):
#print("ttt",isinstance(muV, list), hasattr(muV, "__len__"))
sV[sV<1e-4]=1e-4
else:
if(sV<1e-4):
sV=1e-4
Fout_th = erfc_func(muV, sV, TvN, Vthre, Gl, Cm)
if(hasattr(Fout_th, "__len__")):
#print("ttt",isinstance(muV, list), hasattr(muV, "__len__"))
Fout_th[Fout_th<1e-8]=1e-8
else:
if(Fout_th<1e-8):
Fout_th=1e-8
'''
if(El<-0.063):
if(hasattr(Fout_th, "__len__")):
#print("ttt",isinstance(muV, list), hasattr(muV, "__len__"))
Fout_th[Fout_th>80.]=175
else:
if(Fout_th>80.):
print("Done")
Fout_th=175
'''
#print 'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy',fe,fi,muV, sV, TvN,Fout_th
return Fout_th
def gaussian(x, mu, sig):
return (1/(sig*np.sqrt(2*3.1415)))*np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.)))
def TF_my_templateup_heterogeneity(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10):
# here TOTAL (sum over synapses) excitatory and inhibitory input
def Phet(k):
locale=gaussian(k,1.,0.2)*TF_my_templateup(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El*k, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10)
return locale
outhet, err = quad(Phet, 0.1, 5)
return outhet
# @numba.jit()
def make_loop(t, nu, vm, nu_aff_exc, nu_aff_inh, BIN,\
Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10):
dt = t[1]-t[0]
# constructing the Euler method for the activity rate
for i_t in range(len(t)-1): # loop over time
fe = (nu_aff_exc[i_t]+nu[i_t]+Fdrive) # afferent+recurrent excitation
fi = nu[i_t]+nu_aff_inh[i_t] # recurrent inhibition
W[i_t+1] = W[i_t] + dt/Tw*(b*nu[i_t]*Tw - W[i_t])
nu[i_t+1] = nu[i_t] +\
dt/BIN*(\
TF_my_template(fe, fi, W[i_t], Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, | random_line_split | |
theoretical_tools.py | fi*(Ti*Ui)**2/2./(Ti+Tm))
fe, fi = fe+1e-9, fi+1e-9 # just to insure a non zero division,
Tv = ( fe*(Ue*Te)**2 + fi*(Ti*Ui)**2 ) /( fe*(Ue*Te)**2/(Te+Tm) + fi*(Ti*Ui)**2/(Ti+Tm) )
TvN = Tv*Gl/Cm
return muV, sV+1e-12, muGn, TvN
def mean_and_var_conductance(Fe, Fi, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10):
# here TOTAL (sum over synapses) excitatory and inhibitory input
fe = Fe*(1.-gei)*pconnec*Ntot # default is 1 !!
fi = Fi*gei*pconnec*Ntot
return Qe*Te*fe, Qi*Ti*fi, Qe*np.sqrt(Te*fe/2.), Qi*np.sqrt(Ti*fi/2.)
### FUNCTION, INVERSE FUNCTION
# @numba.jit()
def erfc_func(muV, sV, TvN, Vthre, Gl, Cm):
return .5/TvN*Gl/Cm*(sp_spec.erfc((Vthre-muV)/np.sqrt(2)/sV))
# @numba.jit()
def effective_Vthre(Y, muV, sV, TvN, Gl, Cm):
Vthre_eff = muV+np.sqrt(2)*sV*sp_spec.erfcinv(\
Y*2.*TvN*Cm/Gl) # effective threshold
return Vthre_eff
# @numba.jit()
def | (muV, sV, TvN, muGn, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10):
"""
setting by default to True the square
because when use by external modules, coeff[5:]=np.zeros(3)
in the case of a linear threshold
"""
muV0, DmuV0 = -60e-3,10e-3
sV0, DsV0 =4e-3, 6e-3
TvN0, DTvN0 = 0.5, 1.
return P0+P1*(muV-muV0)/DmuV0+\
P2*(sV-sV0)/DsV0+P3*(TvN-TvN0)/DTvN0+\
0*P4*np.log(muGn)+P5*((muV-muV0)/DmuV0)**2+\
P6*((sV-sV0)/DsV0)**2+P7*((TvN-TvN0)/DTvN0)**2+\
P8*(muV-muV0)/DmuV0*(sV-sV0)/DsV0+\
P9*(muV-muV0)/DmuV0*(TvN-TvN0)/DTvN0+\
P10*(sV-sV0)/DsV0*(TvN-TvN0)/DTvN0
# final transfer function template :
# @numba.jit()
def TF_my_templateup(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10):
# here TOTAL (sum over synapses) excitatory and inhibitory input
if(hasattr(fe, "__len__")):
fe[fe<1e-8]=1e-8
else:
if(fe<1e-8):
fe=1e-8
if(hasattr(fi, "__len__")):
fi[fi<1e-8]=1e-8
else:
if(fi<1e-8):
fi=1e-8
muV, sV, muGn, TvN = get_fluct_regime_varsup(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10)
Vthre = threshold_func(muV, sV, TvN, muGn, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10)
if(hasattr(muV, "__len__")):
#print("ttt",isinstance(muV, list), hasattr(muV, "__len__"))
sV[sV<1e-4]=1e-4
else:
if(sV<1e-4):
sV=1e-4
Fout_th = erfc_func(muV, sV, TvN, Vthre, Gl, Cm)
if(hasattr(Fout_th, "__len__")):
#print("ttt",isinstance(muV, list), hasattr(muV, "__len__"))
Fout_th[Fout_th<1e-8]=1e-8
else:
if(Fout_th<1e-8):
Fout_th=1e-8
'''
if(El<-0.063):
if(hasattr(Fout_th, "__len__")):
#print("ttt",isinstance(muV, list), hasattr(muV, "__len__"))
Fout_th[Fout_th>80.]=175
else:
if(Fout_th>80.):
print("Done")
Fout_th=175
'''
#print 'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy',fe,fi,muV, sV, TvN,Fout_th
return Fout_th
def gaussian(x, mu, sig):
return (1/(sig*np.sqrt(2*3.1415)))*np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.)))
def TF_my_templateup_heterogeneity(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10):
# here TOTAL (sum over synapses) excitatory and inhibitory input
def Phet(k):
locale=gaussian(k,1.,0.2)*TF_my_templateup(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El*k, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10)
return locale
outhet, err = quad(Phet, 0.1, 5)
return outhet
# @numba.jit()
def make_loop(t, nu, vm, nu_aff_exc, nu_aff_inh, BIN,\
Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10):
dt = t[1]-t[0]
# constructing the Euler method for the activity rate
for i_t in range(len(t)-1): # loop over time
fe = (nu_aff_exc[i_t]+nu[i_t]+Fdrive) # afferent+recurrent excitation
fi = nu[i_t]+nu_aff_inh[i_t] # recurrent inhibition
W[i_t+1] = W[i_t] + dt/Tw*(b*nu[i_t]*Tw - W[i_t])
nu[i_t+1] = nu[i_t] +\
dt/BIN*(\
TF_my_template(fe, fi, W[i_t], Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, | threshold_func | identifier_name |
Home.js | text == 'object') {
objs.push(text)
return objs
}
let openBrace = -1
for(let i=0; i<text.length; i++) {
if(text[i] == '{' && openBrace == -1) {
openBrace = i
}
else if(text[i] == '}' && openBrace != -1) {
const subText = text.substring(openBrace, i+1)
// console.log(openBrace, i, subText)
let o
try {o = JSON.parse(subText)} catch(e){}
// console.log('o', o)
if(o) {
objs.push(o)
openBrace = -1
}
}
}
// const matches = text.match(/\{[\s\S]*\}/i) //match all whitespace and non white space chars
// if(matches) {
// const jsonText = matches[0]
// try {textJson = JSON.parse(jsonText)} catch(e){}
// }
return objs
}
class Home extends React.Component {
constructor(props) {
super(props)
this.checkScroll = this.checkScroll.bind(this)
this.onScroll = this.onScroll.bind(this)
this.onClick = this.onClick.bind(this)
this.onWheel = this.onWheel.bind(this)
this.onKeyDown = this.onKeyDown.bind(this)
}
componentDidUpdate(prevProps, prevState) {
this.checkScroll()
}
checkScroll() {
if(this.props.shouldScrollBottom) {
const ele = ReactDOM.findDOMNode(this.refs.trailingDiv)
if(ele)
ele.scrollIntoView({behavior: "smooth"})
}
}
onScroll(e) {
}
onClick(e) {
this.checkScroll()
//these delays trigger after the tree expands, can probably be improved upon by adding an expand listener to the tree object
setTimeout(this.checkScroll, 200)
setTimeout(this.checkScroll, 500)
}
onWheel(e) {
const ele = e.currentTarget
const height = ele.getBoundingClientRect().height
const atBottom = ((ele.scrollTop + height) - ele.scrollHeight) > 10
if(this.props.shouldScrollBottom != atBottom)
this.props.actions.set({'shouldScrollBottom': atBottom})
}
onKeyDown(e) {
// console.log('key', e.key, e.keyCode)
if(e.key == ' ' || e.keyCode == 32) {
this.props.actions.set({'shouldScrollBottom': !this.props.shouldScrollBottom})
}
}
render () {
const {filters, filteredTraces, actions, shouldScrollBottom, showingFilters, shouldReconnect, userCount, whiteSpace, jsonExpandLevel} = this.props
return (
<div className='home'>
<div className='filters'>
<div className='filters-controls'>
<button className='btn btn-default' onClick={() => actions.set({showingFilters:!showingFilters})}>
<i className={'fa' + (showingFilters?' fa-chevron-up':' fa-chevron-down')}/>
</button>
<button className='btn btn-default' onClick={() => actions.clearTraces()}>
<i className='fa fa-ban'/>
</button>
{shouldScrollBottom
? <button className='btn btn-info' onClick={() => actions.set({'shouldScrollBottom': false})}><i className='fa fa-hand-o-down'/></button>
: <button className='btn btn-danger' onClick={() => actions.set({'shouldScrollBottom': true})}><i className='fa fa-hand-paper-o'/></button>
}
{whiteSpace=='pre' && <button className='btn btn-default' onClick={() => actions.set({'whiteSpace': 'pre-wrap'})}><i className='fa fa-indent'/></button>}
{whiteSpace=='pre-wrap' && <button className='btn btn-info' onClick={() => actions.set({'whiteSpace': 'nowrap'})}><i className='fa fa-align-left'/></button>}
{whiteSpace=='nowrap' && <button className='btn btn-info' onClick={() => actions.set({'whiteSpace': 'pre'})}><i className='fa fa-list'/></button>}
<button className='btn btn-default' onClick={() => actions.set({'jsonExpandLevel': jsonExpandLevel>=3?-1:jsonExpandLevel+1})}><i className='fa fa-level-down'/>{jsonExpandLevel}</button>
<input type='text' className='form-control'
value={this.props.searchText}
onChange={e => actions.setSearchText(e.target.value)}/>
{this.props.isSocketConnected
? <button className='btn btn-default' onClick={() => actions.toggleShouldReconnect()}><i className='fa fa-link'/>{shouldReconnect&&<i className='fa fa-bolt'/>}</button>
: <button className='btn btn-danger' onClick={() => actions.toggleShouldReconnect()}><i className='fa fa-unlink'/>{shouldReconnect&&<i className='fa fa-bolt'/>}</button>
}
<button className='btn btn-default'><i className='fa fa-user'/>{' ' + (userCount==-1 ? '-' : userCount)}</button>
<button className='btn btn-default' onClick={() => actions.createFilter()}>
<i className='fa fa-plus'/>
</button>
</div>
{showingFilters && filters.map((filter, i) => {
const mod = (filter) => actions.setFilter(i, filter)
return (
<div key={i} className='filter'>
{/*Filter On/Off*/}
{filter.get('isActive')
? <button className='btn btn-default' onClick={() => mod(filter.set('isActive', false))}><i className='fa fa-circle'/></button>
: <button className='btn btn-danger' onClick={() => mod(filter.set('isActive', true))}><i className='fa fa-circle-o'/></button>
}
{/*Query Level*/}
{[null,'v','d','i','w','e'].map((level, i, a) => {
if(filter.get('queryLevel') != level)
return
const nextLevel = i==a.length-1 ? a[0] : a[i+1]
return <button className={'btn' + (level?' btn-info':' btn-default')} key={level}
onClick={() => mod(filter.set('queryLevel', nextLevel))}>
{level ? level.toUpperCase() : '--'}
</button>
})}
{/*Query Mode*/}
{filter.get('queryMode') == 'contains' &&
<button className="btn btn-default" title='Plain'
onClick={() => mod(filter.set('queryMode', 'regex'))}>
P
</button>
}
{filter.get('queryMode') == 'regex' &&
<button className="btn btn-info" title='Regex'
onClick={() => mod(filter.set('queryMode', 'bundle'))}>
R
</button>
}
{filter.get('queryMode') == 'bundle' &&
<button className="btn btn-info" title='Bundle'
onClick={() => mod(filter.set('queryMode', 'contains'))}>
B
</button>
}
{/*Query*/}
<input className='form-control' type='text' value={filter.get('query') || ''}
onChange={e => mod(filter.set('query', e.target.value))}
/>
{/*Visibility*/}
{filter.get('show') == true &&
<button className="btn btn-info" onClick={() => mod(filter.set('show', false))}>S</button>
}
{filter.get('show') == false &&
<button className="btn btn-info" onClick={() => mod(filter.set('show', null))}>H</button>
}
{filter.get('show') == null &&
<button className="btn btn-default" onClick={() => mod(filter.set('show', true))}>--</button>
}
{/*Styles*/}
<input className='form-control style-control' type='text' value={filter.getIn(['style', 'color']) || ''}
onChange={e => mod(filter.setIn(['style', 'color'], e.target.value))}
/>
<input className='form-control style-control' type='text' value={filter.getIn(['style', 'background']) || ''}
onChange={e => mod(filter.setIn(['style', 'background'], e.target.value))}
/>
{/*Order*/}
<button className='btn btn-default' onClick={() => actions.swapFilters(i, i-1)}>
<i className='fa fa-arrow-up'/>
</button>
<button className='btn btn-default' onClick={() => actions.swapFilters(i, i+1)}>
<i className='fa fa-arrow-down'/>
</button>
{/*Remove*/}
<button className='btn btn-default' onClick={() => actions.removeFilter(i)}>
<i className='fa fa-remove'/>
</button>
</div>
)
})}
</div>
<div className='traces' onScroll={this.onScroll} onClick={this.onClick} onWheel={this.onWheel} onKeyDown={this.onKeyDown}>
{filteredTraces.map((trace, i, list) => {
const style = trace.get('style') ? trace.get('style').toJS() : {}
const timestamp = moment(trace.get('instant')).format('HH:mm:ss')//.format('YYYY-MM-DD HH:mm:ss')
const bundle = trace.get('bundle') | const level = trace.get('level')
const text = trace.get('text')
// console.log(trace) | random_line_split | |
Home.js | f4bf75',
base0B: '#a6e22e',
base0C: '#a1efe4',
base0D: '#66d9ef',
base0E: '#ae81ff',
base0F: '#cc6633'
}
function parseObjects(text) {
const objs = []
if(!text)
return objs
if(typeof text == 'object') {
objs.push(text)
return objs
}
let openBrace = -1
for(let i=0; i<text.length; i++) {
if(text[i] == '{' && openBrace == -1) {
openBrace = i
}
else if(text[i] == '}' && openBrace != -1) {
const subText = text.substring(openBrace, i+1)
// console.log(openBrace, i, subText)
let o
try {o = JSON.parse(subText)} catch(e){}
// console.log('o', o)
if(o) {
objs.push(o)
openBrace = -1
}
}
}
// const matches = text.match(/\{[\s\S]*\}/i) //match all whitespace and non white space chars
// if(matches) {
// const jsonText = matches[0]
// try {textJson = JSON.parse(jsonText)} catch(e){}
// }
return objs
}
class Home extends React.Component {
constructor(props) {
super(props)
this.checkScroll = this.checkScroll.bind(this)
this.onScroll = this.onScroll.bind(this)
this.onClick = this.onClick.bind(this)
this.onWheel = this.onWheel.bind(this)
this.onKeyDown = this.onKeyDown.bind(this)
}
componentDidUpdate(prevProps, prevState) {
this.checkScroll()
}
checkScroll() |
onScroll(e) {
}
onClick(e) {
this.checkScroll()
//these delays trigger after the tree expands, can probably be improved upon by adding an expand listener to the tree object
setTimeout(this.checkScroll, 200)
setTimeout(this.checkScroll, 500)
}
onWheel(e) {
const ele = e.currentTarget
const height = ele.getBoundingClientRect().height
const atBottom = ((ele.scrollTop + height) - ele.scrollHeight) > 10
if(this.props.shouldScrollBottom != atBottom)
this.props.actions.set({'shouldScrollBottom': atBottom})
}
onKeyDown(e) {
// console.log('key', e.key, e.keyCode)
if(e.key == ' ' || e.keyCode == 32) {
this.props.actions.set({'shouldScrollBottom': !this.props.shouldScrollBottom})
}
}
render () {
const {filters, filteredTraces, actions, shouldScrollBottom, showingFilters, shouldReconnect, userCount, whiteSpace, jsonExpandLevel} = this.props
return (
<div className='home'>
<div className='filters'>
<div className='filters-controls'>
<button className='btn btn-default' onClick={() => actions.set({showingFilters:!showingFilters})}>
<i className={'fa' + (showingFilters?' fa-chevron-up':' fa-chevron-down')}/>
</button>
<button className='btn btn-default' onClick={() => actions.clearTraces()}>
<i className='fa fa-ban'/>
</button>
{shouldScrollBottom
? <button className='btn btn-info' onClick={() => actions.set({'shouldScrollBottom': false})}><i className='fa fa-hand-o-down'/></button>
: <button className='btn btn-danger' onClick={() => actions.set({'shouldScrollBottom': true})}><i className='fa fa-hand-paper-o'/></button>
}
{whiteSpace=='pre' && <button className='btn btn-default' onClick={() => actions.set({'whiteSpace': 'pre-wrap'})}><i className='fa fa-indent'/></button>}
{whiteSpace=='pre-wrap' && <button className='btn btn-info' onClick={() => actions.set({'whiteSpace': 'nowrap'})}><i className='fa fa-align-left'/></button>}
{whiteSpace=='nowrap' && <button className='btn btn-info' onClick={() => actions.set({'whiteSpace': 'pre'})}><i className='fa fa-list'/></button>}
<button className='btn btn-default' onClick={() => actions.set({'jsonExpandLevel': jsonExpandLevel>=3?-1:jsonExpandLevel+1})}><i className='fa fa-level-down'/>{jsonExpandLevel}</button>
<input type='text' className='form-control'
value={this.props.searchText}
onChange={e => actions.setSearchText(e.target.value)}/>
{this.props.isSocketConnected
? <button className='btn btn-default' onClick={() => actions.toggleShouldReconnect()}><i className='fa fa-link'/>{shouldReconnect&&<i className='fa fa-bolt'/>}</button>
: <button className='btn btn-danger' onClick={() => actions.toggleShouldReconnect()}><i className='fa fa-unlink'/>{shouldReconnect&&<i className='fa fa-bolt'/>}</button>
}
<button className='btn btn-default'><i className='fa fa-user'/>{' ' + (userCount==-1 ? '-' : userCount)}</button>
<button className='btn btn-default' onClick={() => actions.createFilter()}>
<i className='fa fa-plus'/>
</button>
</div>
{showingFilters && filters.map((filter, i) => {
const mod = (filter) => actions.setFilter(i, filter)
return (
<div key={i} className='filter'>
{/*Filter On/Off*/}
{filter.get('isActive')
? <button className='btn btn-default' onClick={() => mod(filter.set('isActive', false))}><i className='fa fa-circle'/></button>
: <button className='btn btn-danger' onClick={() => mod(filter.set('isActive', true))}><i className='fa fa-circle-o'/></button>
}
{/*Query Level*/}
{[null,'v','d','i','w','e'].map((level, i, a) => {
if(filter.get('queryLevel') != level)
return
const nextLevel = i==a.length-1 ? a[0] : a[i+1]
return <button className={'btn' + (level?' btn-info':' btn-default')} key={level}
onClick={() => mod(filter.set('queryLevel', nextLevel))}>
{level ? level.toUpperCase() : '--'}
</button>
})}
{/*Query Mode*/}
{filter.get('queryMode') == 'contains' &&
<button className="btn btn-default" title='Plain'
onClick={() => mod(filter.set('queryMode', 'regex'))}>
P
</button>
}
{filter.get('queryMode') == 'regex' &&
<button className="btn btn-info" title='Regex'
onClick={() => mod(filter.set('queryMode', 'bundle'))}>
R
</button>
}
{filter.get('queryMode') == 'bundle' &&
<button className="btn btn-info" title='Bundle'
onClick={() => mod(filter.set('queryMode', 'contains'))}>
B
</button>
}
{/*Query*/}
<input className='form-control' type='text' value={filter.get('query') || ''}
onChange={e => mod(filter.set('query', e.target.value))}
/>
{/*Visibility*/}
{filter.get('show') == true &&
<button className="btn btn-info" onClick={() => mod(filter.set('show', false))}>S</button>
}
{filter.get('show') == false &&
<button className="btn btn-info" onClick={() => mod(filter.set('show', null))}>H</button>
}
{filter.get('show') == null &&
<button className="btn btn-default" onClick={() => mod(filter.set('show', true))}>--</button>
}
{/*Styles*/}
<input className='form-control style-control' type='text' value={filter.getIn(['style', 'color']) || ''}
onChange={e => mod(filter.setIn(['style', 'color'], e.target.value))}
/>
<input className='form-control style-control' type='text' value={filter.getIn(['style', 'background']) || ''}
onChange={e => mod(filter.setIn(['style', 'background'], e.target.value))}
/>
{/*Order*/}
<button className='btn btn-default' onClick={() => actions.swapFilters(i, i-1)}>
<i className='fa fa-arrow-up'/>
</button>
<button className='btn btn-default' onClick={() => actions.swapFilters(i, i+1)}>
<i className='fa fa-arrow-down'/>
</button>
{/*Remove*/}
<button className='btn btn-default' onClick={() => actions.removeFilter(i)}>
<i className='fa fa-remove'/>
</button>
</div>
)
})}
</div>
<div className='traces' onScroll={this.onScroll} onClick={this.onClick} onWheel={this.onWheel} onKeyDown={this.onKeyDown}>
{filteredTr | {
if(this.props.shouldScrollBottom) {
const ele = ReactDOM.findDOMNode(this.refs.trailingDiv)
if(ele)
ele.scrollIntoView({behavior: "smooth"})
}
} | identifier_body |
Home.js | f4bf75',
base0B: '#a6e22e',
base0C: '#a1efe4',
base0D: '#66d9ef',
base0E: '#ae81ff',
base0F: '#cc6633'
}
function parseObjects(text) {
const objs = []
if(!text)
return objs
if(typeof text == 'object') {
objs.push(text)
return objs
}
let openBrace = -1
for(let i=0; i<text.length; i++) {
if(text[i] == '{' && openBrace == -1) {
openBrace = i
}
else if(text[i] == '}' && openBrace != -1) {
const subText = text.substring(openBrace, i+1)
// console.log(openBrace, i, subText)
let o
try {o = JSON.parse(subText)} catch(e){}
// console.log('o', o)
if(o) {
objs.push(o)
openBrace = -1
}
}
}
// const matches = text.match(/\{[\s\S]*\}/i) //match all whitespace and non white space chars
// if(matches) {
// const jsonText = matches[0]
// try {textJson = JSON.parse(jsonText)} catch(e){}
// }
return objs
}
class Home extends React.Component {
constructor(props) {
super(props)
this.checkScroll = this.checkScroll.bind(this)
this.onScroll = this.onScroll.bind(this)
this.onClick = this.onClick.bind(this)
this.onWheel = this.onWheel.bind(this)
this.onKeyDown = this.onKeyDown.bind(this)
}
| (prevProps, prevState) {
this.checkScroll()
}
checkScroll() {
if(this.props.shouldScrollBottom) {
const ele = ReactDOM.findDOMNode(this.refs.trailingDiv)
if(ele)
ele.scrollIntoView({behavior: "smooth"})
}
}
onScroll(e) {
}
onClick(e) {
this.checkScroll()
//these delays trigger after the tree expands, can probably be improved upon by adding an expand listener to the tree object
setTimeout(this.checkScroll, 200)
setTimeout(this.checkScroll, 500)
}
onWheel(e) {
const ele = e.currentTarget
const height = ele.getBoundingClientRect().height
const atBottom = ((ele.scrollTop + height) - ele.scrollHeight) > 10
if(this.props.shouldScrollBottom != atBottom)
this.props.actions.set({'shouldScrollBottom': atBottom})
}
onKeyDown(e) {
// console.log('key', e.key, e.keyCode)
if(e.key == ' ' || e.keyCode == 32) {
this.props.actions.set({'shouldScrollBottom': !this.props.shouldScrollBottom})
}
}
render () {
const {filters, filteredTraces, actions, shouldScrollBottom, showingFilters, shouldReconnect, userCount, whiteSpace, jsonExpandLevel} = this.props
return (
<div className='home'>
<div className='filters'>
<div className='filters-controls'>
<button className='btn btn-default' onClick={() => actions.set({showingFilters:!showingFilters})}>
<i className={'fa' + (showingFilters?' fa-chevron-up':' fa-chevron-down')}/>
</button>
<button className='btn btn-default' onClick={() => actions.clearTraces()}>
<i className='fa fa-ban'/>
</button>
{shouldScrollBottom
? <button className='btn btn-info' onClick={() => actions.set({'shouldScrollBottom': false})}><i className='fa fa-hand-o-down'/></button>
: <button className='btn btn-danger' onClick={() => actions.set({'shouldScrollBottom': true})}><i className='fa fa-hand-paper-o'/></button>
}
{whiteSpace=='pre' && <button className='btn btn-default' onClick={() => actions.set({'whiteSpace': 'pre-wrap'})}><i className='fa fa-indent'/></button>}
{whiteSpace=='pre-wrap' && <button className='btn btn-info' onClick={() => actions.set({'whiteSpace': 'nowrap'})}><i className='fa fa-align-left'/></button>}
{whiteSpace=='nowrap' && <button className='btn btn-info' onClick={() => actions.set({'whiteSpace': 'pre'})}><i className='fa fa-list'/></button>}
<button className='btn btn-default' onClick={() => actions.set({'jsonExpandLevel': jsonExpandLevel>=3?-1:jsonExpandLevel+1})}><i className='fa fa-level-down'/>{jsonExpandLevel}</button>
<input type='text' className='form-control'
value={this.props.searchText}
onChange={e => actions.setSearchText(e.target.value)}/>
{this.props.isSocketConnected
? <button className='btn btn-default' onClick={() => actions.toggleShouldReconnect()}><i className='fa fa-link'/>{shouldReconnect&&<i className='fa fa-bolt'/>}</button>
: <button className='btn btn-danger' onClick={() => actions.toggleShouldReconnect()}><i className='fa fa-unlink'/>{shouldReconnect&&<i className='fa fa-bolt'/>}</button>
}
<button className='btn btn-default'><i className='fa fa-user'/>{' ' + (userCount==-1 ? '-' : userCount)}</button>
<button className='btn btn-default' onClick={() => actions.createFilter()}>
<i className='fa fa-plus'/>
</button>
</div>
{showingFilters && filters.map((filter, i) => {
const mod = (filter) => actions.setFilter(i, filter)
return (
<div key={i} className='filter'>
{/*Filter On/Off*/}
{filter.get('isActive')
? <button className='btn btn-default' onClick={() => mod(filter.set('isActive', false))}><i className='fa fa-circle'/></button>
: <button className='btn btn-danger' onClick={() => mod(filter.set('isActive', true))}><i className='fa fa-circle-o'/></button>
}
{/*Query Level*/}
{[null,'v','d','i','w','e'].map((level, i, a) => {
if(filter.get('queryLevel') != level)
return
const nextLevel = i==a.length-1 ? a[0] : a[i+1]
return <button className={'btn' + (level?' btn-info':' btn-default')} key={level}
onClick={() => mod(filter.set('queryLevel', nextLevel))}>
{level ? level.toUpperCase() : '--'}
</button>
})}
{/*Query Mode*/}
{filter.get('queryMode') == 'contains' &&
<button className="btn btn-default" title='Plain'
onClick={() => mod(filter.set('queryMode', 'regex'))}>
P
</button>
}
{filter.get('queryMode') == 'regex' &&
<button className="btn btn-info" title='Regex'
onClick={() => mod(filter.set('queryMode', 'bundle'))}>
R
</button>
}
{filter.get('queryMode') == 'bundle' &&
<button className="btn btn-info" title='Bundle'
onClick={() => mod(filter.set('queryMode', 'contains'))}>
B
</button>
}
{/*Query*/}
<input className='form-control' type='text' value={filter.get('query') || ''}
onChange={e => mod(filter.set('query', e.target.value))}
/>
{/*Visibility*/}
{filter.get('show') == true &&
<button className="btn btn-info" onClick={() => mod(filter.set('show', false))}>S</button>
}
{filter.get('show') == false &&
<button className="btn btn-info" onClick={() => mod(filter.set('show', null))}>H</button>
}
{filter.get('show') == null &&
<button className="btn btn-default" onClick={() => mod(filter.set('show', true))}>--</button>
}
{/*Styles*/}
<input className='form-control style-control' type='text' value={filter.getIn(['style', 'color']) || ''}
onChange={e => mod(filter.setIn(['style', 'color'], e.target.value))}
/>
<input className='form-control style-control' type='text' value={filter.getIn(['style', 'background']) || ''}
onChange={e => mod(filter.setIn(['style', 'background'], e.target.value))}
/>
{/*Order*/}
<button className='btn btn-default' onClick={() => actions.swapFilters(i, i-1)}>
<i className='fa fa-arrow-up'/>
</button>
<button className='btn btn-default' onClick={() => actions.swapFilters(i, i+1)}>
<i className='fa fa-arrow-down'/>
</button>
{/*Remove*/}
<button className='btn btn-default' onClick={() => actions.removeFilter(i)}>
<i className='fa fa-remove'/>
</button>
</div>
)
})}
</div>
<div className='traces' onScroll={this.onScroll} onClick={this.onClick} onWheel={this.onWheel} onKeyDown={this.onKeyDown}>
{filteredTr | componentDidUpdate | identifier_name |
mc6845.rs | :
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
devices::mc6845.rs
Implementation of the Motorola MC6845 CRT controller.
Used internally by the MDA and CGA video cards.
*/
use crate::tracelogger::TraceLogger;
const CURSOR_LINE_MASK: u8 = 0b0000_1111;
const CURSOR_ATTR_MASK: u8 = 0b0011_0000;
const REGISTER_MAX: usize = 17;
const REGISTER_UNREADABLE_VALUE: u8 = 0x00;
#[derive (Copy, Clone, Debug)]
pub enum CrtcRegister {
HorizontalTotal,
HorizontalDisplayed,
HorizontalSyncPosition,
SyncWidth,
VerticalTotal,
VerticalTotalAdjust,
VerticalDisplayed,
VerticalSync,
InterlaceMode,
MaximumScanlineAddress,
CursorStartLine,
CursorEndLine,
StartAddressH,
StartAddressL,
CursorAddressH,
CursorAddressL,
LightPenPositionH,
LightPenPositionL,
}
use crate::mc6845::CrtcRegister::*;
macro_rules! trace {
($self:ident, $($t:tt)*) => {{
$self.trace_logger.print(&format!($($t)*));
$self.trace_logger.print("\n".to_string());
}};
}
macro_rules! trace_regs {
($self:ident) => {
$self.trace_logger.print(
&format!("")
/*
&format!(
"[SL:{:03} HCC:{:03} VCC:{:03} VT:{:03} VS:{:03}] ",
$self.scanline,
$self.hcc_c0,
$self.vcc_c4,
$self.crtc_vertical_total,
$self.crtc_vertical_sync_pos
)
*/
);
};
}
pub struct Crtc6845 {
reg: [u8; 18], // Externally-accessable CRTC register file
reg_select: CrtcRegister, // Selected CRTC register
start_address: u16, // Calculated value from R12 & R13
cursor_address: u16, // Calculated value from R14 & R15
lightpen_position: u16, // Calculated value from R16 & R17
cursor_status: bool,
cursor_start_line: u8,
cursor_slow_blink: bool,
cursor_blink_rate: f64,
display_enable: bool, // True if we are in counting in the display area, false otherwise
hcc_c0: u8, // Horizontal character counter (x pos of character)
vlc_c9: u8, // Vertical line counter - counts during vsync period
vcc_c4: u8, // Vertical character counter (y pos of character)
vsc_c3h: u8,
hsc_c3l: u8,
vtac_c5: u8,
vma: u16, // VMA register - Video memory address
vma_t: u16, // VMA' register - Video memory address temporary
trace_logger: TraceLogger,
}
impl Crtc6845 {
fn new(trace_logger: TraceLogger) -> Self {
Self {
reg: [0; 18],
reg_select: HorizontalTotal,
start_address: 0,
cursor_address: 0,
lightpen_position: 0,
cursor_status: false,
cursor_start_line: 0,
cursor_slow_blink: false,
cursor_blink_rate: 0.0,
display_enable: false,
hcc_c0: 0,
vlc_c9: 0,
vcc_c4: 0,
vsc_c3h: 0,
hsc_c3l: 0,
vtac_c5: 0,
vma: 0,
vma_t: 0,
trace_logger
}
}
pub fn select_register(&mut self, idx: usize) {
if idx > REGISTER_MAX {
return
}
let reg_select = match idx {
0 => HorizontalTotal,
1 => HorizontalDisplayed,
2 => HorizontalSyncPosition,
3 => SyncWidth,
4 => VerticalTotal,
5 => VerticalTotalAdjust,
6 => VerticalDisplayed,
7 => VerticalSync,
8 => InterlaceMode,
9 => MaximumScanlineAddress,
10 => CursorStartLine,
11 => CursorEndLine,
12 => StartAddressH,
13 => StartAddressL,
14 => CursorAddressH,
15 => CursorAddressL,
16 => LightPenPositionH,
_ => LightPenPositionL,
};
}
pub fn write_register(&mut self, byte: u8) {
match self.reg_select {
CrtcRegister::HorizontalTotal => {
// (R0) 8 bit write only
self.reg[0] = byte;
},
CrtcRegister::HorizontalDisplayed => {
// (R1) 8 bit write only
self.reg[1] = byte;
}
CrtcRegister::HorizontalSyncPosition => {
// (R2) 8 bit write only
self.reg[2] = byte;
},
CrtcRegister::SyncWidth => {
// (R3) 8 bit write only
self.reg[3] = byte;
},
CrtcRegister::VerticalTotal => {
// (R4) 7 bit write only
self.reg[4] = byte & 0x7F;
trace_regs!(self);
trace!(
self,
"CRTC Register Write (04h): VerticalTotal updated: {}",
self.reg[4]
)
},
CrtcRegister::VerticalTotalAdjust => {
// (R5) 5 bit write only
self.reg[5] = byte & 0x1F;
}
CrtcRegister::VerticalDisplayed => {
// (R6) 7 bit write only
self.reg[6] = byte & 0x7F;
},
CrtcRegister::VerticalSync => {
// (R7) 7 bit write only
self.reg[7] = byte & 0x7F;
trace_regs!(self);
trace!(
self,
"CRTC Register Write (07h): VerticalSync updated: {}",
self.reg[7]
)
},
CrtcRegister::InterlaceMode => {
// (R8) 2 bit write only
self.reg[8] = byte & 0x03;
},
CrtcRegister::MaximumScanlineAddress => {
// (R9) 5 bit write only
self.reg[9] = byte & 0x1F;
}
CrtcRegister::CursorStartLine => {
// (R10) 7 bit bitfield. Write only.
self.reg[10] = byte & 0x7F;
self.cursor_start_line = byte & CURSOR_LINE_MASK;
match byte & CURSOR_ATTR_MASK >> 4 {
0b00 | 0b10 => {
self.cursor_status = true;
self.cursor_slow_blink = false;
}
0b01 => {
self.cursor_status = false;
self.cursor_slow_blink = false;
}
_ => {
self.cursor_status = true;
self.cursor_slow_blink = true;
}
}
}
CrtcRegister::CursorEndLine => {
// (R11) 5 bit write only
self.reg[11] = byte & 0x1F;
}
CrtcRegister::StartAddressH => {
// (R12) 6 bit write only
self.reg[12] = byte & 0x3F;
trace_regs!(self);
trace!(
self, | self.update_start_address();
}
CrtcRegister::StartAddressL => {
// (R13) 8 bit write only
self.reg[13] = byte;
trace_regs!(self);
trace!(
self,
"CRTC Register Write (0Dh): StartAddressL updated: {:02X}",
byte
);
self.update_start_address();
}
CrtcRegister::CursorAddressH => {
// (R14) 6 bit read/write | "CRTC Register Write (0Ch): StartAddressH updated: {:02X}",
byte
); | random_line_split |
mc6845.rs | :
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
devices::mc6845.rs
Implementation of the Motorola MC6845 CRT controller.
Used internally by the MDA and CGA video cards.
*/
use crate::tracelogger::TraceLogger;
const CURSOR_LINE_MASK: u8 = 0b0000_1111;
const CURSOR_ATTR_MASK: u8 = 0b0011_0000;
const REGISTER_MAX: usize = 17;
const REGISTER_UNREADABLE_VALUE: u8 = 0x00;
#[derive (Copy, Clone, Debug)]
pub enum CrtcRegister {
HorizontalTotal,
HorizontalDisplayed,
HorizontalSyncPosition,
SyncWidth,
VerticalTotal,
VerticalTotalAdjust,
VerticalDisplayed,
VerticalSync,
InterlaceMode,
MaximumScanlineAddress,
CursorStartLine,
CursorEndLine,
StartAddressH,
StartAddressL,
CursorAddressH,
CursorAddressL,
LightPenPositionH,
LightPenPositionL,
}
use crate::mc6845::CrtcRegister::*;
macro_rules! trace {
($self:ident, $($t:tt)*) => {{
$self.trace_logger.print(&format!($($t)*));
$self.trace_logger.print("\n".to_string());
}};
}
macro_rules! trace_regs {
($self:ident) => {
$self.trace_logger.print(
&format!("")
/*
&format!(
"[SL:{:03} HCC:{:03} VCC:{:03} VT:{:03} VS:{:03}] ",
$self.scanline,
$self.hcc_c0,
$self.vcc_c4,
$self.crtc_vertical_total,
$self.crtc_vertical_sync_pos
)
*/
);
};
}
pub struct Crtc6845 | reg: [u8; 18], // Externally-accessable CRTC register file
reg_select: CrtcRegister, // Selected CRTC register
start_address: u16, // Calculated value from R12 & R13
cursor_address: u16, // Calculated value from R14 & R15
lightpen_position: u16, // Calculated value from R16 & R17
cursor_status: bool,
cursor_start_line: u8,
cursor_slow_blink: bool,
cursor_blink_rate: f64,
display_enable: bool, // True if we are in counting in the display area, false otherwise
hcc_c0: u8, // Horizontal character counter (x pos of character)
vlc_c9: u8, // Vertical line counter - counts during vsync period
vcc_c4: u8, // Vertical character counter (y pos of character)
vsc_c3h: u8,
hsc_c3l: u8,
vtac_c5: u8,
vma: u16, // VMA register - Video memory address
vma_t: u16, // VMA' register - Video memory address temporary
trace_logger: TraceLogger,
}
impl Crtc6845 {
fn new(trace_logger: TraceLogger) -> Self {
Self {
reg: [0; 18],
reg_select: HorizontalTotal,
start_address: 0,
cursor_address: 0,
lightpen_position: 0,
cursor_status: false,
cursor_start_line: 0,
cursor_slow_blink: false,
cursor_blink_rate: 0.0,
display_enable: false,
hcc_c0: 0,
vlc_c9: 0,
vcc_c4: 0,
vsc_c3h: 0,
hsc_c3l: 0,
vtac_c5: 0,
vma: 0,
vma_t: 0,
trace_logger
}
}
pub fn select_register(&mut self, idx: usize) {
if idx > REGISTER_MAX {
return
}
let reg_select = match idx {
0 => HorizontalTotal,
1 => HorizontalDisplayed,
2 => HorizontalSyncPosition,
3 => SyncWidth,
4 => VerticalTotal,
5 => VerticalTotalAdjust,
6 => VerticalDisplayed,
7 => VerticalSync,
8 => InterlaceMode,
9 => MaximumScanlineAddress,
10 => CursorStartLine,
11 => CursorEndLine,
12 => StartAddressH,
13 => StartAddressL,
14 => CursorAddressH,
15 => CursorAddressL,
16 => LightPenPositionH,
_ => LightPenPositionL,
};
}
pub fn write_register(&mut self, byte: u8) {
match self.reg_select {
CrtcRegister::HorizontalTotal => {
// (R0) 8 bit write only
self.reg[0] = byte;
},
CrtcRegister::HorizontalDisplayed => {
// (R1) 8 bit write only
self.reg[1] = byte;
}
CrtcRegister::HorizontalSyncPosition => {
// (R2) 8 bit write only
self.reg[2] = byte;
},
CrtcRegister::SyncWidth => {
// (R3) 8 bit write only
self.reg[3] = byte;
},
CrtcRegister::VerticalTotal => {
// (R4) 7 bit write only
self.reg[4] = byte & 0x7F;
trace_regs!(self);
trace!(
self,
"CRTC Register Write (04h): VerticalTotal updated: {}",
self.reg[4]
)
},
CrtcRegister::VerticalTotalAdjust => {
// (R5) 5 bit write only
self.reg[5] = byte & 0x1F;
}
CrtcRegister::VerticalDisplayed => {
// (R6) 7 bit write only
self.reg[6] = byte & 0x7F;
},
CrtcRegister::VerticalSync => {
// (R7) 7 bit write only
self.reg[7] = byte & 0x7F;
trace_regs!(self);
trace!(
self,
"CRTC Register Write (07h): VerticalSync updated: {}",
self.reg[7]
)
},
CrtcRegister::InterlaceMode => {
// (R8) 2 bit write only
self.reg[8] = byte & 0x03;
},
CrtcRegister::MaximumScanlineAddress => {
// (R9) 5 bit write only
self.reg[9] = byte & 0x1F;
}
CrtcRegister::CursorStartLine => {
// (R10) 7 bit bitfield. Write only.
self.reg[10] = byte & 0x7F;
self.cursor_start_line = byte & CURSOR_LINE_MASK;
match byte & CURSOR_ATTR_MASK >> 4 {
0b00 | 0b10 => {
self.cursor_status = true;
self.cursor_slow_blink = false;
}
0b01 => {
self.cursor_status = false;
self.cursor_slow_blink = false;
}
_ => {
self.cursor_status = true;
self.cursor_slow_blink = true;
}
}
}
CrtcRegister::CursorEndLine => {
// (R11) 5 bit write only
self.reg[11] = byte & 0x1F;
}
CrtcRegister::StartAddressH => {
// (R12) 6 bit write only
self.reg[12] = byte & 0x3F;
trace_regs!(self);
trace!(
self,
"CRTC Register Write (0Ch): StartAddressH updated: {:02X}",
byte
);
self.update_start_address();
}
CrtcRegister::StartAddressL => {
// (R13) 8 bit write only
self.reg[13] = byte;
trace_regs!(self);
trace!(
self,
"CRTC Register Write (0Dh): StartAddressL updated: {:02X}",
byte
);
self.update_start_address();
}
CrtcRegister::CursorAddressH => {
// (R14) 6 bit read/write | {
| identifier_name |
mc6845.rs | The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
devices::mc6845.rs
Implementation of the Motorola MC6845 CRT controller.
Used internally by the MDA and CGA video cards.
*/
use crate::tracelogger::TraceLogger;
const CURSOR_LINE_MASK: u8 = 0b0000_1111;
const CURSOR_ATTR_MASK: u8 = 0b0011_0000;
const REGISTER_MAX: usize = 17;
const REGISTER_UNREADABLE_VALUE: u8 = 0x00;
#[derive (Copy, Clone, Debug)]
pub enum CrtcRegister {
HorizontalTotal,
HorizontalDisplayed,
HorizontalSyncPosition,
SyncWidth,
VerticalTotal,
VerticalTotalAdjust,
VerticalDisplayed,
VerticalSync,
InterlaceMode,
MaximumScanlineAddress,
CursorStartLine,
CursorEndLine,
StartAddressH,
StartAddressL,
CursorAddressH,
CursorAddressL,
LightPenPositionH,
LightPenPositionL,
}
use crate::mc6845::CrtcRegister::*;
macro_rules! trace {
($self:ident, $($t:tt)*) => {{
$self.trace_logger.print(&format!($($t)*));
$self.trace_logger.print("\n".to_string());
}};
}
macro_rules! trace_regs {
($self:ident) => {
$self.trace_logger.print(
&format!("")
/*
&format!(
"[SL:{:03} HCC:{:03} VCC:{:03} VT:{:03} VS:{:03}] ",
$self.scanline,
$self.hcc_c0,
$self.vcc_c4,
$self.crtc_vertical_total,
$self.crtc_vertical_sync_pos
)
*/
);
};
}
pub struct Crtc6845 {
reg: [u8; 18], // Externally-accessable CRTC register file
reg_select: CrtcRegister, // Selected CRTC register
start_address: u16, // Calculated value from R12 & R13
cursor_address: u16, // Calculated value from R14 & R15
lightpen_position: u16, // Calculated value from R16 & R17
cursor_status: bool,
cursor_start_line: u8,
cursor_slow_blink: bool,
cursor_blink_rate: f64,
display_enable: bool, // True if we are in counting in the display area, false otherwise
hcc_c0: u8, // Horizontal character counter (x pos of character)
vlc_c9: u8, // Vertical line counter - counts during vsync period
vcc_c4: u8, // Vertical character counter (y pos of character)
vsc_c3h: u8,
hsc_c3l: u8,
vtac_c5: u8,
vma: u16, // VMA register - Video memory address
vma_t: u16, // VMA' register - Video memory address temporary
trace_logger: TraceLogger,
}
impl Crtc6845 {
fn new(trace_logger: TraceLogger) -> Self {
Self {
reg: [0; 18],
reg_select: HorizontalTotal,
start_address: 0,
cursor_address: 0,
lightpen_position: 0,
cursor_status: false,
cursor_start_line: 0,
cursor_slow_blink: false,
cursor_blink_rate: 0.0,
display_enable: false,
hcc_c0: 0,
vlc_c9: 0,
vcc_c4: 0,
vsc_c3h: 0,
hsc_c3l: 0,
vtac_c5: 0,
vma: 0,
vma_t: 0,
trace_logger
}
}
pub fn select_register(&mut self, idx: usize) {
if idx > REGISTER_MAX {
return
}
let reg_select = match idx {
0 => HorizontalTotal,
1 => HorizontalDisplayed,
2 => HorizontalSyncPosition,
3 => SyncWidth,
4 => VerticalTotal,
5 => VerticalTotalAdjust,
6 => VerticalDisplayed,
7 => VerticalSync,
8 => InterlaceMode,
9 => MaximumScanlineAddress,
10 => CursorStartLine,
11 => CursorEndLine,
12 => StartAddressH,
13 => StartAddressL,
14 => CursorAddressH,
15 => CursorAddressL,
16 => LightPenPositionH,
_ => LightPenPositionL,
};
}
pub fn write_register(&mut self, byte: u8) {
|
trace_regs!(self);
trace!(
self,
"CRTC Register Write (04h): VerticalTotal updated: {}",
self.reg[4]
)
},
CrtcRegister::VerticalTotalAdjust => {
// (R5) 5 bit write only
self.reg[5] = byte & 0x1F;
}
CrtcRegister::VerticalDisplayed => {
// (R6) 7 bit write only
self.reg[6] = byte & 0x7F;
},
CrtcRegister::VerticalSync => {
// (R7) 7 bit write only
self.reg[7] = byte & 0x7F;
trace_regs!(self);
trace!(
self,
"CRTC Register Write (07h): VerticalSync updated: {}",
self.reg[7]
)
},
CrtcRegister::InterlaceMode => {
// (R8) 2 bit write only
self.reg[8] = byte & 0x03;
},
CrtcRegister::MaximumScanlineAddress => {
// (R9) 5 bit write only
self.reg[9] = byte & 0x1F;
}
CrtcRegister::CursorStartLine => {
// (R10) 7 bit bitfield. Write only.
self.reg[10] = byte & 0x7F;
self.cursor_start_line = byte & CURSOR_LINE_MASK;
match byte & CURSOR_ATTR_MASK >> 4 {
0b00 | 0b10 => {
self.cursor_status = true;
self.cursor_slow_blink = false;
}
0b01 => {
self.cursor_status = false;
self.cursor_slow_blink = false;
}
_ => {
self.cursor_status = true;
self.cursor_slow_blink = true;
}
}
}
CrtcRegister::CursorEndLine => {
// (R11) 5 bit write only
self.reg[11] = byte & 0x1F;
}
CrtcRegister::StartAddressH => {
// (R12) 6 bit write only
self.reg[12] = byte & 0x3F;
trace_regs!(self);
trace!(
self,
"CRTC Register Write (0Ch): StartAddressH updated: {:02X}",
byte
);
self.update_start_address();
}
CrtcRegister::StartAddressL => {
// (R13) 8 bit write only
self.reg[13] = byte;
trace_regs!(self);
trace!(
self,
"CRTC Register Write (0Dh): StartAddressL updated: {:02X}",
byte
);
self.update_start_address();
}
CrtcRegister::CursorAddressH => {
// (R14) 6 bit read | match self.reg_select {
CrtcRegister::HorizontalTotal => {
// (R0) 8 bit write only
self.reg[0] = byte;
},
CrtcRegister::HorizontalDisplayed => {
// (R1) 8 bit write only
self.reg[1] = byte;
}
CrtcRegister::HorizontalSyncPosition => {
// (R2) 8 bit write only
self.reg[2] = byte;
},
CrtcRegister::SyncWidth => {
// (R3) 8 bit write only
self.reg[3] = byte;
},
CrtcRegister::VerticalTotal => {
// (R4) 7 bit write only
self.reg[4] = byte & 0x7F; | identifier_body |
UnFlowLoss.py | (x)
x_floor = x1.clamp(0, W - 1)
y1 = torch.floor(y)
y_floor = y1.clamp(0, H - 1)
x0 = x1 + 1
x_ceil = x0.clamp(0, W - 1)
y0 = y1 + 1
y_ceil = y0.clamp(0, H - 1)
x_ceil_out = x0 != x_ceil
y_ceil_out = y0 != y_ceil
x_floor_out = x1 != x_floor
y_floor_out = y1 != y_floor
invalid = torch.cat([x_ceil_out | y_ceil_out,
x_ceil_out | y_floor_out,
x_floor_out | y_ceil_out,
x_floor_out | y_floor_out], dim=1)
# encode coordinates, since the scatter function can only index along one axis
corresponding_map = torch.zeros(B, H * W).type_as(data)
indices = torch.cat([x_ceil + y_ceil * W,
x_ceil + y_floor * W,
x_floor + y_ceil * W,
x_floor + y_floor * W], 1).long() # BxN (N=4*H*W)
values = torch.cat([(1 - torch.abs(x - x_ceil)) * (1 - torch.abs(y - y_ceil)),
(1 - torch.abs(x - x_ceil)) * (1 - torch.abs(y - y_floor)),
(1 - torch.abs(x - x_floor)) * (1 - torch.abs(y - y_ceil)),
(1 - torch.abs(x - x_floor)) * (1 - torch.abs(y - y_floor))],
1)
# values = torch.ones_like(values)
values[invalid] = 0
corresponding_map.scatter_add_(1, indices, values)
# decode coordinates
corresponding_map = corresponding_map.view(B, H, W)
return corresponding_map.unsqueeze(1)
def flow_warp(image, flow12, pad='border', mode='bilinear'):
'''
Warps an image given a flow prediction using grid_sample
'''
batch_sz, _, height, width = image.size()
base_grid = mesh_grid(batch_sz, height, width).type_as(image) # B2HW
v_grid = norm_grid(base_grid + flow12) # BHW2
im1_recons = nn.functional.grid_sample(image, v_grid, mode=mode, padding_mode=pad,
align_corners=False)
return im1_recons
def get_occu_mask_bidirection(flow12, flow21, scale=0.01, bias=0.5):
'''
Get an occlusion mask using both flows such that they match each other
'''
flow21_warped = flow_warp(flow21, flow12, pad='zeros')
flow12_diff = flow12 + flow21_warped
mag = (flow12 * flow12).sum(1, keepdim=True) + \
(flow21_warped * flow21_warped).sum(1, keepdim=True)
occ_thresh = scale * mag + bias
occ = (flow12_diff * flow12_diff).sum(1, keepdim=True) > occ_thresh
return occ.float()
def get_occu_mask_backward(flow21, theta=0.2):
'''
Get an occlusion mask using backward propagation
'''
B, _, H, W = flow21.size()
base_grid = mesh_grid(B, H, W).type_as(flow21) # B2HW
corr_map = get_corresponding_map(base_grid + flow21) # BHW
occu_mask = corr_map.clamp(min=0., max=1.) < theta
return occu_mask.float()
# Credit: https://github.com/simonmeister/UnFlow/blob/master/src/e2eflow/core/losses.py
def TernaryLoss(im, im_warp, max_distance=1):
patch_size = 2 * max_distance + 1
def _rgb_to_grayscale(image):
grayscale = image[:, 0, :, :] * 0.2989 + \
image[:, 1, :, :] * 0.5870 + \
image[:, 2, :, :] * 0.1140
return grayscale.unsqueeze(1)
def _ternary_transform(image):
intensities = _rgb_to_grayscale(image) * 255
out_channels = patch_size * patch_size
w = torch.eye(out_channels).view((out_channels, 1, patch_size, patch_size))
weights = w.type_as(im)
patches = F.conv2d(intensities, weights, padding=max_distance)
transf = patches - intensities
transf_norm = transf / torch.sqrt(0.81 + torch.pow(transf, 2))
return transf_norm
def _hamming_distance(t1, t2):
dist = torch.pow(t1 - t2, 2)
dist_norm = dist / (0.1 + dist)
dist_mean = torch.mean(dist_norm, 1, keepdim=True) # instead of sum
return dist_mean
def _valid_mask(t, padding):
n, _, h, w = t.size()
inner = torch.ones(n, 1, h - 2 * padding, w - 2 * padding).type_as(t)
mask = F.pad(inner, [padding] * 4)
return mask
t1 = _ternary_transform(im)
t2 = _ternary_transform(im_warp)
dist = _hamming_distance(t1, t2)
mask = _valid_mask(im, max_distance)
return dist * mask
def gradient(data):
|
def smooth_grad_1st(flow, image, alpha):
img_dx, img_dy = gradient(image)
weights_x = torch.exp(-torch.mean(torch.abs(img_dx), 1, keepdim=True) * alpha)
weights_y = torch.exp(-torch.mean(torch.abs(img_dy), 1, keepdim=True) * alpha)
dx, dy = gradient(flow)
loss_x = weights_x * dx.abs() / 2.
loss_y = weights_y * dy.abs() / 2
return (loss_x.mean() + loss_y.mean()) / 2.
def smooth_grad_2nd(flow, image, alpha):
img_dx, img_dy = gradient(image)
weights_x = torch.exp(-torch.mean(torch.abs(img_dx), 1, keepdim=True) * alpha)
weights_y = torch.exp(-torch.mean(torch.abs(img_dy), 1, keepdim=True) * alpha)
dx, dy = gradient(flow)
dx2, _ = gradient(dx)
_, dy2 = gradient(dy)
loss_x = weights_x[:, :, :, 1:] * dx2.abs()
loss_y = weights_y[:, :, 1:, :] * dy2.abs()
return (loss_x.mean() + loss_y.mean()) / 2.
class unFlowLoss(nn.modules.Module):
"""
Loss function adopted by ARFlow from originally Unflow.
"""
def __init__(self, weight=1.0, weights=None, consistency=True, back_occ_only=False, **kwargs):
super().__init__()
self.weight = weight
if "l1" in weights:
self.l1_weight = weights["l1"]
if "ssim" in weights:
self.ssim_weight = weights["ssim"]
self.SSIM = SSIM().to("cuda" if torch.cuda.is_available() else "cpu")
if "ternary" in weights:
self.ternary_weight = weights["ternary"]
if 'smooth' in kwargs:
self.smooth_args = kwargs['smooth']
else:
self.smooth_args = {"degree": 2, "alpha" : 0.2, "weighting": 75.0}
self.smooth_w = 75.0 if 'smooth_w' not in kwargs else kwargs['smooth_w']
if 'w_sm_scales' in kwargs:
self.w_sm_scales = kwargs['w_sm_scales']
else:
self.w_sm_scales = [1.0, 0.0, 0.0, 0.0, 0.0]
if 'w_wrp_scales' in kwargs:
self.w_wrp_scales = kwargs['w_wrp_scales']
else:
self.w_wrp_scales = [1.0, 1.0, 1.0, 1.0, 0.0]
self.consistency = consistency
self.back_occ_only = back_occ_only
def loss_photometric(self, im_orig: torch.Tensor,
im_recons: torch.Tensor, occu_mask: torch.Tensor):
loss = []
if occu_mask.mean() == 0:
occu_mask = torch.ones_like(occu_mask)
if hasattr(self, 'l1_weight'):
loss += [self.l1_weight * (im_orig - im_recons | D_dy = data[:, :, 1:] - data[:, :, :-1]
D_dx = data[:, :, :, 1:] - data[:, :, :, :-1]
return D_dx, D_dy | identifier_body |
UnFlowLoss.py | (x)
x_floor = x1.clamp(0, W - 1)
y1 = torch.floor(y)
y_floor = y1.clamp(0, H - 1)
x0 = x1 + 1
x_ceil = x0.clamp(0, W - 1)
y0 = y1 + 1
y_ceil = y0.clamp(0, H - 1)
x_ceil_out = x0 != x_ceil
y_ceil_out = y0 != y_ceil
x_floor_out = x1 != x_floor
y_floor_out = y1 != y_floor
invalid = torch.cat([x_ceil_out | y_ceil_out,
x_ceil_out | y_floor_out,
x_floor_out | y_ceil_out,
x_floor_out | y_floor_out], dim=1)
# encode coordinates, since the scatter function can only index along one axis
corresponding_map = torch.zeros(B, H * W).type_as(data)
indices = torch.cat([x_ceil + y_ceil * W,
x_ceil + y_floor * W,
x_floor + y_ceil * W,
x_floor + y_floor * W], 1).long() # BxN (N=4*H*W)
values = torch.cat([(1 - torch.abs(x - x_ceil)) * (1 - torch.abs(y - y_ceil)),
(1 - torch.abs(x - x_ceil)) * (1 - torch.abs(y - y_floor)),
(1 - torch.abs(x - x_floor)) * (1 - torch.abs(y - y_ceil)),
(1 - torch.abs(x - x_floor)) * (1 - torch.abs(y - y_floor))],
1)
# values = torch.ones_like(values)
values[invalid] = 0
corresponding_map.scatter_add_(1, indices, values)
# decode coordinates
corresponding_map = corresponding_map.view(B, H, W)
return corresponding_map.unsqueeze(1)
def flow_warp(image, flow12, pad='border', mode='bilinear'):
'''
Warps an image given a flow prediction using grid_sample
'''
batch_sz, _, height, width = image.size()
base_grid = mesh_grid(batch_sz, height, width).type_as(image) # B2HW
v_grid = norm_grid(base_grid + flow12) # BHW2
im1_recons = nn.functional.grid_sample(image, v_grid, mode=mode, padding_mode=pad,
align_corners=False)
return im1_recons
def get_occu_mask_bidirection(flow12, flow21, scale=0.01, bias=0.5):
'''
Get an occlusion mask using both flows such that they match each other
'''
flow21_warped = flow_warp(flow21, flow12, pad='zeros')
flow12_diff = flow12 + flow21_warped
mag = (flow12 * flow12).sum(1, keepdim=True) + \
(flow21_warped * flow21_warped).sum(1, keepdim=True)
occ_thresh = scale * mag + bias
occ = (flow12_diff * flow12_diff).sum(1, keepdim=True) > occ_thresh
return occ.float()
def get_occu_mask_backward(flow21, theta=0.2):
'''
Get an occlusion mask using backward propagation
'''
B, _, H, W = flow21.size()
base_grid = mesh_grid(B, H, W).type_as(flow21) # B2HW
corr_map = get_corresponding_map(base_grid + flow21) # BHW
occu_mask = corr_map.clamp(min=0., max=1.) < theta
return occu_mask.float()
# Credit: https://github.com/simonmeister/UnFlow/blob/master/src/e2eflow/core/losses.py
def TernaryLoss(im, im_warp, max_distance=1):
patch_size = 2 * max_distance + 1
def _rgb_to_grayscale(image):
grayscale = image[:, 0, :, :] * 0.2989 + \
image[:, 1, :, :] * 0.5870 + \
image[:, 2, :, :] * 0.1140
return grayscale.unsqueeze(1)
def _ternary_transform(image):
intensities = _rgb_to_grayscale(image) * 255
out_channels = patch_size * patch_size
w = torch.eye(out_channels).view((out_channels, 1, patch_size, patch_size))
weights = w.type_as(im)
patches = F.conv2d(intensities, weights, padding=max_distance)
transf = patches - intensities
transf_norm = transf / torch.sqrt(0.81 + torch.pow(transf, 2))
return transf_norm
def _hamming_distance(t1, t2):
dist = torch.pow(t1 - t2, 2)
dist_norm = dist / (0.1 + dist)
dist_mean = torch.mean(dist_norm, 1, keepdim=True) # instead of sum
return dist_mean
def _valid_mask(t, padding):
n, _, h, w = t.size()
inner = torch.ones(n, 1, h - 2 * padding, w - 2 * padding).type_as(t)
mask = F.pad(inner, [padding] * 4)
return mask
t1 = _ternary_transform(im)
t2 = _ternary_transform(im_warp)
dist = _hamming_distance(t1, t2)
mask = _valid_mask(im, max_distance)
return dist * mask
def gradient(data):
D_dy = data[:, :, 1:] - data[:, :, :-1]
D_dx = data[:, :, :, 1:] - data[:, :, :, :-1]
return D_dx, D_dy
def smooth_grad_1st(flow, image, alpha):
img_dx, img_dy = gradient(image)
weights_x = torch.exp(-torch.mean(torch.abs(img_dx), 1, keepdim=True) * alpha)
weights_y = torch.exp(-torch.mean(torch.abs(img_dy), 1, keepdim=True) * alpha)
dx, dy = gradient(flow)
loss_x = weights_x * dx.abs() / 2.
loss_y = weights_y * dy.abs() / 2
return (loss_x.mean() + loss_y.mean()) / 2.
def smooth_grad_2nd(flow, image, alpha):
img_dx, img_dy = gradient(image)
weights_x = torch.exp(-torch.mean(torch.abs(img_dx), 1, keepdim=True) * alpha)
weights_y = torch.exp(-torch.mean(torch.abs(img_dy), 1, keepdim=True) * alpha)
dx, dy = gradient(flow)
dx2, _ = gradient(dx)
_, dy2 = gradient(dy)
loss_x = weights_x[:, :, :, 1:] * dx2.abs()
loss_y = weights_y[:, :, 1:, :] * dy2.abs()
return (loss_x.mean() + loss_y.mean()) / 2.
class unFlowLoss(nn.modules.Module):
"""
Loss function adopted by ARFlow from originally Unflow.
"""
def __init__(self, weight=1.0, weights=None, consistency=True, back_occ_only=False, **kwargs):
super().__init__()
self.weight = weight
if "l1" in weights:
self.l1_weight = weights["l1"]
if "ssim" in weights:
self.ssim_weight = weights["ssim"]
self.SSIM = SSIM().to("cuda" if torch.cuda.is_available() else "cpu")
if "ternary" in weights:
self.ternary_weight = weights["ternary"]
if 'smooth' in kwargs:
self.smooth_args = kwargs['smooth']
else:
|
self.smooth_w = 75.0 if 'smooth_w' not in kwargs else kwargs['smooth_w']
if 'w_sm_scales' in kwargs:
self.w_sm_scales = kwargs['w_sm_scales']
else:
self.w_sm_scales = [1.0, 0.0, 0.0, 0.0, 0.0]
if 'w_wrp_scales' in kwargs:
self.w_wrp_scales = kwargs['w_wrp_scales']
else:
self.w_wrp_scales = [1.0, 1.0, 1.0, 1.0, 0.0]
self.consistency = consistency
self.back_occ_only = back_occ_only
def loss_photometric(self, im_orig: torch.Tensor,
im_recons: torch.Tensor, occu_mask: torch.Tensor):
loss = []
if occu_mask.mean() == 0:
occu_mask = torch.ones_like(occu_mask)
if hasattr(self, 'l1_weight'):
loss += [self.l1_weight * (im_orig - im_re | self.smooth_args = {"degree": 2, "alpha" : 0.2, "weighting": 75.0} | conditional_block |
UnFlowLoss.py | (x)
x_floor = x1.clamp(0, W - 1)
y1 = torch.floor(y)
y_floor = y1.clamp(0, H - 1)
x0 = x1 + 1
x_ceil = x0.clamp(0, W - 1)
y0 = y1 + 1
y_ceil = y0.clamp(0, H - 1)
x_ceil_out = x0 != x_ceil
y_ceil_out = y0 != y_ceil
x_floor_out = x1 != x_floor
y_floor_out = y1 != y_floor
invalid = torch.cat([x_ceil_out | y_ceil_out,
x_ceil_out | y_floor_out,
x_floor_out | y_ceil_out,
x_floor_out | y_floor_out], dim=1)
# encode coordinates, since the scatter function can only index along one axis
corresponding_map = torch.zeros(B, H * W).type_as(data)
indices = torch.cat([x_ceil + y_ceil * W,
x_ceil + y_floor * W,
x_floor + y_ceil * W,
x_floor + y_floor * W], 1).long() # BxN (N=4*H*W)
values = torch.cat([(1 - torch.abs(x - x_ceil)) * (1 - torch.abs(y - y_ceil)),
(1 - torch.abs(x - x_ceil)) * (1 - torch.abs(y - y_floor)),
(1 - torch.abs(x - x_floor)) * (1 - torch.abs(y - y_ceil)),
(1 - torch.abs(x - x_floor)) * (1 - torch.abs(y - y_floor))],
1)
# values = torch.ones_like(values)
values[invalid] = 0
corresponding_map.scatter_add_(1, indices, values)
# decode coordinates
corresponding_map = corresponding_map.view(B, H, W)
return corresponding_map.unsqueeze(1)
def | (image, flow12, pad='border', mode='bilinear'):
'''
Warps an image given a flow prediction using grid_sample
'''
batch_sz, _, height, width = image.size()
base_grid = mesh_grid(batch_sz, height, width).type_as(image) # B2HW
v_grid = norm_grid(base_grid + flow12) # BHW2
im1_recons = nn.functional.grid_sample(image, v_grid, mode=mode, padding_mode=pad,
align_corners=False)
return im1_recons
def get_occu_mask_bidirection(flow12, flow21, scale=0.01, bias=0.5):
'''
Get an occlusion mask using both flows such that they match each other
'''
flow21_warped = flow_warp(flow21, flow12, pad='zeros')
flow12_diff = flow12 + flow21_warped
mag = (flow12 * flow12).sum(1, keepdim=True) + \
(flow21_warped * flow21_warped).sum(1, keepdim=True)
occ_thresh = scale * mag + bias
occ = (flow12_diff * flow12_diff).sum(1, keepdim=True) > occ_thresh
return occ.float()
def get_occu_mask_backward(flow21, theta=0.2):
'''
Get an occlusion mask using backward propagation
'''
B, _, H, W = flow21.size()
base_grid = mesh_grid(B, H, W).type_as(flow21) # B2HW
corr_map = get_corresponding_map(base_grid + flow21) # BHW
occu_mask = corr_map.clamp(min=0., max=1.) < theta
return occu_mask.float()
# Credit: https://github.com/simonmeister/UnFlow/blob/master/src/e2eflow/core/losses.py
def TernaryLoss(im, im_warp, max_distance=1):
patch_size = 2 * max_distance + 1
def _rgb_to_grayscale(image):
grayscale = image[:, 0, :, :] * 0.2989 + \
image[:, 1, :, :] * 0.5870 + \
image[:, 2, :, :] * 0.1140
return grayscale.unsqueeze(1)
def _ternary_transform(image):
intensities = _rgb_to_grayscale(image) * 255
out_channels = patch_size * patch_size
w = torch.eye(out_channels).view((out_channels, 1, patch_size, patch_size))
weights = w.type_as(im)
patches = F.conv2d(intensities, weights, padding=max_distance)
transf = patches - intensities
transf_norm = transf / torch.sqrt(0.81 + torch.pow(transf, 2))
return transf_norm
def _hamming_distance(t1, t2):
dist = torch.pow(t1 - t2, 2)
dist_norm = dist / (0.1 + dist)
dist_mean = torch.mean(dist_norm, 1, keepdim=True) # instead of sum
return dist_mean
def _valid_mask(t, padding):
n, _, h, w = t.size()
inner = torch.ones(n, 1, h - 2 * padding, w - 2 * padding).type_as(t)
mask = F.pad(inner, [padding] * 4)
return mask
t1 = _ternary_transform(im)
t2 = _ternary_transform(im_warp)
dist = _hamming_distance(t1, t2)
mask = _valid_mask(im, max_distance)
return dist * mask
def gradient(data):
D_dy = data[:, :, 1:] - data[:, :, :-1]
D_dx = data[:, :, :, 1:] - data[:, :, :, :-1]
return D_dx, D_dy
def smooth_grad_1st(flow, image, alpha):
img_dx, img_dy = gradient(image)
weights_x = torch.exp(-torch.mean(torch.abs(img_dx), 1, keepdim=True) * alpha)
weights_y = torch.exp(-torch.mean(torch.abs(img_dy), 1, keepdim=True) * alpha)
dx, dy = gradient(flow)
loss_x = weights_x * dx.abs() / 2.
loss_y = weights_y * dy.abs() / 2
return (loss_x.mean() + loss_y.mean()) / 2.
def smooth_grad_2nd(flow, image, alpha):
img_dx, img_dy = gradient(image)
weights_x = torch.exp(-torch.mean(torch.abs(img_dx), 1, keepdim=True) * alpha)
weights_y = torch.exp(-torch.mean(torch.abs(img_dy), 1, keepdim=True) * alpha)
dx, dy = gradient(flow)
dx2, _ = gradient(dx)
_, dy2 = gradient(dy)
loss_x = weights_x[:, :, :, 1:] * dx2.abs()
loss_y = weights_y[:, :, 1:, :] * dy2.abs()
return (loss_x.mean() + loss_y.mean()) / 2.
class unFlowLoss(nn.modules.Module):
"""
Loss function adopted by ARFlow from originally Unflow.
"""
def __init__(self, weight=1.0, weights=None, consistency=True, back_occ_only=False, **kwargs):
super().__init__()
self.weight = weight
if "l1" in weights:
self.l1_weight = weights["l1"]
if "ssim" in weights:
self.ssim_weight = weights["ssim"]
self.SSIM = SSIM().to("cuda" if torch.cuda.is_available() else "cpu")
if "ternary" in weights:
self.ternary_weight = weights["ternary"]
if 'smooth' in kwargs:
self.smooth_args = kwargs['smooth']
else:
self.smooth_args = {"degree": 2, "alpha" : 0.2, "weighting": 75.0}
self.smooth_w = 75.0 if 'smooth_w' not in kwargs else kwargs['smooth_w']
if 'w_sm_scales' in kwargs:
self.w_sm_scales = kwargs['w_sm_scales']
else:
self.w_sm_scales = [1.0, 0.0, 0.0, 0.0, 0.0]
if 'w_wrp_scales' in kwargs:
self.w_wrp_scales = kwargs['w_wrp_scales']
else:
self.w_wrp_scales = [1.0, 1.0, 1.0, 1.0, 0.0]
self.consistency = consistency
self.back_occ_only = back_occ_only
def loss_photometric(self, im_orig: torch.Tensor,
im_recons: torch.Tensor, occu_mask: torch.Tensor):
loss = []
if occu_mask.mean() == 0:
occu_mask = torch.ones_like(occu_mask)
if hasattr(self, 'l1_weight'):
loss += [self.l1_weight * (im_orig - im_re | flow_warp | identifier_name |
UnFlowLoss.py | (x)
x_floor = x1.clamp(0, W - 1)
y1 = torch.floor(y)
y_floor = y1.clamp(0, H - 1)
x0 = x1 + 1
x_ceil = x0.clamp(0, W - 1)
y0 = y1 + 1
y_ceil = y0.clamp(0, H - 1)
x_ceil_out = x0 != x_ceil
y_ceil_out = y0 != y_ceil
x_floor_out = x1 != x_floor
y_floor_out = y1 != y_floor
invalid = torch.cat([x_ceil_out | y_ceil_out,
x_ceil_out | y_floor_out,
x_floor_out | y_ceil_out,
x_floor_out | y_floor_out], dim=1)
# encode coordinates, since the scatter function can only index along one axis
corresponding_map = torch.zeros(B, H * W).type_as(data)
indices = torch.cat([x_ceil + y_ceil * W,
x_ceil + y_floor * W,
x_floor + y_ceil * W,
x_floor + y_floor * W], 1).long() # BxN (N=4*H*W)
values = torch.cat([(1 - torch.abs(x - x_ceil)) * (1 - torch.abs(y - y_ceil)),
(1 - torch.abs(x - x_ceil)) * (1 - torch.abs(y - y_floor)),
(1 - torch.abs(x - x_floor)) * (1 - torch.abs(y - y_ceil)),
(1 - torch.abs(x - x_floor)) * (1 - torch.abs(y - y_floor))],
1)
# values = torch.ones_like(values) |
corresponding_map.scatter_add_(1, indices, values)
# decode coordinates
corresponding_map = corresponding_map.view(B, H, W)
return corresponding_map.unsqueeze(1)
def flow_warp(image, flow12, pad='border', mode='bilinear'):
'''
Warps an image given a flow prediction using grid_sample
'''
batch_sz, _, height, width = image.size()
base_grid = mesh_grid(batch_sz, height, width).type_as(image) # B2HW
v_grid = norm_grid(base_grid + flow12) # BHW2
im1_recons = nn.functional.grid_sample(image, v_grid, mode=mode, padding_mode=pad,
align_corners=False)
return im1_recons
def get_occu_mask_bidirection(flow12, flow21, scale=0.01, bias=0.5):
'''
Get an occlusion mask using both flows such that they match each other
'''
flow21_warped = flow_warp(flow21, flow12, pad='zeros')
flow12_diff = flow12 + flow21_warped
mag = (flow12 * flow12).sum(1, keepdim=True) + \
(flow21_warped * flow21_warped).sum(1, keepdim=True)
occ_thresh = scale * mag + bias
occ = (flow12_diff * flow12_diff).sum(1, keepdim=True) > occ_thresh
return occ.float()
def get_occu_mask_backward(flow21, theta=0.2):
'''
Get an occlusion mask using backward propagation
'''
B, _, H, W = flow21.size()
base_grid = mesh_grid(B, H, W).type_as(flow21) # B2HW
corr_map = get_corresponding_map(base_grid + flow21) # BHW
occu_mask = corr_map.clamp(min=0., max=1.) < theta
return occu_mask.float()
# Credit: https://github.com/simonmeister/UnFlow/blob/master/src/e2eflow/core/losses.py
def TernaryLoss(im, im_warp, max_distance=1):
patch_size = 2 * max_distance + 1
def _rgb_to_grayscale(image):
grayscale = image[:, 0, :, :] * 0.2989 + \
image[:, 1, :, :] * 0.5870 + \
image[:, 2, :, :] * 0.1140
return grayscale.unsqueeze(1)
def _ternary_transform(image):
intensities = _rgb_to_grayscale(image) * 255
out_channels = patch_size * patch_size
w = torch.eye(out_channels).view((out_channels, 1, patch_size, patch_size))
weights = w.type_as(im)
patches = F.conv2d(intensities, weights, padding=max_distance)
transf = patches - intensities
transf_norm = transf / torch.sqrt(0.81 + torch.pow(transf, 2))
return transf_norm
def _hamming_distance(t1, t2):
dist = torch.pow(t1 - t2, 2)
dist_norm = dist / (0.1 + dist)
dist_mean = torch.mean(dist_norm, 1, keepdim=True) # instead of sum
return dist_mean
def _valid_mask(t, padding):
n, _, h, w = t.size()
inner = torch.ones(n, 1, h - 2 * padding, w - 2 * padding).type_as(t)
mask = F.pad(inner, [padding] * 4)
return mask
t1 = _ternary_transform(im)
t2 = _ternary_transform(im_warp)
dist = _hamming_distance(t1, t2)
mask = _valid_mask(im, max_distance)
return dist * mask
def gradient(data):
D_dy = data[:, :, 1:] - data[:, :, :-1]
D_dx = data[:, :, :, 1:] - data[:, :, :, :-1]
return D_dx, D_dy
def smooth_grad_1st(flow, image, alpha):
img_dx, img_dy = gradient(image)
weights_x = torch.exp(-torch.mean(torch.abs(img_dx), 1, keepdim=True) * alpha)
weights_y = torch.exp(-torch.mean(torch.abs(img_dy), 1, keepdim=True) * alpha)
dx, dy = gradient(flow)
loss_x = weights_x * dx.abs() / 2.
loss_y = weights_y * dy.abs() / 2
return (loss_x.mean() + loss_y.mean()) / 2.
def smooth_grad_2nd(flow, image, alpha):
img_dx, img_dy = gradient(image)
weights_x = torch.exp(-torch.mean(torch.abs(img_dx), 1, keepdim=True) * alpha)
weights_y = torch.exp(-torch.mean(torch.abs(img_dy), 1, keepdim=True) * alpha)
dx, dy = gradient(flow)
dx2, _ = gradient(dx)
_, dy2 = gradient(dy)
loss_x = weights_x[:, :, :, 1:] * dx2.abs()
loss_y = weights_y[:, :, 1:, :] * dy2.abs()
return (loss_x.mean() + loss_y.mean()) / 2.
class unFlowLoss(nn.modules.Module):
"""
Loss function adopted by ARFlow from originally Unflow.
"""
def __init__(self, weight=1.0, weights=None, consistency=True, back_occ_only=False, **kwargs):
super().__init__()
self.weight = weight
if "l1" in weights:
self.l1_weight = weights["l1"]
if "ssim" in weights:
self.ssim_weight = weights["ssim"]
self.SSIM = SSIM().to("cuda" if torch.cuda.is_available() else "cpu")
if "ternary" in weights:
self.ternary_weight = weights["ternary"]
if 'smooth' in kwargs:
self.smooth_args = kwargs['smooth']
else:
self.smooth_args = {"degree": 2, "alpha" : 0.2, "weighting": 75.0}
self.smooth_w = 75.0 if 'smooth_w' not in kwargs else kwargs['smooth_w']
if 'w_sm_scales' in kwargs:
self.w_sm_scales = kwargs['w_sm_scales']
else:
self.w_sm_scales = [1.0, 0.0, 0.0, 0.0, 0.0]
if 'w_wrp_scales' in kwargs:
self.w_wrp_scales = kwargs['w_wrp_scales']
else:
self.w_wrp_scales = [1.0, 1.0, 1.0, 1.0, 0.0]
self.consistency = consistency
self.back_occ_only = back_occ_only
def loss_photometric(self, im_orig: torch.Tensor,
im_recons: torch.Tensor, occu_mask: torch.Tensor):
loss = []
if occu_mask.mean() == 0:
occu_mask = torch.ones_like(occu_mask)
if hasattr(self, 'l1_weight'):
loss += [self.l1_weight * (im_orig - im_re |
values[invalid] = 0 | random_line_split |
model.py | self.clear_groups()
# 再全部重新分类
for sample in self.samples:
to_group = self.groups.get(sample.target_value)
if to_group:
to_group.add_sample(sample)
def classify(self, iteration_callback, completion_callback):
self.iteration_callback = iteration_callback
self.completion_callback = completion_callback
self.iteration_times = 0
self.clear_groups()
self._training()
def predicate(self, features=[]):
# Dirctly output the target value by formula : yi = (W^T * xi + b) or (W^T * xi - b)
# 计算目标估值
target_value = -self.bias
for sample_x in self.samples:
if sample_x.alpha_value != 0:
# SUM ai * yi * K(Xi * x)
target_value += sample_x.alpha_value * sample_x.target_value * self.kernel.calculate(sample_x.features, features)
return self.sgn(target_value)
# 用于在预测输出时,将计算完的样本点目标值正规化成分类目标的 +1 / -1
def sgn(self, value=0.0):
return 1.0 if value >=0.0 else -1.0
'''
@ Private
'''
# 建立要分类的群
def _create_groups(self, targets=[]):
for target_value in targets:
self.groups[target_value] = Group(target_value)
def _training(self):
self.iteration_times += 1
waiting_samples = []
if self.examine_all == True:
waiting_samples = self._samples_without_kkt(self.split_index)
else:
waiting_samples = np.copy(self.samples).tolist()
self._start_to_update(waiting_samples)
def _complet |
self.classify_to_group() # 分类到所属群里
self.completion_callback(self.iteration_times, self.weights, self.bias, self.groups.values())
def _iteration(self):
if self.iteration_callback:
self.iteration_callback(self.iteration_times, self.weights, self.bias)
def _random_pick_index(self, avoid_index=0):
max = len(self.samples)
random_index = 0
# 整体样本数有2个,就直接选择另一个点来做
if max == 2:
random_index = (max - 1) - avoid_index
else:
# 整体样本有多个,就跑 Random Picking
random_index = np.random.random_integers(0, max-1)
if random_index == avoid_index:
random_index = self._random_pick_index(avoid_index)
return random_index
def _update_parameters(self, update_alphas=[]):
alphas_count = len(update_alphas)
# 如果 update_alphas 为空,代表完成本次迭代训练, 但所有Samples 都还未全部符合 KKT 条件
if alphas_count == 0:
return TrainingTypes.OneIterationFinished
self._calculate_error_value()
self.iteration_update_count += 1
# If we still have over 2 samples can do match-update task
if alphas_count > 1:
match_sample = update_alphas.pop(0) # Romoved the sample from array
self.split_index = self.samples.index(match_sample) +1
max_index = -1
max_error_value = -1.0
for index, other_sample in enumerate(self.samples):
# 找到误差距离绝对值最大的样本点
error_distance = abs(other_sample.error_value - match_sample.error_value)
if error_distance > max_error_value and index >= self.split_index:
max_error_value = error_distance
max_index = index
# If we successfully chose a sample
if max_index >= 0:
self.update_alpha(max_index, self.samples.index(match_sample))
# 单纯检查是否所有数据都符合 KKT 条件了 ? 还有不符合的就再递归跑本 function
if self._all_conform_kkt() == False:
if self.examine_all == True:
update_alphas = self._samples_without_kkt(self.split_index)
# 将其它不符合 KKT 条件的点都再重新进行更新 weights & bias 运算, 直至所有点都运算完毕, 才 return 完成 1 迭代
return self._update_parameters(update_alphas)
else:
# 更新完所有不符合 KKT 条件的点, 同时代表完成完整的 1 迭代运算就 return 完成
return TrainingTypes.AllConformedKKT
else:
# 挑 1 出来搭配,之后重新跑一次上次的运算
# 这里有 2 个挑选的方式
match_sample = update_alphas.pop(0)
if self.examine_all == True:
self.split_index = self.samples.index(match_sample) + 1
update_alphas = self._samples_without_kkt(self.split_index)
match_index = self.samples.index(match_sample)
self.update_alpha(self._random_pick_index(match_index), match_index)
return self._update_parameters(update_alphas)
# Default is failed.
return TrainingTypes.Failed
# Updating alpha and bias.
def update_alpha(self, main_index, match_index):
main = self.samples[main_index]
match = self.samples[match_index]
new_match_alpha = self._calculate_new_match_alpha(main, match)
new_main_alpha =self._calculate_new_main_alpha(main, match, new_match_alpha)
# Quickly updating the weights and bias by used 2 new alpha values
# 1). calculates the delta weights, Formula:
# delta main = (new alpha 1 - old alpha 1) * target1 * x1
# delta match = (new alpha 2 - old alpha 2) * target2 * x2
# delta weights = delta main + delta match
main_factor = (new_main_alpha - main.alpha_value) * main.target_value
delta_main = np.multiply(main.features, main_factor)
match_factor = (new_match_alpha - match.alpha_value) * match.target_value
delta_match = np.multiply(match.features, match_factor)
delta_weights = np.add(delta_main, delta_match)
# 2). let original weights + delta weights to be new weights array, Formula:
new_weights = np.add(self.weights, delta_weights) # 这里 new_weights 会是 numpy.ndarray
del self.weights[:]
self.weights = new_weights.tolist()
# 3). quickly updating bias via 2 samples (Main & Match), Formula:
# W: weights, X: sample features, b: bias, T: sample target value (+1 / -1)
# WX - b = T
# -> -b = T - WX
# b = WX -T
# 故 new bias = new weights * X - (+1 or -1)
# +1 或 -1 是看当前的 X 是被分到 +1 或者 -1 的标签(Target)
# 这里会有 2 个 new bias, 再去按照条件做挑选 1 个出来用。
# 以下有个更新 bias 的方法( New, Old):
# Linear method
# new_main_bias = np.dot(self.weights, main.features) - main.target_value
# new_match_bias = np.dot(self.weights, match.features) - match.target_value
# Old method
new_main_bias = self.bias + main.error_value + ((new_main_alpha - main.alpha_value) * main.target_value * self.kernel.calculate(main.features, main.features)) + ((new_match_alpha - match.alpha_value) * match.target_value * self.kernel.calculate(match.features, main.features))
new_match_bias = self.bias + match.error_value + ((new_main_alpha - main.alpha_value) * main.target_value * self.kernel.calculate(main.features, match.features)) + ((new_match_alpha - match.alpha_value) * match.target_value * self.kernel.calculate(match.features, match.features))
# 4). to choose the final bias or to get the average value of biases
self.samples[main_index].alpha_value = new_main_alpha
self.samples[match_index].alpha_value = new_match_alpha
new_bias = 0.0
if self._is_accept_alpha(new_main_alpha):
new_bias = new_main_bias
elif self._is_accept_alpha(new_match_alpha):
new_bias = new_match_bias
else:
new_bias = (new_main_bias + new_match_bias) * 0.5
# Update old bias
self.bias = new_bias
# 更新 Weights / Bias
def _start_to_update(self, waiting_samples=[]):
# if len(waiting_samples) == 0:
# self._completion()
# return
# 更新参数(权重与偏权)后,再判断是否需要停止迭代或要继续下一迭代的训练
training_result = self._update_parameters(waiting_samples)
self.split_index = 0
self.examine_all = True
# 完成 1 个迭代的运算
if training_result == TrainingTypes.OneIterationFinished:
# 先判断迭代是否达到上限
if self.iteration_times >= self.max_iteration:
| ion(self):
if self.completion_callback: | conditional_block |
model.py | ):
self.clear_groups()
# 再全部重新分类
for sample in self.samples:
to_group = self.groups.get(sample.target_value)
if to_group:
to_group.add_sample(sample)
def classify(self, iteration_callback, completion_callback):
self.iteration_callback = iteration_callback
self.completion_callback = completion_callback
self.iteration_times = 0
self.clear_groups()
self._training()
def predicate(self, features=[]):
# Dirctly output the target value by formula : yi = (W^T * xi + b) or (W^T * xi - b)
# 计算目标估值
target_value = -self.bias
for sample_x in self.samples:
if sample_x.alpha_value != 0:
# SUM ai * yi * K(Xi * x)
target_value += sample_x.alpha_value * sample_x.target_value * self.kernel.calculate(sample_x.features, features)
return self.sgn(target_value)
# 用于在预测输出时,将计算完的样本点目标值正规化成分类目标的 +1 / -1
def sgn(self, value=0.0):
return 1.0 if value >=0.0 else -1.0
'''
@ Private
'''
# 建立要分类的群
def _create_groups(self, targets=[]):
for target_value in targets:
self.groups[target_value] = Group(target_value)
def _training(self):
self.iteration_times += 1
waiting_samples = []
if self.examine_all == True:
waiting_samples = self._samples_without_kkt(self.split_index)
else:
waiting_samples = np.copy(self.samples).tolist()
self._start_to_update(waiting_samples)
def _completion(self):
if self.completion_callback:
self.classify_to_group() # 分类到所属群里
self.completion_callback(self.iteration_times, self.weights, self.bias, self.groups.values())
def _iteration(self):
if self.iteration_callback:
self.iteration_callback(self.iteration_times, self.weights, self.bias)
def _random_pick_index(self, avoid_index=0):
max = len(self.samples)
random_index = 0
# 整体样本数有2个,就直接选择另一个点来做
if max == 2:
random_index = (max - 1) - avoid_index
else:
# 整体样本有多个,就跑 Random Pi | random_index = np.random.random_integers(0, max-1)
if random_index == avoid_index:
random_index = self._random_pick_index(avoid_index)
return random_index
def _update_parameters(self, update_alphas=[]):
alphas_count = len(update_alphas)
# 如果 update_alphas 为空,代表完成本次迭代训练, 但所有Samples 都还未全部符合 KKT 条件
if alphas_count == 0:
return TrainingTypes.OneIterationFinished
self._calculate_error_value()
self.iteration_update_count += 1
# If we still have over 2 samples can do match-update task
if alphas_count > 1:
match_sample = update_alphas.pop(0) # Romoved the sample from array
self.split_index = self.samples.index(match_sample) +1
max_index = -1
max_error_value = -1.0
for index, other_sample in enumerate(self.samples):
# 找到误差距离绝对值最大的样本点
error_distance = abs(other_sample.error_value - match_sample.error_value)
if error_distance > max_error_value and index >= self.split_index:
max_error_value = error_distance
max_index = index
# If we successfully chose a sample
if max_index >= 0:
self.update_alpha(max_index, self.samples.index(match_sample))
# 单纯检查是否所有数据都符合 KKT 条件了 ? 还有不符合的就再递归跑本 function
if self._all_conform_kkt() == False:
if self.examine_all == True:
update_alphas = self._samples_without_kkt(self.split_index)
# 将其它不符合 KKT 条件的点都再重新进行更新 weights & bias 运算, 直至所有点都运算完毕, 才 return 完成 1 迭代
return self._update_parameters(update_alphas)
else:
# 更新完所有不符合 KKT 条件的点, 同时代表完成完整的 1 迭代运算就 return 完成
return TrainingTypes.AllConformedKKT
else:
# 挑 1 出来搭配,之后重新跑一次上次的运算
# 这里有 2 个挑选的方式
match_sample = update_alphas.pop(0)
if self.examine_all == True:
self.split_index = self.samples.index(match_sample) + 1
update_alphas = self._samples_without_kkt(self.split_index)
match_index = self.samples.index(match_sample)
self.update_alpha(self._random_pick_index(match_index), match_index)
return self._update_parameters(update_alphas)
# Default is failed.
return TrainingTypes.Failed
# Updating alpha and bias.
def update_alpha(self, main_index, match_index):
main = self.samples[main_index]
match = self.samples[match_index]
new_match_alpha = self._calculate_new_match_alpha(main, match)
new_main_alpha =self._calculate_new_main_alpha(main, match, new_match_alpha)
# Quickly updating the weights and bias by used 2 new alpha values
# 1). calculates the delta weights, Formula:
# delta main = (new alpha 1 - old alpha 1) * target1 * x1
# delta match = (new alpha 2 - old alpha 2) * target2 * x2
# delta weights = delta main + delta match
main_factor = (new_main_alpha - main.alpha_value) * main.target_value
delta_main = np.multiply(main.features, main_factor)
match_factor = (new_match_alpha - match.alpha_value) * match.target_value
delta_match = np.multiply(match.features, match_factor)
delta_weights = np.add(delta_main, delta_match)
# 2). let original weights + delta weights to be new weights array, Formula:
new_weights = np.add(self.weights, delta_weights) # 这里 new_weights 会是 numpy.ndarray
del self.weights[:]
self.weights = new_weights.tolist()
# 3). quickly updating bias via 2 samples (Main & Match), Formula:
# W: weights, X: sample features, b: bias, T: sample target value (+1 / -1)
# WX - b = T
# -> -b = T - WX
# b = WX -T
# 故 new bias = new weights * X - (+1 or -1)
# +1 或 -1 是看当前的 X 是被分到 +1 或者 -1 的标签(Target)
# 这里会有 2 个 new bias, 再去按照条件做挑选 1 个出来用。
# 以下有个更新 bias 的方法( New, Old):
# Linear method
# new_main_bias = np.dot(self.weights, main.features) - main.target_value
# new_match_bias = np.dot(self.weights, match.features) - match.target_value
# Old method
new_main_bias = self.bias + main.error_value + ((new_main_alpha - main.alpha_value) * main.target_value * self.kernel.calculate(main.features, main.features)) + ((new_match_alpha - match.alpha_value) * match.target_value * self.kernel.calculate(match.features, main.features))
new_match_bias = self.bias + match.error_value + ((new_main_alpha - main.alpha_value) * main.target_value * self.kernel.calculate(main.features, match.features)) + ((new_match_alpha - match.alpha_value) * match.target_value * self.kernel.calculate(match.features, match.features))
# 4). to choose the final bias or to get the average value of biases
self.samples[main_index].alpha_value = new_main_alpha
self.samples[match_index].alpha_value = new_match_alpha
new_bias = 0.0
if self._is_accept_alpha(new_main_alpha):
new_bias = new_main_bias
elif self._is_accept_alpha(new_match_alpha):
new_bias = new_match_bias
else:
new_bias = (new_main_bias + new_match_bias) * 0.5
# Update old bias
self.bias = new_bias
# 更新 Weights / Bias
def _start_to_update(self, waiting_samples=[]):
# if len(waiting_samples) == 0:
# self._completion()
# return
# 更新参数(权重与偏权)后,再判断是否需要停止迭代或要继续下一迭代的训练
training_result = self._update_parameters(waiting_samples)
self.split_index = 0
self.examine_all = True
# 完成 1 个迭代的运算
if training_result == TrainingTypes.OneIterationFinished:
# 先判断迭代是否达到上限
if self.iteration_times >= self.max_iteration:
| cking
| identifier_name |
model.py | ):
self.clear_groups()
# 再全部重新分类
for sample in self.samples:
to_group = self.groups.get(sample.target_value)
if to_group:
to_group.add_sample(sample)
def classify(self, iteration_callback, completion_callback):
self.iteration_callback = iteration_callback
self.completion_callback = completion_callback
self.iteration_times = 0
self.clear_groups()
self._training()
def predicate(self, features=[]):
# Dirctly output the target value by formula : yi = (W^T * xi + b) or (W^T * xi - b)
# 计算目标估值
target_value = -self.bias
for sample_x in self.samples:
if sample_x.alpha_value != 0:
# SUM ai * yi * K(Xi * x)
target_value += sample_x.alpha_value * sample_x.target_value * self.kernel.calculate(sample_x.features, features)
return self.sgn(target_value)
# 用于在预测输出时,将计算完的样本点目标值正规化成分类目标的 +1 / -1
def sgn(self, value=0.0):
return 1.0 if value >=0.0 else -1.0
'''
@ Private
'''
# 建立要分类的群
def _create_groups(self, targets=[]):
for target_value in targets:
self.groups[target_value] = Group(target_value)
def _training(self):
self.iteration_times += 1
waiting_samples = []
if self.examine_all == True:
waiting_samples = self._samples_without_kkt(self.split_index)
else:
waiting_samples = np.copy(self.samples).tolist()
self._start_to_update(waiting_samples)
def _completion(self):
if self.completion_callback:
self.classify_to_group() # 分类到所属群里
self.completion_callback(self.iteration_times, self.weights, self.bias, self.groups.values())
def _iteration(self):
if self.iteration_callback:
self.iteration_callback(self.iteration_times, self.weights, self.bias)
def _random_pick_index(self, avoid_index=0):
max = len(self.samples)
random_index = 0
# 整体样本数有2个,就直接选择另一个点来做
if max == 2:
random_index = (max - 1) - avoid_index
else:
# 整体样本有多个,就跑 Random Picking
random_index = np.random.random_integers(0, max-1)
if random_index == avoid_index:
random_index = self._random_pick_index(avoid_index)
return random_index
def _update_parameters(self, update_alphas=[]):
alphas_count = len(update_alphas)
# 如果 update_alphas 为空,代表完成本次迭代训练, 但所有Samples 都还未全部符合 KKT 条件
if alphas_count == 0:
return TrainingTypes.OneIterationFinished
self._calculate_error_value()
self.iteration_update_count += 1
# If we still have over 2 samples can do match-update task
if alphas_count > 1:
match_sample = update_alphas.pop(0) # Romoved the sample from array
self.split_index = self.samples.index(match_sample) +1
max_index = -1
max_error_value = -1.0
for index, other_sample in enumerate(self.samples):
# 找到误差距离绝对值最大的样本点
error_distance = abs(other_sample.error_value - match_sample.error_value)
if error_distance > max_error_value and index >= self.split_index:
max_error_value = error_distance
max_index = index
# If we successfully chose a sample
if max_index >= 0:
self.update_alpha(max_index, self.samples.index(match_sample))
# 单纯检查是否所有数据都符合 KKT 条件了 ? 还有不符合的就再递归跑本 function
if self._all_conform_kkt() == False:
if self.examine_all == True:
update_alphas = self._samples_without_kkt(self.split_index)
# 将其它不符合 KKT 条件的点都再重新进行更新 weights & bias 运算, 直至所有点都运算完毕, 才 return 完成 1 迭代
return self._update_parameters(update_alphas)
else:
# 更新完所有不符合 KKT 条件的点, 同时代表完成完整的 1 迭代运算就 return 完成
return TrainingTypes.AllConformedKKT
else:
# 挑 1 出来搭配,之后重新跑一次上次的运算
# 这里有 2 个挑选的方式
match_sample = update_alphas.pop(0)
if self.examine_all == True:
self.split_index = self.samples.index(match_sample) + 1
update_alphas = self._samples_without_kkt(self.split_index)
match_index = self.samples.index(match_sample)
self.update_alpha(self._random_pick_index(match_index), match_index)
return self._update_parameters(update_alphas)
# Default is failed.
return TrainingTypes.Failed
# Updating alpha and bias.
def update_alpha(self, main_index, match_index): |
# Quickly updating the weights and bias by used 2 new alpha values
# 1). calculates the delta weights, Formula:
# delta main = (new alpha 1 - old alpha 1) * target1 * x1
# delta match = (new alpha 2 - old alpha 2) * target2 * x2
# delta weights = delta main + delta match
main_factor = (new_main_alpha - main.alpha_value) * main.target_value
delta_main = np.multiply(main.features, main_factor)
match_factor = (new_match_alpha - match.alpha_value) * match.target_value
delta_match = np.multiply(match.features, match_factor)
delta_weights = np.add(delta_main, delta_match)
# 2). let original weights + delta weights to be new weights array, Formula:
new_weights = np.add(self.weights, delta_weights) # 这里 new_weights 会是 numpy.ndarray
del self.weights[:]
self.weights = new_weights.tolist()
# 3). quickly updating bias via 2 samples (Main & Match), Formula:
# W: weights, X: sample features, b: bias, T: sample target value (+1 / -1)
# WX - b = T
# -> -b = T - WX
# b = WX -T
# 故 new bias = new weights * X - (+1 or -1)
# +1 或 -1 是看当前的 X 是被分到 +1 或者 -1 的标签(Target)
# 这里会有 2 个 new bias, 再去按照条件做挑选 1 个出来用。
# 以下有个更新 bias 的方法( New, Old):
# Linear method
# new_main_bias = np.dot(self.weights, main.features) - main.target_value
# new_match_bias = np.dot(self.weights, match.features) - match.target_value
# Old method
new_main_bias = self.bias + main.error_value + ((new_main_alpha - main.alpha_value) * main.target_value * self.kernel.calculate(main.features, main.features)) + ((new_match_alpha - match.alpha_value) * match.target_value * self.kernel.calculate(match.features, main.features))
new_match_bias = self.bias + match.error_value + ((new_main_alpha - main.alpha_value) * main.target_value * self.kernel.calculate(main.features, match.features)) + ((new_match_alpha - match.alpha_value) * match.target_value * self.kernel.calculate(match.features, match.features))
# 4). to choose the final bias or to get the average value of biases
self.samples[main_index].alpha_value = new_main_alpha
self.samples[match_index].alpha_value = new_match_alpha
new_bias = 0.0
if self._is_accept_alpha(new_main_alpha):
new_bias = new_main_bias
elif self._is_accept_alpha(new_match_alpha):
new_bias = new_match_bias
else:
new_bias = (new_main_bias + new_match_bias) * 0.5
# Update old bias
self.bias = new_bias
# 更新 Weights / Bias
def _start_to_update(self, waiting_samples=[]):
# if len(waiting_samples) == 0:
# self._completion()
# return
# 更新参数(权重与偏权)后,再判断是否需要停止迭代或要继续下一迭代的训练
training_result = self._update_parameters(waiting_samples)
self.split_index = 0
self.examine_all = True
# 完成 1 个迭代的运算
if training_result == TrainingTypes.OneIterationFinished:
# 先判断迭代是否达到上限
if self.iteration_times >= self.max_iteration:
self | main = self.samples[main_index]
match = self.samples[match_index]
new_match_alpha = self._calculate_new_match_alpha(main, match)
new_main_alpha =self._calculate_new_main_alpha(main, match, new_match_alpha) | random_line_split |
model.py | del self.samples[:]
def clear_groups(self):
# 清空 group 里记录的 samples
for target, group in self.groups.items():
group.clear()
# 从每一个 Sample 的target value 来逐一判断该点是属于哪一群
def classify_to_group(self):
self.clear_groups()
# 再全部重新分类
for sample in self.samples:
to_group = self.groups.get(sample.target_value)
if to_group:
to_group.add_sample(sample)
def classify(self, iteration_callback, completion_callback):
self.iteration_callback = iteration_callback
self.completion_callback = completion_callback
self.iteration_times = 0
self.clear_groups()
self._training()
def predicate(self, features=[]):
# Dirctly output the target value by formula : yi = (W^T * xi + b) or (W^T * xi - b)
# 计算目标估值
target_value = -self.bias
for sample_x in self.samples:
if sample_x.alpha_value != 0:
# SUM ai * yi * K(Xi * x)
target_value += sample_x.alpha_value * sample_x.target_value * self.kernel.calculate(sample_x.features, features)
return self.sgn(target_value)
# 用于在预测输出时,将计算完的样本点目标值正规化成分类目标的 +1 / -1
def sgn(self, value=0.0):
return 1.0 if value >=0.0 else -1.0
'''
@ Private
'''
# 建立要分类的群
def _create_groups(self, targets=[]):
for target_value in targets:
self.groups[target_value] = Group(target_value)
def _training(self):
self.iteration_times += 1
waiting_samples = []
if self.examine_all == True:
waiting_samples = self._samples_without_kkt(self.split_index)
else:
waiting_samples = np.copy(self.samples).tolist()
self._start_to_update(waiting_samples)
def _completion(self):
if self.completion_callback:
self.classify_to_group() # 分类到所属群里
self.completion_callback(self.iteration_times, self.weights, self.bias, self.groups.values())
def _iteration(self):
if self.iteration_callback:
self.iteration_callback(self.iteration_times, self.weights, self.bias)
def _random_pick_index(self, avoid_index=0):
max = len(self.samples)
random_index = 0
# 整体样本数有2个,就直接选择另一个点来做
if max == 2:
random_index = (max - 1) - avoid_index
else:
# 整体样本有多个,就跑 Random Picking
random_index = np.random.random_integers(0, max-1)
if random_index == avoid_index:
random_index = self._random_pick_index(avoid_index)
return random_index
def _update_parameters(self, update_alphas=[]):
alphas_count = len(update_alphas)
# 如果 update_alphas 为空,代表完成本次迭代训练, 但所有Samples 都还未全部符合 KKT 条件
if alphas_count == 0:
return TrainingTypes.OneIterationFinished
self._calculate_error_value()
self.iteration_update_count += 1
# If we still have over 2 samples can do match-update task
if alphas_count > 1:
match_sample = update_alphas.pop(0) # Romoved the sample from array
self.split_index = self.samples.index(match_sample) +1
max_index = -1
max_error_value = -1.0
for index, other_sample in enumerate(self.samples):
# 找到误差距离绝对值最大的样本点
error_distance = abs(other_sample.error_value - match_sample.error_value)
if error_distance > max_error_value and index >= self.split_index:
max_error_value = error_distance
max_index = index
# If we successfully chose a sample
if max_index >= 0:
self.update_alpha(max_index, self.samples.index(match_sample))
# 单纯检查是否所有数据都符合 KKT 条件了 ? 还有不符合的就再递归跑本 function
if self._all_conform_kkt() == False:
if self.examine_all == True:
update_alphas = self._samples_without_kkt(self.split_index)
# 将其它不符合 KKT 条件的点都再重新进行更新 weights & bias 运算, 直至所有点都运算完毕, 才 return 完成 1 迭代
return self._update_parameters(update_alphas)
else:
# 更新完所有不符合 KKT 条件的点, 同时代表完成完整的 1 迭代运算就 return 完成
return TrainingTypes.AllConformedKKT
else:
# 挑 1 出来搭配,之后重新跑一次上次的运算
# 这里有 2 个挑选的方式
match_sample = update_alphas.pop(0)
if self.examine_all == True:
self.split_index = self.samples.index(match_sample) + 1
update_alphas = self._samples_without_kkt(self.split_index)
match_index = self.samples.index(match_sample)
self.update_alpha(self._random_pick_index(match_index), match_index)
return self._update_parameters(update_alphas)
# Default is failed.
return TrainingTypes.Failed
# Updating alpha and bias.
def update_alpha(self, main_index, match_index):
main = self.samples[main_index]
match = self.samples[match_index]
new_match_alpha = self._calculate_new_match_alpha(main, match)
new_main_alpha =self._calculate_new_main_alpha(main, match, new_match_alpha)
# Quickly updating the weights and bias by used 2 new alpha values
# 1). calculates the delta weights, Formula:
# delta main = (new alpha 1 - old alpha 1) * target1 * x1
# delta match = (new alpha 2 - old alpha 2) * target2 * x2
# delta weights = delta main + delta match
main_factor = (new_main_alpha - main.alpha_value) * main.target_value
delta_main = np.multiply(main.features, main_factor)
match_factor = (new_match_alpha - match.alpha_value) * match.target_value
delta_match = np.multiply(match.features, match_factor)
delta_weights = np.add(delta_main, delta_match)
# 2). let original weights + delta weights to be new weights array, Formula:
new_weights = np.add(self.weights, delta_weights) # 这里 new_weights 会是 numpy.ndarray
del self.weights[:]
self.weights = new_weights.tolist()
# 3). quickly updating bias via 2 samples (Main & Match), Formula:
# W: weights, X: sample features, b: bias, T: sample target value (+1 / -1)
# WX - b = T
# -> -b = T - WX
# b = WX -T
# 故 new bias = new weights * X - (+1 or -1)
# +1 或 -1 是看当前的 X 是被分到 +1 或者 -1 的标签(Target)
# 这里会有 2 个 new bias, 再去按照条件做挑选 1 个出来用。
# 以下有个更新 bias 的方法( New, Old):
# Linear method
# new_main_bias = np.dot(self.weights, main.features) - main.target_value
# new_match_bias = np.dot(self.weights, match.features) - match.target_value
# Old method
new_main_bias = self.bias + main.error_value + ((new_main_alpha - main.alpha_value) * main.target_value * self.kernel.calculate(main.features, main.features)) + ((new_match_alpha - match.alpha_value) * match.target_value * self.kernel.calculate(match.features, main.features))
new_match_bias = self.bias + match.error_value + ((new_main_alpha - main.alpha_value) * main.target_value * self.kernel.calculate(main.features, match.features)) + ((new_match_alpha - match.alpha_value) * match.target_value * self.kernel.calculate(match.features, match.features))
# 4). to choose the final bias or to get the average value of biases
self.samples[main_index].alpha_value = new_main_alpha
self.samples[match_index].alpha_value = new_match_alpha
new_bias = 0.0
if self._is_accept_alpha(new_main_alpha):
new_bias = new_main_bias
elif self._is_accept_alpha(new_match_alpha):
new_bias = new_match_bias
else:
new_bias = (new_main_bias + new_match_bias) * 0.5
# Update old bias
self.bias = new_bias
# 更新 Weights / Bias
def _start_to_update(self, waiting_samples=[]):
# if len(waiting_samples) == 0:
# self._completion()
# return
# 更新参数(权重与偏权 | self.weights[:]
for i in xrange(0, count):
self.weights.append(0.0)
def clear_samples(self):
| identifier_body | |
coeditor.rs | Constraints, Target, LifeCycle, LifeCycleCtx, Size};
use std::sync::Arc;
use tokio::sync::broadcast::{Sender};
use tokio::task::JoinHandle;
use parking_lot::RwLock;
use crate::{RustpadClient, Edit};
use std::time::Duration;
use crate::editor_binding::EditorBinding;
use crate::code_editor::code_editor::CodeEditor;
use crate::code_editor::text::{Selection, EditableText};
use tokio::sync::broadcast;
use std::collections::HashMap;
use tokio_tungstenite::tungstenite::Message;
use tokio_tungstenite::connect_async;
use futures::StreamExt;
use log::{info, warn};
pub const COEDITOR_INIT_CLIENT: Selector<Arc<RwLock<RustpadClient>>> = Selector::new("coeditor-init-client");
pub const USER_EDIT_SELECTOR: Selector<Edit> = Selector::new("user-edit");
pub const USER_CURSOR_UPDATE_SELECTOR: Selector<()> = Selector::new("user-cursor-data");
fn create_connection_loop(client: Arc<RwLock<RustpadClient>>, close_tx: Sender<()>) -> JoinHandle<()> {
tokio::spawn(async move {
info!("connecting");
let conn = client.read().server_url.clone();
loop {
let x = Arc::clone(&client);
if try_connect(&conn, x, close_tx.clone()).await.is_none() {
break;
}
tokio::time::sleep(Duration::from_millis(1000)).await;
warn!("Reconnecting ...");
}
})
}
async fn try_connect(connect_addr: &String, client: Arc<RwLock<RustpadClient>>, close_tx: Sender<()>) -> Option<()> {
let url = url::Url::parse(connect_addr).unwrap();
let (ws_tx, ws_rx) = futures_channel::mpsc::unbounded::<Message>();
client.write().ws_sender = Some(ws_tx.clone());
client.write().users.clear();
//
let res = connect_async(url).await;
if res.is_err() {
eprintln!("{:?}", res.err().unwrap());
return Some(());
}
let (ws_stream, _) = res.unwrap();
println!("WebSocket handshake has been successfully completed");
client.read().on_connected.invoke(());
let (write, read) = ws_stream.split();
let websocket_tx = ws_rx.map(Ok).forward(write);
let client2 = Arc::clone(&client);
let receive_handler =
read.for_each(|message| async {
if message.is_err() |
let data = message.unwrap().to_string();
println!("Received: {}", &data);
client2.write().handle_message(serde_json::from_slice(data.as_bytes()).expect("parse data failed"));
});
client.write().send_info();
client.write().send_cursor_data();
if let Some(outstanding) = &client.read().outstanding {
client.write().send_operation(outstanding);
}
let mut close_rx = close_tx.subscribe();
tokio::select! {
_ = close_rx.recv() => {
ws_tx.unbounded_send(Message::Close(None)).unwrap();
println!("client closed.");
return None;
}
_ = websocket_tx => {}
_ = receive_handler => {
println!("server closed");
}
}
println!("{} disconnected", &connect_addr);
client.write().ws_sender = None;
Some(())
}
pub struct CoEditorWidget {
inner: WidgetPod<EditorBinding, CodeEditor<EditorBinding>>,
id: WidgetId,
pub server_url: String,
client: Option<Arc<RwLock<RustpadClient>>>,
connection_handle: Option<JoinHandle<()>>,
event_sink: Option<ExtEventSink>,
close_tx: Sender<()>,
last_selection: Selection,
}
impl Drop for CoEditorWidget {
fn drop(&mut self) {
self.close_tx.send(()).unwrap();
futures::executor::block_on(
tokio::time::timeout(Duration::from_secs(5),
self.connection_handle.take().unwrap(),
)
);
println!("CoEditorWidget destructed");
}
}
impl CoEditorWidget {
pub fn new(server_url: String) -> Self {
println!("CoEditorWidget created");
CoEditorWidget {
inner: WidgetPod::new(CodeEditor::<EditorBinding>::multiline()),
server_url,
id: WidgetId::next(),
client: None,
connection_handle: None,
event_sink: None,
close_tx: broadcast::channel(1).0,
last_selection: Selection::default(),
}
}
}
impl Widget<EditorBinding> for CoEditorWidget {
fn event(&mut self, ctx: &mut EventCtx, event: &Event, data: &mut EditorBinding, env: &Env) {
if let Event::Command(cmd) = event {
println!("received {:?}", cmd);
}
match event {
Event::Command(command) if command.get(COEDITOR_INIT_CLIENT).is_some()
&& command.target() == Target::Widget(ctx.widget_id()) => {
let client = command.get(COEDITOR_INIT_CLIENT).unwrap();
data.set_client(client);
println!("editor binding client initialized");
}
Event::Command(command) if command.get(USER_EDIT_SELECTOR).is_some()
&& command.target() == Target::Widget(ctx.widget_id()) => {
println!("received edit command");
let edit = command.get(USER_EDIT_SELECTOR).unwrap();
let selection = self.inner.widget().text().borrow().selection();
let transform_selection = |selection: Selection| -> Selection {
let transform_index = |x: usize| -> usize {
if x < edit.begin {
x
} else if x > edit.end {
x + edit.begin + edit.content.len() - edit.end
} else {
edit.begin + edit.content.len()
}
};
Selection::new(
transform_index(selection.anchor),
transform_index(selection.active),
)
};
data.edit_without_callback(edit);
let _ = self.inner.widget_mut().text_mut().borrow_mut().set_selection(transform_selection(selection));
self.inner.widget_mut().text_mut().borrow_mut().decorations.iter_mut()
.for_each(|(_, b)| *b = transform_selection(b.clone()));
}
Event::Command(command) if command.get(USER_CURSOR_UPDATE_SELECTOR).is_some()
&& command.target() == Target::Widget(ctx.widget_id()) => {
println!("received cursor command");
let content = &data.content;
let unicode_offset_to_utf8_offset = |offset: u32| -> usize {
content.iter().take(offset as usize).collect::<String>().len()
};
let mut new_decorations = HashMap::new();
let my_id = self.client.as_ref().unwrap().read().id();
self.client.as_ref().unwrap().read().user_cursors.iter()
.filter(|(&id, _)| id != my_id)
.filter(|(_, data)| !data.selections.is_empty())
.for_each(|(&id, sel)| {
new_decorations.insert(id, Selection::new(
unicode_offset_to_utf8_offset(sel.selections[0].0),
unicode_offset_to_utf8_offset(sel.selections[0].1),
));
});
self.inner.widget_mut().text_mut().borrow_mut().decorations = new_decorations;
}
_ => self.inner.event(ctx, event, data, env)
}
}
fn lifecycle(&mut self, ctx: &mut LifeCycleCtx, event: &LifeCycle, data: &EditorBinding, env: &Env) {
self.inner.lifecycle(ctx, event, data, env);
match event {
LifeCycle::WidgetAdded => {
self.id = ctx.widget_id();
println!("CoEditorWidget initialized with id: {:?}", self.id);
self.event_sink = Some(ctx.get_external_handle());
let client = RustpadClient::create(self.server_url.clone());
client.write().widget_id = Some(self.id);
client.write().set_event_sink(
self.event_sink.as_ref().unwrap().clone(),
self.id,
);
self.client = Some(Arc::clone(&client));
ctx.get_external_handle().submit_command(
COEDITOR_INIT_CLIENT,
Box::new(Arc::clone(&client)),
Target::Widget(self.id),
).expect("send command failed");
self.connection_handle = Some(create_connection_loop(client, self.close_tx.clone()));
}
_ => {}
}
}
fn update(&mut self, ctx: &mut UpdateCtx, old_data: &EditorBinding, data: &EditorBinding, env: &Env) {
if old_data.after_edits.len() != data.after_edits.len() {
println!("editor binding's callback changed from {} to {}", old_data.after_edits.len(), data.after_edits.len());
}
let new_selection = self.inner.widget().text().borrow().selection();
if self.last_selection != new_selection {
self.last_selection = new_selection;
let borrow = self.inner.widget_mut().text_mut().borrow_mut();
let content = &borrow.layout.text().unwrap().content_as_string;
let active = content.slice(0..new_selection.active).unwrap_or_default().to_string().chars().count() as u32;
let anchor = content.slice(0..new_selection.anchor).unwrap_or_default().to_string().chars().count() as u32;
self.client.as_ref().unwrap().write().update_and_send_cursor_data((active, anchor));
}
self.inner.update(ctx, data, env);
}
fn layout(&mut | {
return;
} | conditional_block |
coeditor.rs | BoxConstraints, Target, LifeCycle, LifeCycleCtx, Size};
use std::sync::Arc;
use tokio::sync::broadcast::{Sender};
use tokio::task::JoinHandle;
use parking_lot::RwLock;
use crate::{RustpadClient, Edit};
use std::time::Duration;
use crate::editor_binding::EditorBinding;
use crate::code_editor::code_editor::CodeEditor;
use crate::code_editor::text::{Selection, EditableText};
use tokio::sync::broadcast;
use std::collections::HashMap; | use futures::StreamExt;
use log::{info, warn};
pub const COEDITOR_INIT_CLIENT: Selector<Arc<RwLock<RustpadClient>>> = Selector::new("coeditor-init-client");
pub const USER_EDIT_SELECTOR: Selector<Edit> = Selector::new("user-edit");
pub const USER_CURSOR_UPDATE_SELECTOR: Selector<()> = Selector::new("user-cursor-data");
fn create_connection_loop(client: Arc<RwLock<RustpadClient>>, close_tx: Sender<()>) -> JoinHandle<()> {
tokio::spawn(async move {
info!("connecting");
let conn = client.read().server_url.clone();
loop {
let x = Arc::clone(&client);
if try_connect(&conn, x, close_tx.clone()).await.is_none() {
break;
}
tokio::time::sleep(Duration::from_millis(1000)).await;
warn!("Reconnecting ...");
}
})
}
async fn try_connect(connect_addr: &String, client: Arc<RwLock<RustpadClient>>, close_tx: Sender<()>) -> Option<()> {
let url = url::Url::parse(connect_addr).unwrap();
let (ws_tx, ws_rx) = futures_channel::mpsc::unbounded::<Message>();
client.write().ws_sender = Some(ws_tx.clone());
client.write().users.clear();
//
let res = connect_async(url).await;
if res.is_err() {
eprintln!("{:?}", res.err().unwrap());
return Some(());
}
let (ws_stream, _) = res.unwrap();
println!("WebSocket handshake has been successfully completed");
client.read().on_connected.invoke(());
let (write, read) = ws_stream.split();
let websocket_tx = ws_rx.map(Ok).forward(write);
let client2 = Arc::clone(&client);
let receive_handler =
read.for_each(|message| async {
if message.is_err() {
return;
}
let data = message.unwrap().to_string();
println!("Received: {}", &data);
client2.write().handle_message(serde_json::from_slice(data.as_bytes()).expect("parse data failed"));
});
client.write().send_info();
client.write().send_cursor_data();
if let Some(outstanding) = &client.read().outstanding {
client.write().send_operation(outstanding);
}
let mut close_rx = close_tx.subscribe();
tokio::select! {
_ = close_rx.recv() => {
ws_tx.unbounded_send(Message::Close(None)).unwrap();
println!("client closed.");
return None;
}
_ = websocket_tx => {}
_ = receive_handler => {
println!("server closed");
}
}
println!("{} disconnected", &connect_addr);
client.write().ws_sender = None;
Some(())
}
pub struct CoEditorWidget {
inner: WidgetPod<EditorBinding, CodeEditor<EditorBinding>>,
id: WidgetId,
pub server_url: String,
client: Option<Arc<RwLock<RustpadClient>>>,
connection_handle: Option<JoinHandle<()>>,
event_sink: Option<ExtEventSink>,
close_tx: Sender<()>,
last_selection: Selection,
}
impl Drop for CoEditorWidget {
fn drop(&mut self) {
self.close_tx.send(()).unwrap();
futures::executor::block_on(
tokio::time::timeout(Duration::from_secs(5),
self.connection_handle.take().unwrap(),
)
);
println!("CoEditorWidget destructed");
}
}
impl CoEditorWidget {
pub fn new(server_url: String) -> Self {
println!("CoEditorWidget created");
CoEditorWidget {
inner: WidgetPod::new(CodeEditor::<EditorBinding>::multiline()),
server_url,
id: WidgetId::next(),
client: None,
connection_handle: None,
event_sink: None,
close_tx: broadcast::channel(1).0,
last_selection: Selection::default(),
}
}
}
impl Widget<EditorBinding> for CoEditorWidget {
fn event(&mut self, ctx: &mut EventCtx, event: &Event, data: &mut EditorBinding, env: &Env) {
if let Event::Command(cmd) = event {
println!("received {:?}", cmd);
}
match event {
Event::Command(command) if command.get(COEDITOR_INIT_CLIENT).is_some()
&& command.target() == Target::Widget(ctx.widget_id()) => {
let client = command.get(COEDITOR_INIT_CLIENT).unwrap();
data.set_client(client);
println!("editor binding client initialized");
}
Event::Command(command) if command.get(USER_EDIT_SELECTOR).is_some()
&& command.target() == Target::Widget(ctx.widget_id()) => {
println!("received edit command");
let edit = command.get(USER_EDIT_SELECTOR).unwrap();
let selection = self.inner.widget().text().borrow().selection();
let transform_selection = |selection: Selection| -> Selection {
let transform_index = |x: usize| -> usize {
if x < edit.begin {
x
} else if x > edit.end {
x + edit.begin + edit.content.len() - edit.end
} else {
edit.begin + edit.content.len()
}
};
Selection::new(
transform_index(selection.anchor),
transform_index(selection.active),
)
};
data.edit_without_callback(edit);
let _ = self.inner.widget_mut().text_mut().borrow_mut().set_selection(transform_selection(selection));
self.inner.widget_mut().text_mut().borrow_mut().decorations.iter_mut()
.for_each(|(_, b)| *b = transform_selection(b.clone()));
}
Event::Command(command) if command.get(USER_CURSOR_UPDATE_SELECTOR).is_some()
&& command.target() == Target::Widget(ctx.widget_id()) => {
println!("received cursor command");
let content = &data.content;
let unicode_offset_to_utf8_offset = |offset: u32| -> usize {
content.iter().take(offset as usize).collect::<String>().len()
};
let mut new_decorations = HashMap::new();
let my_id = self.client.as_ref().unwrap().read().id();
self.client.as_ref().unwrap().read().user_cursors.iter()
.filter(|(&id, _)| id != my_id)
.filter(|(_, data)| !data.selections.is_empty())
.for_each(|(&id, sel)| {
new_decorations.insert(id, Selection::new(
unicode_offset_to_utf8_offset(sel.selections[0].0),
unicode_offset_to_utf8_offset(sel.selections[0].1),
));
});
self.inner.widget_mut().text_mut().borrow_mut().decorations = new_decorations;
}
_ => self.inner.event(ctx, event, data, env)
}
}
fn lifecycle(&mut self, ctx: &mut LifeCycleCtx, event: &LifeCycle, data: &EditorBinding, env: &Env) {
self.inner.lifecycle(ctx, event, data, env);
match event {
LifeCycle::WidgetAdded => {
self.id = ctx.widget_id();
println!("CoEditorWidget initialized with id: {:?}", self.id);
self.event_sink = Some(ctx.get_external_handle());
let client = RustpadClient::create(self.server_url.clone());
client.write().widget_id = Some(self.id);
client.write().set_event_sink(
self.event_sink.as_ref().unwrap().clone(),
self.id,
);
self.client = Some(Arc::clone(&client));
ctx.get_external_handle().submit_command(
COEDITOR_INIT_CLIENT,
Box::new(Arc::clone(&client)),
Target::Widget(self.id),
).expect("send command failed");
self.connection_handle = Some(create_connection_loop(client, self.close_tx.clone()));
}
_ => {}
}
}
fn update(&mut self, ctx: &mut UpdateCtx, old_data: &EditorBinding, data: &EditorBinding, env: &Env) {
if old_data.after_edits.len() != data.after_edits.len() {
println!("editor binding's callback changed from {} to {}", old_data.after_edits.len(), data.after_edits.len());
}
let new_selection = self.inner.widget().text().borrow().selection();
if self.last_selection != new_selection {
self.last_selection = new_selection;
let borrow = self.inner.widget_mut().text_mut().borrow_mut();
let content = &borrow.layout.text().unwrap().content_as_string;
let active = content.slice(0..new_selection.active).unwrap_or_default().to_string().chars().count() as u32;
let anchor = content.slice(0..new_selection.anchor).unwrap_or_default().to_string().chars().count() as u32;
self.client.as_ref().unwrap().write().update_and_send_cursor_data((active, anchor));
}
self.inner.update(ctx, data, env);
}
fn layout(&mut self | use tokio_tungstenite::tungstenite::Message;
use tokio_tungstenite::connect_async; | random_line_split |
coeditor.rs | Constraints, Target, LifeCycle, LifeCycleCtx, Size};
use std::sync::Arc;
use tokio::sync::broadcast::{Sender};
use tokio::task::JoinHandle;
use parking_lot::RwLock;
use crate::{RustpadClient, Edit};
use std::time::Duration;
use crate::editor_binding::EditorBinding;
use crate::code_editor::code_editor::CodeEditor;
use crate::code_editor::text::{Selection, EditableText};
use tokio::sync::broadcast;
use std::collections::HashMap;
use tokio_tungstenite::tungstenite::Message;
use tokio_tungstenite::connect_async;
use futures::StreamExt;
use log::{info, warn};
pub const COEDITOR_INIT_CLIENT: Selector<Arc<RwLock<RustpadClient>>> = Selector::new("coeditor-init-client");
pub const USER_EDIT_SELECTOR: Selector<Edit> = Selector::new("user-edit");
pub const USER_CURSOR_UPDATE_SELECTOR: Selector<()> = Selector::new("user-cursor-data");
fn create_connection_loop(client: Arc<RwLock<RustpadClient>>, close_tx: Sender<()>) -> JoinHandle<()> {
tokio::spawn(async move {
info!("connecting");
let conn = client.read().server_url.clone();
loop {
let x = Arc::clone(&client);
if try_connect(&conn, x, close_tx.clone()).await.is_none() {
break;
}
tokio::time::sleep(Duration::from_millis(1000)).await;
warn!("Reconnecting ...");
}
})
}
async fn try_connect(connect_addr: &String, client: Arc<RwLock<RustpadClient>>, close_tx: Sender<()>) -> Option<()> {
let url = url::Url::parse(connect_addr).unwrap();
let (ws_tx, ws_rx) = futures_channel::mpsc::unbounded::<Message>();
client.write().ws_sender = Some(ws_tx.clone());
client.write().users.clear();
//
let res = connect_async(url).await;
if res.is_err() {
eprintln!("{:?}", res.err().unwrap());
return Some(());
}
let (ws_stream, _) = res.unwrap();
println!("WebSocket handshake has been successfully completed");
client.read().on_connected.invoke(());
let (write, read) = ws_stream.split();
let websocket_tx = ws_rx.map(Ok).forward(write);
let client2 = Arc::clone(&client);
let receive_handler =
read.for_each(|message| async {
if message.is_err() {
return;
}
let data = message.unwrap().to_string();
println!("Received: {}", &data);
client2.write().handle_message(serde_json::from_slice(data.as_bytes()).expect("parse data failed"));
});
client.write().send_info();
client.write().send_cursor_data();
if let Some(outstanding) = &client.read().outstanding {
client.write().send_operation(outstanding);
}
let mut close_rx = close_tx.subscribe();
tokio::select! {
_ = close_rx.recv() => {
ws_tx.unbounded_send(Message::Close(None)).unwrap();
println!("client closed.");
return None;
}
_ = websocket_tx => {}
_ = receive_handler => {
println!("server closed");
}
}
println!("{} disconnected", &connect_addr);
client.write().ws_sender = None;
Some(())
}
pub struct CoEditorWidget {
inner: WidgetPod<EditorBinding, CodeEditor<EditorBinding>>,
id: WidgetId,
pub server_url: String,
client: Option<Arc<RwLock<RustpadClient>>>,
connection_handle: Option<JoinHandle<()>>,
event_sink: Option<ExtEventSink>,
close_tx: Sender<()>,
last_selection: Selection,
}
impl Drop for CoEditorWidget {
fn drop(&mut self) |
}
impl CoEditorWidget {
pub fn new(server_url: String) -> Self {
println!("CoEditorWidget created");
CoEditorWidget {
inner: WidgetPod::new(CodeEditor::<EditorBinding>::multiline()),
server_url,
id: WidgetId::next(),
client: None,
connection_handle: None,
event_sink: None,
close_tx: broadcast::channel(1).0,
last_selection: Selection::default(),
}
}
}
impl Widget<EditorBinding> for CoEditorWidget {
fn event(&mut self, ctx: &mut EventCtx, event: &Event, data: &mut EditorBinding, env: &Env) {
if let Event::Command(cmd) = event {
println!("received {:?}", cmd);
}
match event {
Event::Command(command) if command.get(COEDITOR_INIT_CLIENT).is_some()
&& command.target() == Target::Widget(ctx.widget_id()) => {
let client = command.get(COEDITOR_INIT_CLIENT).unwrap();
data.set_client(client);
println!("editor binding client initialized");
}
Event::Command(command) if command.get(USER_EDIT_SELECTOR).is_some()
&& command.target() == Target::Widget(ctx.widget_id()) => {
println!("received edit command");
let edit = command.get(USER_EDIT_SELECTOR).unwrap();
let selection = self.inner.widget().text().borrow().selection();
let transform_selection = |selection: Selection| -> Selection {
let transform_index = |x: usize| -> usize {
if x < edit.begin {
x
} else if x > edit.end {
x + edit.begin + edit.content.len() - edit.end
} else {
edit.begin + edit.content.len()
}
};
Selection::new(
transform_index(selection.anchor),
transform_index(selection.active),
)
};
data.edit_without_callback(edit);
let _ = self.inner.widget_mut().text_mut().borrow_mut().set_selection(transform_selection(selection));
self.inner.widget_mut().text_mut().borrow_mut().decorations.iter_mut()
.for_each(|(_, b)| *b = transform_selection(b.clone()));
}
Event::Command(command) if command.get(USER_CURSOR_UPDATE_SELECTOR).is_some()
&& command.target() == Target::Widget(ctx.widget_id()) => {
println!("received cursor command");
let content = &data.content;
let unicode_offset_to_utf8_offset = |offset: u32| -> usize {
content.iter().take(offset as usize).collect::<String>().len()
};
let mut new_decorations = HashMap::new();
let my_id = self.client.as_ref().unwrap().read().id();
self.client.as_ref().unwrap().read().user_cursors.iter()
.filter(|(&id, _)| id != my_id)
.filter(|(_, data)| !data.selections.is_empty())
.for_each(|(&id, sel)| {
new_decorations.insert(id, Selection::new(
unicode_offset_to_utf8_offset(sel.selections[0].0),
unicode_offset_to_utf8_offset(sel.selections[0].1),
));
});
self.inner.widget_mut().text_mut().borrow_mut().decorations = new_decorations;
}
_ => self.inner.event(ctx, event, data, env)
}
}
fn lifecycle(&mut self, ctx: &mut LifeCycleCtx, event: &LifeCycle, data: &EditorBinding, env: &Env) {
self.inner.lifecycle(ctx, event, data, env);
match event {
LifeCycle::WidgetAdded => {
self.id = ctx.widget_id();
println!("CoEditorWidget initialized with id: {:?}", self.id);
self.event_sink = Some(ctx.get_external_handle());
let client = RustpadClient::create(self.server_url.clone());
client.write().widget_id = Some(self.id);
client.write().set_event_sink(
self.event_sink.as_ref().unwrap().clone(),
self.id,
);
self.client = Some(Arc::clone(&client));
ctx.get_external_handle().submit_command(
COEDITOR_INIT_CLIENT,
Box::new(Arc::clone(&client)),
Target::Widget(self.id),
).expect("send command failed");
self.connection_handle = Some(create_connection_loop(client, self.close_tx.clone()));
}
_ => {}
}
}
fn update(&mut self, ctx: &mut UpdateCtx, old_data: &EditorBinding, data: &EditorBinding, env: &Env) {
if old_data.after_edits.len() != data.after_edits.len() {
println!("editor binding's callback changed from {} to {}", old_data.after_edits.len(), data.after_edits.len());
}
let new_selection = self.inner.widget().text().borrow().selection();
if self.last_selection != new_selection {
self.last_selection = new_selection;
let borrow = self.inner.widget_mut().text_mut().borrow_mut();
let content = &borrow.layout.text().unwrap().content_as_string;
let active = content.slice(0..new_selection.active).unwrap_or_default().to_string().chars().count() as u32;
let anchor = content.slice(0..new_selection.anchor).unwrap_or_default().to_string().chars().count() as u32;
self.client.as_ref().unwrap().write().update_and_send_cursor_data((active, anchor));
}
self.inner.update(ctx, data, env);
}
fn layout(&mut | {
self.close_tx.send(()).unwrap();
futures::executor::block_on(
tokio::time::timeout(Duration::from_secs(5),
self.connection_handle.take().unwrap(),
)
);
println!("CoEditorWidget destructed");
} | identifier_body |
coeditor.rs | BoxConstraints, Target, LifeCycle, LifeCycleCtx, Size};
use std::sync::Arc;
use tokio::sync::broadcast::{Sender};
use tokio::task::JoinHandle;
use parking_lot::RwLock;
use crate::{RustpadClient, Edit};
use std::time::Duration;
use crate::editor_binding::EditorBinding;
use crate::code_editor::code_editor::CodeEditor;
use crate::code_editor::text::{Selection, EditableText};
use tokio::sync::broadcast;
use std::collections::HashMap;
use tokio_tungstenite::tungstenite::Message;
use tokio_tungstenite::connect_async;
use futures::StreamExt;
use log::{info, warn};
pub const COEDITOR_INIT_CLIENT: Selector<Arc<RwLock<RustpadClient>>> = Selector::new("coeditor-init-client");
pub const USER_EDIT_SELECTOR: Selector<Edit> = Selector::new("user-edit");
pub const USER_CURSOR_UPDATE_SELECTOR: Selector<()> = Selector::new("user-cursor-data");
fn create_connection_loop(client: Arc<RwLock<RustpadClient>>, close_tx: Sender<()>) -> JoinHandle<()> {
tokio::spawn(async move {
info!("connecting");
let conn = client.read().server_url.clone();
loop {
let x = Arc::clone(&client);
if try_connect(&conn, x, close_tx.clone()).await.is_none() {
break;
}
tokio::time::sleep(Duration::from_millis(1000)).await;
warn!("Reconnecting ...");
}
})
}
async fn try_connect(connect_addr: &String, client: Arc<RwLock<RustpadClient>>, close_tx: Sender<()>) -> Option<()> {
let url = url::Url::parse(connect_addr).unwrap();
let (ws_tx, ws_rx) = futures_channel::mpsc::unbounded::<Message>();
client.write().ws_sender = Some(ws_tx.clone());
client.write().users.clear();
//
let res = connect_async(url).await;
if res.is_err() {
eprintln!("{:?}", res.err().unwrap());
return Some(());
}
let (ws_stream, _) = res.unwrap();
println!("WebSocket handshake has been successfully completed");
client.read().on_connected.invoke(());
let (write, read) = ws_stream.split();
let websocket_tx = ws_rx.map(Ok).forward(write);
let client2 = Arc::clone(&client);
let receive_handler =
read.for_each(|message| async {
if message.is_err() {
return;
}
let data = message.unwrap().to_string();
println!("Received: {}", &data);
client2.write().handle_message(serde_json::from_slice(data.as_bytes()).expect("parse data failed"));
});
client.write().send_info();
client.write().send_cursor_data();
if let Some(outstanding) = &client.read().outstanding {
client.write().send_operation(outstanding);
}
let mut close_rx = close_tx.subscribe();
tokio::select! {
_ = close_rx.recv() => {
ws_tx.unbounded_send(Message::Close(None)).unwrap();
println!("client closed.");
return None;
}
_ = websocket_tx => {}
_ = receive_handler => {
println!("server closed");
}
}
println!("{} disconnected", &connect_addr);
client.write().ws_sender = None;
Some(())
}
pub struct CoEditorWidget {
inner: WidgetPod<EditorBinding, CodeEditor<EditorBinding>>,
id: WidgetId,
pub server_url: String,
client: Option<Arc<RwLock<RustpadClient>>>,
connection_handle: Option<JoinHandle<()>>,
event_sink: Option<ExtEventSink>,
close_tx: Sender<()>,
last_selection: Selection,
}
impl Drop for CoEditorWidget {
fn drop(&mut self) {
self.close_tx.send(()).unwrap();
futures::executor::block_on(
tokio::time::timeout(Duration::from_secs(5),
self.connection_handle.take().unwrap(),
)
);
println!("CoEditorWidget destructed");
}
}
impl CoEditorWidget {
pub fn new(server_url: String) -> Self {
println!("CoEditorWidget created");
CoEditorWidget {
inner: WidgetPod::new(CodeEditor::<EditorBinding>::multiline()),
server_url,
id: WidgetId::next(),
client: None,
connection_handle: None,
event_sink: None,
close_tx: broadcast::channel(1).0,
last_selection: Selection::default(),
}
}
}
impl Widget<EditorBinding> for CoEditorWidget {
fn | (&mut self, ctx: &mut EventCtx, event: &Event, data: &mut EditorBinding, env: &Env) {
if let Event::Command(cmd) = event {
println!("received {:?}", cmd);
}
match event {
Event::Command(command) if command.get(COEDITOR_INIT_CLIENT).is_some()
&& command.target() == Target::Widget(ctx.widget_id()) => {
let client = command.get(COEDITOR_INIT_CLIENT).unwrap();
data.set_client(client);
println!("editor binding client initialized");
}
Event::Command(command) if command.get(USER_EDIT_SELECTOR).is_some()
&& command.target() == Target::Widget(ctx.widget_id()) => {
println!("received edit command");
let edit = command.get(USER_EDIT_SELECTOR).unwrap();
let selection = self.inner.widget().text().borrow().selection();
let transform_selection = |selection: Selection| -> Selection {
let transform_index = |x: usize| -> usize {
if x < edit.begin {
x
} else if x > edit.end {
x + edit.begin + edit.content.len() - edit.end
} else {
edit.begin + edit.content.len()
}
};
Selection::new(
transform_index(selection.anchor),
transform_index(selection.active),
)
};
data.edit_without_callback(edit);
let _ = self.inner.widget_mut().text_mut().borrow_mut().set_selection(transform_selection(selection));
self.inner.widget_mut().text_mut().borrow_mut().decorations.iter_mut()
.for_each(|(_, b)| *b = transform_selection(b.clone()));
}
Event::Command(command) if command.get(USER_CURSOR_UPDATE_SELECTOR).is_some()
&& command.target() == Target::Widget(ctx.widget_id()) => {
println!("received cursor command");
let content = &data.content;
let unicode_offset_to_utf8_offset = |offset: u32| -> usize {
content.iter().take(offset as usize).collect::<String>().len()
};
let mut new_decorations = HashMap::new();
let my_id = self.client.as_ref().unwrap().read().id();
self.client.as_ref().unwrap().read().user_cursors.iter()
.filter(|(&id, _)| id != my_id)
.filter(|(_, data)| !data.selections.is_empty())
.for_each(|(&id, sel)| {
new_decorations.insert(id, Selection::new(
unicode_offset_to_utf8_offset(sel.selections[0].0),
unicode_offset_to_utf8_offset(sel.selections[0].1),
));
});
self.inner.widget_mut().text_mut().borrow_mut().decorations = new_decorations;
}
_ => self.inner.event(ctx, event, data, env)
}
}
fn lifecycle(&mut self, ctx: &mut LifeCycleCtx, event: &LifeCycle, data: &EditorBinding, env: &Env) {
self.inner.lifecycle(ctx, event, data, env);
match event {
LifeCycle::WidgetAdded => {
self.id = ctx.widget_id();
println!("CoEditorWidget initialized with id: {:?}", self.id);
self.event_sink = Some(ctx.get_external_handle());
let client = RustpadClient::create(self.server_url.clone());
client.write().widget_id = Some(self.id);
client.write().set_event_sink(
self.event_sink.as_ref().unwrap().clone(),
self.id,
);
self.client = Some(Arc::clone(&client));
ctx.get_external_handle().submit_command(
COEDITOR_INIT_CLIENT,
Box::new(Arc::clone(&client)),
Target::Widget(self.id),
).expect("send command failed");
self.connection_handle = Some(create_connection_loop(client, self.close_tx.clone()));
}
_ => {}
}
}
fn update(&mut self, ctx: &mut UpdateCtx, old_data: &EditorBinding, data: &EditorBinding, env: &Env) {
if old_data.after_edits.len() != data.after_edits.len() {
println!("editor binding's callback changed from {} to {}", old_data.after_edits.len(), data.after_edits.len());
}
let new_selection = self.inner.widget().text().borrow().selection();
if self.last_selection != new_selection {
self.last_selection = new_selection;
let borrow = self.inner.widget_mut().text_mut().borrow_mut();
let content = &borrow.layout.text().unwrap().content_as_string;
let active = content.slice(0..new_selection.active).unwrap_or_default().to_string().chars().count() as u32;
let anchor = content.slice(0..new_selection.anchor).unwrap_or_default().to_string().chars().count() as u32;
self.client.as_ref().unwrap().write().update_and_send_cursor_data((active, anchor));
}
self.inner.update(ctx, data, env);
}
fn layout(&mut | event | identifier_name |
build-xml.js | str, prefix, ignores) {
// 替换字符串中 {{}} 包含的表达式
// 获取类似 a.b.c 表达式中第一个 | ction getFirstWord(word) {
return word.match(/[_a-z][\w\d]*/i)[0];
}
// 检查类似 a.b.c 格式表达式是否忽略绑定
function shouldIgnore(word, matchs, n) {
if (word[0] === '"' || word[0] === "'" || /^\d+$/.test(word)) return true;
let w = getFirstWord(word);
if (ignores.hasOwnProperty(w) || (matchs && inText(matchs, n))) {
return true;
}
if (['state', 'props'].indexOf(w) < 0) {
console.error(`'${from.fromSrc}' 中发现无效变量引用 '${word}',XML模板中只能引用组件'props'和'state'中的数据。`.red);
console.error('如果您的项目基于Labrador 0.5.x,请按照升级指南升级到0.6.x版本 https://github.com/maichong/labrador');
}
return false;
}
if (prefix) {
prefix += '.';
} else {
prefix = '';
}
return str.replace(/\{\{([^}]+)\}\}/ig, function (matchs, words) {
// matchs 是{{xxxxx}}格式的字符串
// words 是{{}}中间的表达式
// ...foo
if (/^\s*\.\.\.[\w_][\w\d\-_.\[\]]*\s*$/.test(words)) {
let word = words.match(/\s*\.\.\.([\w_][\w\d\-_.\[\]]*)/)[1].trim();
if (shouldIgnore(word)) {
return matchs;
}
return `{{...${prefix}${word}}}`;
}
let isArray = /{{\s*\[/.test(matchs);
if (!isArray) {
//支持对象简写
let arrays = words.split(',');
if (arrays.length > 1) {
let isObject = true;
let props = arrays.map(function (str) {
if (!isObject) return;
// str 为对象中的一个属性, 可能为 a:b / a / ...a / ...a.b
str = str.trim();
let arr = str.split(':');
if (arr.length === 1) {
// 如果属性表达式中不包含冒号
// 如果为简写属性表达式,例如 {foo}
if (/^[a-z_][\w\d]*$/i.test(str)) {
if (ignores[str]) {
return str + ':' + str;
}
return str + ':' + prefix + str;
}
// 属性展开表达式 ...foo
if (/^\.{3}[a-z_][\w\d.\[\]]*$/i.test(str)) {
let word = str.substr(3);
if (shouldIgnore(word)) {
return str;
}
return '...' + prefix + word;
}
// 判定 ${matchs} 不为对象表达式
isObject = false;
return;
}
// 存在冒号的对象属性表达式
let word = arr[1].trim();
// foo:2.3
if (/^[\d.]+$/.test(word)) {
return arr[0] + ':' + word;
}
// foo:bar
// 'foo':bar
if (shouldIgnore(word)) {
return str;
}
// foo:bar
// 'foo':bar
// foo
return arr[0] + ':' + prefix + word;
});
//console.log('isObject', isObject);
if (isObject) {
return '{{' + props.join(',') + '}}';
}
}
}
return matchs.replace(/[^\.\w'"]([a-z_\$][\w\d\._\$]*)/ig, function (match, word, n) {
if (shouldIgnore(word, matchs, n)) {
return match;
}
return match[0] + prefix + word;
});
});
}
/**
* 递归绑定XML中的节点
* @param from
* @param node
* @param comPrefix
* @param valPrefix
* @param clsPrefix
* @param ignores
*/
function bind(from, node, comPrefix, valPrefix, clsPrefix, ignores) {
ignores = Object.assign({
true: true,
false: true,
null: true,
undefined: true
}, ignores);
let hasPath = false;
//处理节点属性
let attributes = node.attributes;
for (let i in attributes) {
if (!/^\d+$/.test(i)) continue;
let attr = attributes[i];
//处理属性值
if (attr.value.indexOf('{') > -1) {
attr.value = replaceString(from, attr.value, valPrefix, ignores);
}
//绑定事件
if (/^(bind|catch)\w+/.test(attr.name)) {
node.setAttribute('data-' + attr.name, attr.value);
attr.value = '_dispatch';
if (!hasPath && comPrefix) {
node.setAttribute('data-path', comPrefix);
}
}
//如果是循环标签,则在子标签中忽略循环索引和值变量
if (attr.name === 'wx:for') {
let index = node.getAttribute('wx:for-index') || 'index';
let item = node.getAttribute('wx:for-item') || 'item';
ignores[index] = true;
ignores[item] = true;
}
if (clsPrefix && attr.name === 'class') {
const matchArr = [];
// "xxx {{a ? 'b' : 'c'}}"
// => "xxx $"
attr.value = attr.value.replace(/\{\{([^}]+)\}\}/ig, function (match) {
matchArr.push(match);
matchArr.push(match);
return '$';
});
// => "xxx prefix-xxx $ prefix-$"
attr.value = attr.value.split(' ').map(cls => `${cls} ${clsPrefix}-${cls}`).join(' ');
// => "xxx prefix-xxx {{a ? 'b' : 'c'}} prefix-{{a ? 'b' : 'c'}}"
attr.value = attr.value.replace(/\$/g, function () {
const matchItem = matchArr.shift();
return matchItem;
});
}
}
//如果节点为文本
if (node.nodeName === '#text') {
let data = node.data;
if (data) {
node.replaceData(0, data.length, replaceString(from, data, valPrefix, ignores));
}
}
//递归处理子节点
for (let i in node.childNodes) {
if (!/^\d+$/.test(i)) continue;
let n = node.childNodes[i];
// 不转换template 定义
if (n.nodeName === 'template' && n.getAttribute('name')) {
bindTemplateEvents(n);
continue;
}
bind(from, n, comPrefix, valPrefix, clsPrefix, ignores);
}
}
/**
* 递归绑定template标签子节点中的事件
* @param node
*/
function bindTemplateEvents(node) {
//处理节点属性
let attributes = node.attributes;
for (let i in attributes) {
if (!/^\d+$/.test(i)) continue;
let attr = attributes[i];
//绑定事件
if (/^(bind|catch)\w+/.test(attr.name)) {
node.setAttribute('data-' + attr.name, attr.value);
attr.value = '_dispatch';
}
}
for (let i in node.childNodes) {
if (!/^\d+$/.test(i)) continue;
let n = node.childNodes[i];
bindTemplateEvents(n);
}
}
/**
* @param {FileInfo} from
* @param {string} comPrefix
* @param {string} valPrefix
* @param {string} clsPrefix
* @param {Object} depends
* @returns {Document}
*/
function build(from, comPrefix, valPrefix, clsPrefix, depends) {
if (typeof from === 'string') {
from = utils.getInfo(from);
}
const components = config.srcDir + 'components/';
let data = fs.readFileSync(from.file, 'utf8');
if (!data) {
throw new Error('XML file is empty ' + from.relative);
}
let doc = new DOMParser().parseFromString(data);
bind(from, doc, comPrefix, valPrefix, clsPrefix);
let listElemnts = doc.getElementsByTagName('list');
//console.log('listElemnts', listElemnts);
for (let i = 0; i < listElemnts.$$length; i++) {
let el = listElemnts[i];
let key = el.getAttribute('key');
let name = el.getAttribute('name') || key;
if (!key) throw new Error('Unknown list key in ' + from.relative);
let src;
if (utils.isDirectory(path.join(components, name))) {
//在components目录中
src = path.join(components, name, name + '.xml');
} else if (utils.isFile(path.join(components, name + '.xml'))) {
| 有效变量名 a
fun | identifier_name |
build-xml.js | str, prefix, ignores) {
// 替换字符串中 {{}} 包含的表达式
// 获取类似 a.b.c 表达式中第一个有效变量名 a
function getFirstWord(word) {
return word.match(/[_a-z][\w\d]*/i)[0];
}
// 检查类似 a.b.c 格式表达式是否忽略绑定
function shouldIgnore(word, matchs, n) {
if (word[0] === '"' || word[0] === "'" || /^\d+$/.test(word)) return true;
let w = getFirstWord(word);
if (ignores.hasOwnProperty(w) || (matchs && inText(matchs, n))) {
return true;
}
if (['state', 'props'].indexOf(w) < 0) {
console.error(`'${from.fromSrc}' 中发现无效变量引用 '${word}',XML模板中只能引用组件'props'和'state'中的数据。`.red);
console.error('如果您的项目基于Labrador 0.5.x,请按照升级指南升级到0.6.x版本 https://github. | ]*\s*$/.test(words)) {
let word = words.match(/\s*\.\.\.([\w_][\w\d\-_.\[\]]*)/)[1].trim();
if (shouldIgnore(word)) {
return matchs;
}
return `{{...${prefix}${word}}}`;
}
let isArray = /{{\s*\[/.test(matchs);
if (!isArray) {
//支持对象简写
let arrays = words.split(',');
if (arrays.length > 1) {
let isObject = true;
let props = arrays.map(function (str) {
if (!isObject) return;
// str 为对象中的一个属性, 可能为 a:b / a / ...a / ...a.b
str = str.trim();
let arr = str.split(':');
if (arr.length === 1) {
// 如果属性表达式中不包含冒号
// 如果为简写属性表达式,例如 {foo}
if (/^[a-z_][\w\d]*$/i.test(str)) {
if (ignores[str]) {
return str + ':' + str;
}
return str + ':' + prefix + str;
}
// 属性展开表达式 ...foo
if (/^\.{3}[a-z_][\w\d.\[\]]*$/i.test(str)) {
let word = str.substr(3);
if (shouldIgnore(word)) {
return str;
}
return '...' + prefix + word;
}
// 判定 ${matchs} 不为对象表达式
isObject = false;
return;
}
// 存在冒号的对象属性表达式
let word = arr[1].trim();
// foo:2.3
if (/^[\d.]+$/.test(word)) {
return arr[0] + ':' + word;
}
// foo:bar
// 'foo':bar
if (shouldIgnore(word)) {
return str;
}
// foo:bar
// 'foo':bar
// foo
return arr[0] + ':' + prefix + word;
});
//console.log('isObject', isObject);
if (isObject) {
return '{{' + props.join(',') + '}}';
}
}
}
return matchs.replace(/[^\.\w'"]([a-z_\$][\w\d\._\$]*)/ig, function (match, word, n) {
if (shouldIgnore(word, matchs, n)) {
return match;
}
return match[0] + prefix + word;
});
});
}
/**
* 递归绑定XML中的节点
* @param from
* @param node
* @param comPrefix
* @param valPrefix
* @param clsPrefix
* @param ignores
*/
function bind(from, node, comPrefix, valPrefix, clsPrefix, ignores) {
ignores = Object.assign({
true: true,
false: true,
null: true,
undefined: true
}, ignores);
let hasPath = false;
//处理节点属性
let attributes = node.attributes;
for (let i in attributes) {
if (!/^\d+$/.test(i)) continue;
let attr = attributes[i];
//处理属性值
if (attr.value.indexOf('{') > -1) {
attr.value = replaceString(from, attr.value, valPrefix, ignores);
}
//绑定事件
if (/^(bind|catch)\w+/.test(attr.name)) {
node.setAttribute('data-' + attr.name, attr.value);
attr.value = '_dispatch';
if (!hasPath && comPrefix) {
node.setAttribute('data-path', comPrefix);
}
}
//如果是循环标签,则在子标签中忽略循环索引和值变量
if (attr.name === 'wx:for') {
let index = node.getAttribute('wx:for-index') || 'index';
let item = node.getAttribute('wx:for-item') || 'item';
ignores[index] = true;
ignores[item] = true;
}
if (clsPrefix && attr.name === 'class') {
const matchArr = [];
// "xxx {{a ? 'b' : 'c'}}"
// => "xxx $"
attr.value = attr.value.replace(/\{\{([^}]+)\}\}/ig, function (match) {
matchArr.push(match);
matchArr.push(match);
return '$';
});
// => "xxx prefix-xxx $ prefix-$"
attr.value = attr.value.split(' ').map(cls => `${cls} ${clsPrefix}-${cls}`).join(' ');
// => "xxx prefix-xxx {{a ? 'b' : 'c'}} prefix-{{a ? 'b' : 'c'}}"
attr.value = attr.value.replace(/\$/g, function () {
const matchItem = matchArr.shift();
return matchItem;
});
}
}
//如果节点为文本
if (node.nodeName === '#text') {
let data = node.data;
if (data) {
node.replaceData(0, data.length, replaceString(from, data, valPrefix, ignores));
}
}
//递归处理子节点
for (let i in node.childNodes) {
if (!/^\d+$/.test(i)) continue;
let n = node.childNodes[i];
// 不转换template 定义
if (n.nodeName === 'template' && n.getAttribute('name')) {
bindTemplateEvents(n);
continue;
}
bind(from, n, comPrefix, valPrefix, clsPrefix, ignores);
}
}
/**
* 递归绑定template标签子节点中的事件
* @param node
*/
function bindTemplateEvents(node) {
//处理节点属性
let attributes = node.attributes;
for (let i in attributes) {
if (!/^\d+$/.test(i)) continue;
let attr = attributes[i];
//绑定事件
if (/^(bind|catch)\w+/.test(attr.name)) {
node.setAttribute('data-' + attr.name, attr.value);
attr.value = '_dispatch';
}
}
for (let i in node.childNodes) {
if (!/^\d+$/.test(i)) continue;
let n = node.childNodes[i];
bindTemplateEvents(n);
}
}
/**
* @param {FileInfo} from
* @param {string} comPrefix
* @param {string} valPrefix
* @param {string} clsPrefix
* @param {Object} depends
* @returns {Document}
*/
function build(from, comPrefix, valPrefix, clsPrefix, depends) {
if (typeof from === 'string') {
from = utils.getInfo(from);
}
const components = config.srcDir + 'components/';
let data = fs.readFileSync(from.file, 'utf8');
if (!data) {
throw new Error('XML file is empty ' + from.relative);
}
let doc = new DOMParser().parseFromString(data);
bind(from, doc, comPrefix, valPrefix, clsPrefix);
let listElemnts = doc.getElementsByTagName('list');
//console.log('listElemnts', listElemnts);
for (let i = 0; i < listElemnts.$$length; i++) {
let el = listElemnts[i];
let key = el.getAttribute('key');
let name = el.getAttribute('name') || key;
if (!key) throw new Error('Unknown list key in ' + from.relative);
let src;
if (utils.isDirectory(path.join(components, name))) {
//在components目录中
src = path.join(components, name, name + '.xml');
} else if (utils.isFile(path.join(components, name + '.xml'))) {
| com/maichong/labrador');
}
return false;
}
if (prefix) {
prefix += '.';
} else {
prefix = '';
}
return str.replace(/\{\{([^}]+)\}\}/ig, function (matchs, words) {
// matchs 是{{xxxxx}}格式的字符串
// words 是{{}}中间的表达式
// ...foo
if (/^\s*\.\.\.[\w_][\w\d\-_.\[\] | conditional_block |
build-xml.js | str, prefix, ignores) {
// 替换字符串中 {{}} 包含的表达式
// 获取类似 a.b.c 表达式中第一个有效变量名 a
function getFirstWord(word) {
return word.match(/[_a-z][\w\d]*/i)[0];
}
// 检查类似 a.b.c 格式表达式是否忽略绑定
function shouldIgnore(word, matchs, n) {
if (word[0] === '"' || word[0] === "'" || /^\d+$/.test(word)) return true;
let w = getFirstWord(word);
if (ignores.hasOwnProperty(w) || (matchs && inText(matchs, n))) {
return true;
}
if (['state', 'props'].indexOf(w) < 0) {
console.error(`'${from.fromSrc}' 中发现无效变量引用 '${word}',XML模板中只能引用组件'props'和'state'中的数据。`.red);
console.error('如果您的项目基于Labrador 0.5.x,请按照升级指南升级到0.6.x版本 https://github.com/maichong/labrador');
}
return false;
}
if (prefix) {
prefix += '.';
} else {
prefix = '';
}
return str.replace(/\{\{([^}]+)\}\}/ig, function (matchs, words) {
// matchs 是{{xxxxx}}格式的字符串
// words 是{{}}中间的表达式
// ...foo
if (/^\s*\.\.\.[\w_][\w\d\-_.\[\]]*\s*$/.test(words)) {
let word = words.match(/\s*\.\.\.([\w_][\w\d\-_.\[\]]*)/)[1].trim();
if (shouldIgnore(word)) {
return matchs;
}
return `{{...${prefix}${word}}}`;
}
let isArray = /{{\s*\[/.test(matchs);
if (!isArray) {
//支持对象简写
let arrays = words.split(',');
if (arrays.length > 1) {
let isObject = true;
let props = arrays.map(function (str) {
if (!isObject) return;
// str 为对象中的一个属性, 可能为 a:b / a / ...a / ...a.b
str = str.trim();
let arr = str.split(':');
if (arr.length === 1) {
// 如果属性表达式中不包含冒号
// 如果为简写属性表达式,例如 {foo}
if (/^[a-z_][\w\d]*$/i.test(str)) {
if (ignores[str]) {
return str + ':' + str;
}
return str + ':' + prefix + str;
}
// 属性展开表达式 ...foo
if (/^\.{3}[a-z_][\w\d.\[\]]*$/i.test(str)) {
let word = str.substr(3);
if (shouldIgnore(word)) {
return str;
}
return '...' + prefix + word;
}
// 判定 ${matchs} 不为对象表达式
isObject = false;
return;
}
// 存在冒号的对象属性表达式
let word = arr[1].trim();
// foo:2.3
if (/^[\d.]+$/.test(word)) {
return arr[0] + ':' + word;
}
// foo:bar
// 'foo':bar
if (shouldIgnore(word)) {
return str;
}
// foo:bar
// 'foo':bar
// foo
return arr[0] + ':' + prefix + word;
});
//console.log('isObject', isObject);
if (isObject) {
return '{{' + props.join(',') + '}}';
}
}
}
return matchs.replace(/[^\.\w'"]([a-z_\$][\w\d\._\$]*)/ig, function (match, word, n) {
if (shouldIgnore(word, matchs, n)) {
return match;
}
return match[0] + prefix + word;
});
});
}
/**
* 递归绑定XML中的节点
* @param from
* @param node
* @param comPrefix
* @param valPrefix
* @param clsPrefix
* @param ignores
*/
function bind(from, node, comPrefix, valPrefix, clsPrefix, ignores) {
ignores = Object.assign({
true: true,
false: true,
null: true,
undefined: true
}, ignores);
let hasPath = false;
//处理节点属性
let attributes = node.attributes;
for (let i in attributes) {
if (!/^\d+$/.test(i)) continue;
let attr = attributes[i];
//处理属性值
if (attr.value.indexOf('{') > -1) {
attr.value = replaceString(from, attr.value, valPrefix, ignores);
}
//绑定事件
if (/^(bind|catch)\w+/.test(attr.name)) {
node.setAttribute('data-' + attr.name, attr.value);
attr.value = '_dispatch';
if (!hasPath && comPrefix) {
node.setAttribute('data-path', comPrefix);
}
}
//如果是循环标签,则在子标签中忽略循环索引和值变量
if (attr.name === 'wx:for') {
let index = node.getAttribute('wx:for-index') || 'index';
let item = node.getAttribute('wx:for-item') || 'item';
ignores[index] = true;
ignores[item] = true;
}
if (clsPrefix && attr.name === 'class') {
const matchArr = [];
// "xxx {{a ? 'b' : 'c'}}"
// => "xxx $"
attr.value = attr.value.replace(/\{\{([^}]+)\}\}/ig, function (match) {
matchArr.push(match);
matchArr.push(match);
return '$';
});
// => "xxx prefix-xxx $ prefix-$"
attr.value = attr.value.split(' ').map(cls => `${cls} ${clsPrefix}-${cls}`).join(' ');
// => "xxx prefix-xxx {{a ? 'b' : 'c'}} prefix-{{a ? 'b' : 'c'}}"
attr.value = attr.value.replace(/\$/g, function () {
const matchItem = matchArr.shift();
return matchItem;
});
}
}
//如果节点为文本
if (node.nodeName === '#text') {
let data = node.data;
if (data) {
node.replaceData(0, data.length, replaceString(from, data, valPrefix, ignores));
}
} | // 不转换template 定义
if (n.nodeName === 'template' && n.getAttribute('name')) {
bindTemplateEvents(n);
continue;
}
bind(from, n, comPrefix, valPrefix, clsPrefix, ignores);
}
}
/**
* 递归绑定template标签子节点中的事件
* @param node
*/
function bindTemplateEvents(node) {
//处理节点属性
let attributes = node.attributes;
for (let i in attributes) {
if (!/^\d+$/.test(i)) continue;
let attr = attributes[i];
//绑定事件
if (/^(bind|catch)\w+/.test(attr.name)) {
node.setAttribute('data-' + attr.name, attr.value);
attr.value = '_dispatch';
}
}
for (let i in node.childNodes) {
if (!/^\d+$/.test(i)) continue;
let n = node.childNodes[i];
bindTemplateEvents(n);
}
}
/**
* @param {FileInfo} from
* @param {string} comPrefix
* @param {string} valPrefix
* @param {string} clsPrefix
* @param {Object} depends
* @returns {Document}
*/
function build(from, comPrefix, valPrefix, clsPrefix, depends) {
if (typeof from === 'string') {
from = utils.getInfo(from);
}
const components = config.srcDir + 'components/';
let data = fs.readFileSync(from.file, 'utf8');
if (!data) {
throw new Error('XML file is empty ' + from.relative);
}
let doc = new DOMParser().parseFromString(data);
bind(from, doc, comPrefix, valPrefix, clsPrefix);
let listElemnts = doc.getElementsByTagName('list');
//console.log('listElemnts', listElemnts);
for (let i = 0; i < listElemnts.$$length; i++) {
let el = listElemnts[i];
let key = el.getAttribute('key');
let name = el.getAttribute('name') || key;
if (!key) throw new Error('Unknown list key in ' + from.relative);
let src;
if (utils.isDirectory(path.join(components, name))) {
//在components目录中
src = path.join(components, name, name + '.xml');
} else if (utils.isFile(path.join(components, name + '.xml'))) {
|
//递归处理子节点
for (let i in node.childNodes) {
if (!/^\d+$/.test(i)) continue;
let n = node.childNodes[i]; | random_line_split |
build-xml.js | str, prefix, ignores) {
// 替换字符串中 {{}} 包含的表达式
// 获取类似 a.b.c 表达式中第一个有效变量名 a
function getFirstWord(word) {
return word.match(/[_a-z][\w\d]*/i)[0];
}
// 检查类似 a.b.c 格式表达式是否忽略绑定
function shouldIgnore(word, matchs, n) {
if (word[0] === '"' || word[0] === "'" || /^\d+$/.test(word)) return true;
let w = getFirstWord(word);
if (ignores.hasOwnProperty(w) || (matchs && inText(matchs, n))) {
return true;
}
if (['state', 'props'].indexOf(w) < 0) {
console.error(`'${from.fromSrc}' 中发现无效变量引用 '${word}',XML模板中只能引用组件'props'和'state'中的数据。`.red);
console.error('如果您的项目基于Labrador 0.5.x,请按照升级指南升级到0.6.x版本 https://github.com/maichong/labrador');
}
return false;
}
if (prefix) {
prefix += '.';
} else {
prefix = '';
}
return str.replace(/\{\{([^}]+)\}\}/ig, function (matchs, words) {
// matchs 是{{xxxxx}}格式的字符串
// words 是{{}}中间的表达式
// ...foo
if (/^\s*\.\.\.[\w_][\w\d\-_.\[\]]*\s*$/.test(words)) {
let word = words.match(/\s*\.\.\.([\w_][\w\d\-_.\[\]]*)/)[1].trim();
if (shouldIgnore(word)) {
return matchs;
}
return `{{...${prefix}${word}}}`;
}
let isArray = /{{\s*\[/.test(matchs);
if (!isArray) {
//支持对象简写
let arrays = words.split(',');
if (arrays.length > 1) {
let isObject = true;
let props = arrays.map(function (str) {
if (!isObject) return;
// str 为对象中的一个属性, 可能为 a:b / a / ...a / ...a.b
str = str.trim();
let arr = str.split(':');
if (arr.length === 1) {
// 如果属性表达式中不包含冒号
// 如果为简写属性表达式,例如 {foo}
if (/^[a-z_][\w\d]*$/i.test(str)) {
if (ignores[str]) {
return str + ':' + str;
}
return str + ':' + prefix + str;
}
// 属性展开表达式 ...foo
if (/^\.{3}[a-z_][\w\d.\[\]]*$/i.test(str)) {
let word = str.substr(3);
if (shouldIgnore(word)) {
return str;
}
return '...' + prefix + word;
}
// 判定 ${matchs} 不为对象表达式
isObject = false;
return;
}
// 存在冒号的对象属性表达式
let word = arr[1].trim();
// foo:2.3
if (/^[\d.]+$/.test(word)) {
return arr[0] + ':' + word;
}
// foo:bar
// 'foo':bar
if (shouldIgnore(word)) {
return str;
}
// foo:bar
// 'foo':bar
// foo
return arr[0] + ':' + prefix + word;
});
//console.log('isObject', isObject);
if (isObject) {
return '{{' + props.join(',') + '}}';
}
}
}
return matchs.replace(/[^\.\w'"]([a-z_\$][\w\d\._\$]*)/ig, function (match, word, n) {
if (shouldIgnore(word, matchs, n)) {
return match;
}
return match[0] + prefix + word;
});
});
}
/**
* 递归绑定XML中的节点
* @param from
* @param node
* @param comPrefix
* @param valPrefix
* @param clsPrefix
* @param ignores
*/
function bind(from, node, comPrefix, valPrefix, clsPrefix, ignores) {
ignores = Object.assign({
true: true,
false: true,
null: true,
undefined: true
}, ignores);
let hasPath = false;
//处理节点属性
let attributes = node.attributes;
for (let i in attributes) {
if (!/^\d+$/.test(i)) continue;
let attr = attributes[i];
//处理属性值
if (attr.value.indexOf('{') > -1) {
attr.value = replaceString(from, attr.value, valPrefix, ignores);
}
//绑定事件
if (/^(bind|catch)\w+/.test(att | attr.value = attr.value.replace(/\{\{([^}]+)\}\}/ig, function (match) {
matchArr.push(match);
matchArr.push(match);
return '$';
});
// => "xxx prefix-xxx $ prefix-$"
attr.value = attr.value.split(' ').map(cls => `${cls} ${clsPrefix}-${cls}`).join(' ');
// => "xxx prefix-xxx {{a ? 'b' : 'c'}} prefix-{{a ? 'b' : 'c'}}"
attr.value = attr.value.replace(/\$/g, function () {
const matchItem = matchArr.shift();
return matchItem;
});
}
}
//如果节点为文本
if (node.nodeName === '#text') {
let data = node.data;
if (data) {
node.replaceData(0, data.length, replaceString(from, data, valPrefix, ignores));
}
}
//递归处理子节点
for (let i in node.childNodes) {
if (!/^\d+$/.test(i)) continue;
let n = node.childNodes[i];
// 不转换template 定义
if (n.nodeName === 'template' && n.getAttribute('name')) {
bindTemplateEvents(n);
continue;
}
bind(from, n, comPrefix, valPrefix, clsPrefix, ignores);
}
}
/**
* 递归绑定template标签子节点中的事件
* @param node
*/
function bindTemplateEvents(node) {
//处理节点属性
let attributes = node.attributes;
for (let i in attributes) {
if (!/^\d+$/.test(i)) continue;
let attr = attributes[i];
//绑定事件
if (/^(bind|catch)\w+/.test(attr.name)) {
node.setAttribute('data-' + attr.name, attr.value);
attr.value = '_dispatch';
}
}
for (let i in node.childNodes) {
if (!/^\d+$/.test(i)) continue;
let n = node.childNodes[i];
bindTemplateEvents(n);
}
}
/**
* @param {FileInfo} from
* @param {string}
comPrefix
* @param {string} valPrefix
* @param {string} clsPrefix
* @param {Object} depends
* @returns {Document}
*/
function build(from, comPrefix, valPrefix, clsPrefix, depends) {
if (typeof from === 'string') {
from = utils.getInfo(from);
}
const components = config.srcDir + 'components/';
let data = fs.readFileSync(from.file, 'utf8');
if (!data) {
throw new Error('XML file is empty ' + from.relative);
}
let doc = new DOMParser().parseFromString(data);
bind(from, doc, comPrefix, valPrefix, clsPrefix);
let listElemnts = doc.getElementsByTagName('list');
//console.log('listElemnts', listElemnts);
for (let i = 0; i < listElemnts.$$length; i++) {
let el = listElemnts[i];
let key = el.getAttribute('key');
let name = el.getAttribute('name') || key;
if (!key) throw new Error('Unknown list key in ' + from.relative);
let src;
if (utils.isDirectory(path.join(components, name))) {
//在components目录中
src = path.join(components, name, name + '.xml');
} else if (utils.isFile(path.join(components, name + '.xml'))) {
| r.name)) {
node.setAttribute('data-' + attr.name, attr.value);
attr.value = '_dispatch';
if (!hasPath && comPrefix) {
node.setAttribute('data-path', comPrefix);
}
}
//如果是循环标签,则在子标签中忽略循环索引和值变量
if (attr.name === 'wx:for') {
let index = node.getAttribute('wx:for-index') || 'index';
let item = node.getAttribute('wx:for-item') || 'item';
ignores[index] = true;
ignores[item] = true;
}
if (clsPrefix && attr.name === 'class') {
const matchArr = [];
// "xxx {{a ? 'b' : 'c'}}"
// => "xxx $" | identifier_body |
lib.rs | .bin_name("self_update_example")
.show_download_progress(true)
.current_version(cargo_crate_version!())
.build()?
.update()?;
println!("Update status: `{}`!", status.version());
Ok(())
}
# fn main() { }
```
Run the above example to see `self_update` in action: `cargo run --example github`
Separate utilities are also exposed:
```
extern crate self_update;
fn update() -> Result<(), Box<::std::error::Error>> {
let target = self_update::get_target()?;
let releases = self_update::backends::github::ReleaseList::configure()
.repo_owner("jaemk")
.repo_name("self_update")
.with_target(&target)
.build()?
.fetch()?;
println!("found releases:");
println!("{:#?}\n", releases);
// get the first available release
let asset = releases[0]
.asset_for(&target).unwrap();
let tmp_dir = self_update::TempDir::new_in(::std::env::current_dir()?, "self_update")?;
let tmp_tarball_path = tmp_dir.path().join(&asset.name);
let tmp_tarball = ::std::fs::File::open(&tmp_tarball_path)?;
self_update::Download::from_url(&asset.download_url)
.download_to(&tmp_tarball)?;
self_update::Extract::from_source(&tmp_tarball_path)
.archive(self_update::ArchiveKind::Tar)
.encoding(self_update::EncodingKind::Gz)
.extract_into(&tmp_dir.path())?;
let tmp_file = tmp_dir.path().join("replacement_tmp");
let bin_name = "self_update_bin";
let bin_path = tmp_dir.path().join(bin_name);
self_update::Move::from_source(&bin_path)
.replace_using_temp(&tmp_file)
.to_dest(&::std::env::current_exe()?)?;
Ok(())
}
# fn main() { }
```
*/
extern crate serde_json;
extern crate reqwest;
extern crate tempdir;
extern crate flate2;
extern crate tar;
extern crate semver;
extern crate pbr;
pub use tempdir::TempDir;
use std::fs;
use std::io;
use std::path;
#[macro_use] mod macros;
pub mod errors;
pub mod backends;
pub mod version;
use errors::*;
/// Try to determine the current target triple.
///
/// Returns a target triple (e.g. `x86_64-unknown-linux-gnu` or `i686-pc-windows-msvc`) or an
/// `Error::Config` if the current config cannot be determined or is not some combination of the
/// following values:
/// `linux, mac, windows` -- `i686, x86, armv7` -- `gnu, musl, msvc`
///
/// * Errors:
/// * Unexpected system config
pub fn get_target() -> Result<String> {
let arch_config = (cfg!(target_arch = "x86"), cfg!(target_arch = "x86_64"), cfg!(target_arch = "arm"));
let arch = match arch_config {
(true, _, _) => "i686",
(_, true, _) => "x86_64",
(_, _, true) => "armv7",
_ => bail!(Error::Update, "Unable to determine target-architecture"),
};
let os_config = (cfg!(target_os = "linux"), cfg!(target_os = "macos"), cfg!(target_os = "windows"));
let os = match os_config {
(true, _, _) => "unknown-linux",
(_, true, _) => "apple-darwin",
(_, _, true) => "pc-windows",
_ => bail!(Error::Update, "Unable to determine target-os"),
};
let s;
let os = if cfg!(target_os = "macos") {
os
} else {
let env_config = (cfg!(target_env = "gnu"), cfg!(target_env = "musl"), cfg!(target_env = "msvc"));
let env = match env_config {
(true, _, _) => "gnu",
(_, true, _) => "musl",
(_, _, true) => "msvc",
_ => bail!(Error::Update, "Unable to determine target-environment"),
};
s = format!("{}-{}", os, env);
&s
};
Ok(format!("{}-{}", arch, os))
}
/// Check if a version tag is greater than the current
#[deprecated(since="0.4.2", note="`should_update` functionality has been moved to `version::bump_is_greater`.\
`version::bump_is_compatible` should be used instead.")]
pub fn should_update(current: &str, latest: &str) -> Result<bool> {
use semver::Version;
Ok(Version::parse(latest)? > Version::parse(current)?)
}
/// Flush a message to stdout and check if they respond `yes`.
/// Interprets a blank response as yes.
///
/// * Errors:
/// * Io flushing
/// * User entered anything other than enter/Y/y
fn confirm(msg: &str) -> Result<()> {
print_flush!("{}", msg);
let mut s = String::new();
io::stdin().read_line(&mut s)?;
let s = s.trim().to_lowercase();
if ! s.is_empty() && s != "y" {
bail!(Error::Update, "Update aborted");
}
Ok(())
}
/// Status returned after updating
///
/// Wrapped `String`s are version tags
#[derive(Debug, Clone)]
pub enum Status {
UpToDate(String),
Updated(String),
}
impl Status {
/// Return the version tag
pub fn version(&self) -> &str {
use Status::*;
match *self {
UpToDate(ref s) => s,
Updated(ref s) => s,
}
}
/// Returns `true` if `Status::UpToDate`
pub fn uptodate(&self) -> bool {
match *self {
Status::UpToDate(_) => true,
_ => false,
}
}
/// Returns `true` if `Status::Updated`
pub fn updated(&self) -> bool {
match *self {
Status::Updated(_) => true,
_ => false,
}
}
}
impl std::fmt::Display for Status {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
use Status::*;
match *self {
UpToDate(ref s) => write!(f, "UpToDate({})", s),
Updated(ref s) => write!(f, "Updated({})", s),
}
}
}
/// Supported archive formats
#[derive(Debug)]
pub enum ArchiveKind {
Tar,
Plain,
}
/// Supported encoding formats
#[derive(Debug)]
pub enum EncodingKind {
Gz,
Plain,
}
/// Extract contents of an encoded archive (e.g. tar.gz) file to a specified directory
///
/// * Errors:
/// * Io - opening files
/// * Io - gzip decoding
/// * Io - archive unpacking
#[derive(Debug)]
pub struct Extract<'a> {
source: &'a path::Path,
archive: ArchiveKind,
encoding: EncodingKind,
}
impl<'a> Extract<'a> {
pub fn from_source(source: &'a path::Path) -> Extract<'a> {
Self {
source: source,
archive: ArchiveKind::Plain,
encoding: EncodingKind::Plain,
}
}
pub fn archive(&mut self, kind: ArchiveKind) -> &mut Self {
self.archive = kind;
self
}
pub fn encoding(&mut self, kind: EncodingKind) -> &mut Self {
self.encoding = kind;
self
}
pub fn | (&self, into_dir: &path::Path) -> Result<()> {
let source = fs::File::open(self.source)?;
let archive: Box<io::Read> = match self.encoding {
EncodingKind::Plain => Box::new(source),
EncodingKind::Gz => {
let reader = flate2::read::GzDecoder::new(source);
Box::new(reader)
},
};
match self.archive {
ArchiveKind::Plain => (),
ArchiveKind::Tar => {
let mut archive = tar::Archive::new(archive);
archive.unpack(into_dir)?;
}
};
Ok(())
}
}
/// Moves a file from the given path to the specified destination.
///
/// `source` and `dest` must be on the same filesystem.
/// If `replace_using_temp` is provided, the destination file will be
/// replaced using the given temp path as a backup in case of `io` errors.
///
/// * Errors:
/// * Io - copying / renaming
#[derive(Debug)]
pub struct Move<'a> {
source: &'a path::Path,
temp: Option<&'a path::Path>,
}
impl<'a> Move<'a> {
/// Specify source file
pub fn from_source(source: &'a path::Path) -> Move<'a> {
Self {
source: source,
temp: None,
}
}
/// If specified and the destination file already exists, the destination
/// file will be "safely" replaced using a temp path.
/// The `temp` dir should must be explicitly provided since `replace` operations require | extract_into | identifier_name |
lib.rs | .bin_name("self_update_example")
.show_download_progress(true)
.current_version(cargo_crate_version!())
.build()?
.update()?;
println!("Update status: `{}`!", status.version());
Ok(())
}
# fn main() { }
```
Run the above example to see `self_update` in action: `cargo run --example github`
Separate utilities are also exposed:
```
extern crate self_update;
fn update() -> Result<(), Box<::std::error::Error>> {
let target = self_update::get_target()?;
let releases = self_update::backends::github::ReleaseList::configure()
.repo_owner("jaemk")
.repo_name("self_update")
.with_target(&target)
.build()?
.fetch()?;
println!("found releases:");
println!("{:#?}\n", releases);
// get the first available release
let asset = releases[0]
.asset_for(&target).unwrap();
let tmp_dir = self_update::TempDir::new_in(::std::env::current_dir()?, "self_update")?;
let tmp_tarball_path = tmp_dir.path().join(&asset.name);
let tmp_tarball = ::std::fs::File::open(&tmp_tarball_path)?;
self_update::Download::from_url(&asset.download_url)
.download_to(&tmp_tarball)?;
self_update::Extract::from_source(&tmp_tarball_path)
.archive(self_update::ArchiveKind::Tar)
.encoding(self_update::EncodingKind::Gz)
.extract_into(&tmp_dir.path())?;
let tmp_file = tmp_dir.path().join("replacement_tmp");
let bin_name = "self_update_bin";
let bin_path = tmp_dir.path().join(bin_name);
self_update::Move::from_source(&bin_path)
.replace_using_temp(&tmp_file)
.to_dest(&::std::env::current_exe()?)?;
Ok(())
}
# fn main() { }
```
*/
extern crate serde_json;
extern crate reqwest;
extern crate tempdir;
extern crate flate2;
extern crate tar;
extern crate semver;
extern crate pbr;
pub use tempdir::TempDir;
use std::fs;
use std::io;
use std::path;
#[macro_use] mod macros;
pub mod errors;
pub mod backends;
pub mod version;
use errors::*;
/// Try to determine the current target triple.
///
/// Returns a target triple (e.g. `x86_64-unknown-linux-gnu` or `i686-pc-windows-msvc`) or an
/// `Error::Config` if the current config cannot be determined or is not some combination of the
/// following values:
/// `linux, mac, windows` -- `i686, x86, armv7` -- `gnu, musl, msvc`
///
/// * Errors:
/// * Unexpected system config
pub fn get_target() -> Result<String> {
let arch_config = (cfg!(target_arch = "x86"), cfg!(target_arch = "x86_64"), cfg!(target_arch = "arm"));
let arch = match arch_config {
(true, _, _) => "i686",
(_, true, _) => "x86_64",
(_, _, true) => "armv7",
_ => bail!(Error::Update, "Unable to determine target-architecture"),
};
let os_config = (cfg!(target_os = "linux"), cfg!(target_os = "macos"), cfg!(target_os = "windows"));
let os = match os_config {
(true, _, _) => "unknown-linux",
(_, true, _) => "apple-darwin",
(_, _, true) => "pc-windows",
_ => bail!(Error::Update, "Unable to determine target-os"),
};
let s;
let os = if cfg!(target_os = "macos") {
os
} else {
let env_config = (cfg!(target_env = "gnu"), cfg!(target_env = "musl"), cfg!(target_env = "msvc"));
let env = match env_config {
(true, _, _) => "gnu",
(_, true, _) => "musl",
(_, _, true) => "msvc",
_ => bail!(Error::Update, "Unable to determine target-environment"),
};
s = format!("{}-{}", os, env);
&s
};
Ok(format!("{}-{}", arch, os))
}
/// Check if a version tag is greater than the current
#[deprecated(since="0.4.2", note="`should_update` functionality has been moved to `version::bump_is_greater`.\
`version::bump_is_compatible` should be used instead.")]
pub fn should_update(current: &str, latest: &str) -> Result<bool> {
use semver::Version;
Ok(Version::parse(latest)? > Version::parse(current)?)
}
/// Flush a message to stdout and check if they respond `yes`.
/// Interprets a blank response as yes.
///
/// * Errors:
/// * Io flushing
/// * User entered anything other than enter/Y/y
fn confirm(msg: &str) -> Result<()> {
print_flush!("{}", msg);
let mut s = String::new();
io::stdin().read_line(&mut s)?;
let s = s.trim().to_lowercase();
if ! s.is_empty() && s != "y" {
bail!(Error::Update, "Update aborted");
}
Ok(())
}
/// Status returned after updating
///
/// Wrapped `String`s are version tags
#[derive(Debug, Clone)]
pub enum Status {
UpToDate(String),
Updated(String),
}
impl Status {
/// Return the version tag
pub fn version(&self) -> &str {
use Status::*;
match *self {
UpToDate(ref s) => s,
Updated(ref s) => s,
}
}
/// Returns `true` if `Status::UpToDate`
pub fn uptodate(&self) -> bool {
match *self {
Status::UpToDate(_) => true, | pub fn updated(&self) -> bool {
match *self {
Status::Updated(_) => true,
_ => false,
}
}
}
impl std::fmt::Display for Status {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
use Status::*;
match *self {
UpToDate(ref s) => write!(f, "UpToDate({})", s),
Updated(ref s) => write!(f, "Updated({})", s),
}
}
}
/// Supported archive formats
#[derive(Debug)]
pub enum ArchiveKind {
Tar,
Plain,
}
/// Supported encoding formats
#[derive(Debug)]
pub enum EncodingKind {
Gz,
Plain,
}
/// Extract contents of an encoded archive (e.g. tar.gz) file to a specified directory
///
/// * Errors:
/// * Io - opening files
/// * Io - gzip decoding
/// * Io - archive unpacking
#[derive(Debug)]
pub struct Extract<'a> {
source: &'a path::Path,
archive: ArchiveKind,
encoding: EncodingKind,
}
impl<'a> Extract<'a> {
pub fn from_source(source: &'a path::Path) -> Extract<'a> {
Self {
source: source,
archive: ArchiveKind::Plain,
encoding: EncodingKind::Plain,
}
}
pub fn archive(&mut self, kind: ArchiveKind) -> &mut Self {
self.archive = kind;
self
}
pub fn encoding(&mut self, kind: EncodingKind) -> &mut Self {
self.encoding = kind;
self
}
pub fn extract_into(&self, into_dir: &path::Path) -> Result<()> {
let source = fs::File::open(self.source)?;
let archive: Box<io::Read> = match self.encoding {
EncodingKind::Plain => Box::new(source),
EncodingKind::Gz => {
let reader = flate2::read::GzDecoder::new(source);
Box::new(reader)
},
};
match self.archive {
ArchiveKind::Plain => (),
ArchiveKind::Tar => {
let mut archive = tar::Archive::new(archive);
archive.unpack(into_dir)?;
}
};
Ok(())
}
}
/// Moves a file from the given path to the specified destination.
///
/// `source` and `dest` must be on the same filesystem.
/// If `replace_using_temp` is provided, the destination file will be
/// replaced using the given temp path as a backup in case of `io` errors.
///
/// * Errors:
/// * Io - copying / renaming
#[derive(Debug)]
pub struct Move<'a> {
source: &'a path::Path,
temp: Option<&'a path::Path>,
}
impl<'a> Move<'a> {
/// Specify source file
pub fn from_source(source: &'a path::Path) -> Move<'a> {
Self {
source: source,
temp: None,
}
}
/// If specified and the destination file already exists, the destination
/// file will be "safely" replaced using a temp path.
/// The `temp` dir should must be explicitly provided since `replace` operations require
| _ => false,
}
}
/// Returns `true` if `Status::Updated` | random_line_split |
lib.rs | println!("{:#?}\n", releases);
// get the first available release
let asset = releases[0]
.asset_for(&target).unwrap();
let tmp_dir = self_update::TempDir::new_in(::std::env::current_dir()?, "self_update")?;
let tmp_tarball_path = tmp_dir.path().join(&asset.name);
let tmp_tarball = ::std::fs::File::open(&tmp_tarball_path)?;
self_update::Download::from_url(&asset.download_url)
.download_to(&tmp_tarball)?;
self_update::Extract::from_source(&tmp_tarball_path)
.archive(self_update::ArchiveKind::Tar)
.encoding(self_update::EncodingKind::Gz)
.extract_into(&tmp_dir.path())?;
let tmp_file = tmp_dir.path().join("replacement_tmp");
let bin_name = "self_update_bin";
let bin_path = tmp_dir.path().join(bin_name);
self_update::Move::from_source(&bin_path)
.replace_using_temp(&tmp_file)
.to_dest(&::std::env::current_exe()?)?;
Ok(())
}
# fn main() { }
```
*/
extern crate serde_json;
extern crate reqwest;
extern crate tempdir;
extern crate flate2;
extern crate tar;
extern crate semver;
extern crate pbr;
pub use tempdir::TempDir;
use std::fs;
use std::io;
use std::path;
#[macro_use] mod macros;
pub mod errors;
pub mod backends;
pub mod version;
use errors::*;
/// Try to determine the current target triple.
///
/// Returns a target triple (e.g. `x86_64-unknown-linux-gnu` or `i686-pc-windows-msvc`) or an
/// `Error::Config` if the current config cannot be determined or is not some combination of the
/// following values:
/// `linux, mac, windows` -- `i686, x86, armv7` -- `gnu, musl, msvc`
///
/// * Errors:
/// * Unexpected system config
pub fn get_target() -> Result<String> {
let arch_config = (cfg!(target_arch = "x86"), cfg!(target_arch = "x86_64"), cfg!(target_arch = "arm"));
let arch = match arch_config {
(true, _, _) => "i686",
(_, true, _) => "x86_64",
(_, _, true) => "armv7",
_ => bail!(Error::Update, "Unable to determine target-architecture"),
};
let os_config = (cfg!(target_os = "linux"), cfg!(target_os = "macos"), cfg!(target_os = "windows"));
let os = match os_config {
(true, _, _) => "unknown-linux",
(_, true, _) => "apple-darwin",
(_, _, true) => "pc-windows",
_ => bail!(Error::Update, "Unable to determine target-os"),
};
let s;
let os = if cfg!(target_os = "macos") {
os
} else {
let env_config = (cfg!(target_env = "gnu"), cfg!(target_env = "musl"), cfg!(target_env = "msvc"));
let env = match env_config {
(true, _, _) => "gnu",
(_, true, _) => "musl",
(_, _, true) => "msvc",
_ => bail!(Error::Update, "Unable to determine target-environment"),
};
s = format!("{}-{}", os, env);
&s
};
Ok(format!("{}-{}", arch, os))
}
/// Check if a version tag is greater than the current
#[deprecated(since="0.4.2", note="`should_update` functionality has been moved to `version::bump_is_greater`.\
`version::bump_is_compatible` should be used instead.")]
pub fn should_update(current: &str, latest: &str) -> Result<bool> {
use semver::Version;
Ok(Version::parse(latest)? > Version::parse(current)?)
}
/// Flush a message to stdout and check if they respond `yes`.
/// Interprets a blank response as yes.
///
/// * Errors:
/// * Io flushing
/// * User entered anything other than enter/Y/y
fn confirm(msg: &str) -> Result<()> {
print_flush!("{}", msg);
let mut s = String::new();
io::stdin().read_line(&mut s)?;
let s = s.trim().to_lowercase();
if ! s.is_empty() && s != "y" {
bail!(Error::Update, "Update aborted");
}
Ok(())
}
/// Status returned after updating
///
/// Wrapped `String`s are version tags
#[derive(Debug, Clone)]
pub enum Status {
UpToDate(String),
Updated(String),
}
impl Status {
/// Return the version tag
pub fn version(&self) -> &str {
use Status::*;
match *self {
UpToDate(ref s) => s,
Updated(ref s) => s,
}
}
/// Returns `true` if `Status::UpToDate`
pub fn uptodate(&self) -> bool {
match *self {
Status::UpToDate(_) => true,
_ => false,
}
}
/// Returns `true` if `Status::Updated`
pub fn updated(&self) -> bool {
match *self {
Status::Updated(_) => true,
_ => false,
}
}
}
impl std::fmt::Display for Status {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
use Status::*;
match *self {
UpToDate(ref s) => write!(f, "UpToDate({})", s),
Updated(ref s) => write!(f, "Updated({})", s),
}
}
}
/// Supported archive formats
#[derive(Debug)]
pub enum ArchiveKind {
Tar,
Plain,
}
/// Supported encoding formats
#[derive(Debug)]
pub enum EncodingKind {
Gz,
Plain,
}
/// Extract contents of an encoded archive (e.g. tar.gz) file to a specified directory
///
/// * Errors:
/// * Io - opening files
/// * Io - gzip decoding
/// * Io - archive unpacking
#[derive(Debug)]
pub struct Extract<'a> {
source: &'a path::Path,
archive: ArchiveKind,
encoding: EncodingKind,
}
impl<'a> Extract<'a> {
pub fn from_source(source: &'a path::Path) -> Extract<'a> {
Self {
source: source,
archive: ArchiveKind::Plain,
encoding: EncodingKind::Plain,
}
}
pub fn archive(&mut self, kind: ArchiveKind) -> &mut Self {
self.archive = kind;
self
}
pub fn encoding(&mut self, kind: EncodingKind) -> &mut Self {
self.encoding = kind;
self
}
pub fn extract_into(&self, into_dir: &path::Path) -> Result<()> {
let source = fs::File::open(self.source)?;
let archive: Box<io::Read> = match self.encoding {
EncodingKind::Plain => Box::new(source),
EncodingKind::Gz => {
let reader = flate2::read::GzDecoder::new(source);
Box::new(reader)
},
};
match self.archive {
ArchiveKind::Plain => (),
ArchiveKind::Tar => {
let mut archive = tar::Archive::new(archive);
archive.unpack(into_dir)?;
}
};
Ok(())
}
}
/// Moves a file from the given path to the specified destination.
///
/// `source` and `dest` must be on the same filesystem.
/// If `replace_using_temp` is provided, the destination file will be
/// replaced using the given temp path as a backup in case of `io` errors.
///
/// * Errors:
/// * Io - copying / renaming
#[derive(Debug)]
pub struct Move<'a> {
source: &'a path::Path,
temp: Option<&'a path::Path>,
}
impl<'a> Move<'a> {
/// Specify source file
pub fn from_source(source: &'a path::Path) -> Move<'a> {
Self {
source: source,
temp: None,
}
}
/// If specified and the destination file already exists, the destination
/// file will be "safely" replaced using a temp path.
/// The `temp` dir should must be explicitly provided since `replace` operations require
/// files to live on the same filesystem.
pub fn replace_using_temp(&mut self, temp: &'a path::Path) -> &mut Self {
self.temp = Some(temp);
self
}
/// Move source file to specified destination
pub fn to_dest(&self, dest: &path::Path) -> Result<()> | {
match self.temp {
None => {
fs::rename(self.source, dest)?;
}
Some(temp) => {
if dest.exists() {
fs::rename(dest, temp)?;
match fs::rename(self.source, dest) {
Err(e) => {
fs::rename(temp, dest)?;
return Err(Error::from(e))
}
Ok(_) => (),
};
} else {
fs::rename(self.source, dest)?;
}
}
}; | identifier_body | |
sample.app.js | UCMS.alert("서버와 통신 중 오류["+textStatus+","+jqXHR.status+"]가 발생하였습니다.<br>잠시 후 다시 시도해주세요.<br>이용에 불편을 드려 죄송합니다!")
.then(
function()
{
reloadApp();
});
}
return true;
}
,
onInitializeBefore: function(options)
{
UCMS.log("onInitializeBefore()");
this._appInfo = options.baangapp || {};
this._param = options;
/**
* PC 브라우저 백 버튼 으로 페이지 이동시 잠긴상태의 스크롤을 해제한다.
*
* */
if( UCMS.SPA.isAppOS() == false )
{
window.onhashchange = function() {
$("body").css("overflow", "auto");
};
};
// XXX 401 발생시 처리 테스트 코드
//setTimeout(TestSessionChecker, 10000);
//
this.initApplication
(
options,
{
home_body : "modules/app/cooker/widgets/home/home-0.8.1.html",
home : "modules/app/cooker/widgets/home/home-0.8.1",
login_body : "modules/widgets/sign/login-0.8.1.html",
login : "modules/widgets/sign/login-0.8.1",
member_body : "modules/widgets/sign/member-0.8.1.html",
member : "modules/widgets/sign/member-0.8.1",
youtube_body : "modules/widgets/youtube/youtube-0.8.1.html",
youtube : "modules/widgets/youtube/youtube-0.8.1",
publicdata_body : "modules/widgets/opendata/publicData-0.8.1.html",
publicdata : "modules/widgets/opendata/publicData-0.8.1",
selectcity_body : "modules/widgets/opendata/selectCity-0.8.1.html",
selectcity : "modules/widgets/opendata/selectCity-0.8.1",
detailToiletInfo_body : "modules/widgets/opendata/detailToiletInfo-0.8.1.html",
detailToiletInfo : "modules/widgets/opendata/detailToiletInfo-0.8.1",
searchResult_body : "modules/widgets/youtube/searchResult-0.8.1.html",
searchResult : "modules/widgets/youtube/searchResult-0.8.1",
//modules을 어디에다가 정의해야하는지 몰라서 우선 여기에다
//youtube modules
AuthYoutube : "modules/widgets/youtube/api/AuthYoutube",
youtubeToken : "modules/widgets/youtube/models/youtubePagetokenModel",
requestApi : "modules/widgets/youtube/api/requestApi",
uploadApi : "modules/widgets/youtube/api/uploadApi",
//toilet modules
toilet : "modules/widgets/opendata/api/toiletInfo",
toiletDetailInfo : "modules/widgets/opendata/model/toiletDetailInfo",
}
,
UCMS.getRootPath()
);
},
_initRoute: function( options )
{
var self = this;
this._route = new (Backbone.Marionette.AppRouter.extend(
{
routes:
{
"": "doHome",
"home": "doHome",
"!login" : "doLogin",
"!join" : "doJoin",
"up_join": "upJoin",
"!member":"doMemberConfirm",
"!youtube": "doYoutube",
"!publicdata": "doPublic",
"!selectCity": "doSelectcity",
"!detailInfo": "doDetailInfo",
"!likedVideolist": "doChkLiked",
"!searchResult": "doSearch",
},
onRoute: function( name, path, route )
{
UCMSPlatform.log("Routing : "+name+", path: "+path+", route: "+route);
var panelTag = Backbone.history.getFragment();
if( panelTag )
{
Logger.debug("Tracking Tag : "+panelTag);
Logger.debug("self._tracker : "+self._tracker);
if(self._tracker != null)
self._tracker.trackingView( panelTag );
}
},
doHome: function()
{
UCMSPlatform.log("apps doHome()");
self._setPanel("doHome");
}
,
doLogin: function()
{
UCMSPlatform.log("apps doLogin()");
self._setPanel("doLogin");
}
,
doMemberConfirm : function()
{
UCMSPlatform.log("apps doMemberConfirm()");
self._setPanel("doMemberConfirm");
}
,
doYoutube: function()
{
UCMSPlatform.log("apps doYoutube()");
self._setPanel("doYoutube");
}
,
doPublic: function()
{
UCMSPlatform.log("apps doPublic()");
self._setPanel("doPublic");
}
,
doSelectcity: function(){
UCMSPlatform.log("apps doSelectcity()");
self._setPanel("doSelectcity");
}
,
doDetailInfo: function(){
UCMSPlatform.log("apps doSelectcity()");
self._setPanel("doDetailInfo");
},
doSearch: function(){
self._setPanel("doSearch");
}
}
));
},
_initUI: function( options )
{
Logger.info("_initUI options " + JSON.stringify(options));
Logger.info(" UCMS.SPA.isDesktop() " + UCMS.SPA.isDesktop());
Logger.info("UCMS.SPA.isAppOS() " + UCMS.SPA.isAppOS());
if( UCMS.SPA.isDesktop() == false && UCMS.SPA.isAppOS() == false )
{
//
// 모바일에서 브라우저로 진입한 경우,
// 앱으로 전환할 수 있는 영역을 확보한다.
//
$("body").append("<div class=switcher_region/><div class=body_region/>");
this.addRegions(
{
switcher: ".switcher_region",
body: ".body_region"
});
}
else
{
this.addRegions(
{
body: options.bodyTag
});
}
// 웹뷰 높이 적용
UCMS.adjustViewHeight($("body"));
// 웹 팝업 이벤트 가로채기
BaroAppBase.hookingHyperLink( options.bodyTag, "web:open" );
if( UCMS.SPA.isAppOS() == true && UCMS.SPA.isAndroid() == false )
{
UCMS.initFixedHandler("input");
}
Logger.info("_initUI options end " );
},
_setPanel: function( moduleName, p_type, container_id, title, item_id )
{
/*
var thePanel = UCMSPlatform.SPA.AppMain.createInstance( moduleName );
if( thePanel )
{
this._showFrame( thePanel );
return;
}
*/
UCMSPlatform.log("Loading a Panel!");
var self = this;
if(p_type != undefined)
self._param.type = p_type;
if( moduleName === "doHome" )
{
self._moduleLoading("home");
}
else if( moduleName === "doHome" )
{
self._moduleLoading("home");
}
else if( moduleName === "doLogin" )
{
self._moduleLoading("login");
}
else if( moduleName === "doMemberConfirm" )
{
self._moduleLoading("member");
}
else if( moduleName === "doYoutube" )
{
self._moduleLoading("youtube");
}
else if( moduleName === "doPublic")
{
self._moduleLoading("publicdata");
}
else if( moduleName === "doSelectcity")
{
self._moduleLoading("selectcity");
}
else if( moduleName === "doDetailInfo")
{
self._moduleLoading("detailToiletInfo");
}
else if( moduleName === "doSearch")
{
self._moduleLoading("searchResult");
}
},
_showFrame: function(framePanel)
{
this.body.show( framePanel ); | },
| random_line_split | |
mod.rs | -> Range<usize> {
start + offset..end + offset
}
impl SubMatch {
pub fn match_indices(&self, offset: usize) -> Range<usize> {
range(self.start, self.end, offset)
}
// FIXME find the word in non-utf8?
pub fn match_indices_for_dumb_jump(&self, offset: usize, search_word: &Word) -> Range<usize> {
// The text in SubMatch is not exactly the search word itself in some cases,
// we need to first find the offset of search word in the SubMatch text manually.
match search_word.find(&self.m.text()) {
Some(search_word_offset) => {
let start = self.start + search_word_offset;
range(start, start + search_word.len, offset)
}
None => Default::default(),
}
}
}
impl PartialEq for Match {
fn eq(&self, other: &Match) -> bool {
// Ignore the `submatches` field.
//
// Given a certain search word, if all the other fields are same, especially the
// `absolute_offset` equals, these two Match can be considered the same.
self.path == other.path
&& self.lines == other.lines
&& self.line_number == other.line_number
&& self.absolute_offset == other.absolute_offset
}
}
impl Eq for Match {}
impl Match {
pub fn path(&self) -> Cow<str> {
self.path.text()
}
pub fn line_number(&self) -> u64 {
self.line_number.unwrap_or_default()
}
pub fn column(&self) -> usize {
self.submatches.get(0).map(|x| x.start).unwrap_or_default()
}
/// Returns true if the text line starts with `pat`.
pub fn line_starts_with(&self, pat: &str) -> bool {
self.lines.text().trim_start().starts_with(pat)
}
pub fn match_indices(&self, offset: usize) -> Vec<usize> {
self.submatches
.iter()
.flat_map(|s| s.match_indices(offset))
.collect()
}
pub fn match_indices_for_dumb_jump(&self, offset: usize, search_word: &Word) -> Vec<usize> {
self.submatches
.iter()
.flat_map(|s| s.match_indices_for_dumb_jump(offset, search_word))
.collect()
}
}
impl TryFrom<&[u8]> for Match {
type Error = Cow<'static, str>;
fn try_from(byte_line: &[u8]) -> Result<Self, Self::Error> {
let msg = serde_json::from_slice::<Message>(byte_line)
.map_err(|e| format!("deserialize error: {e:?}"))?;
if let Message::Match(mat) = msg {
Ok(mat)
} else {
Err("Not Message::Match type".into())
}
}
}
impl TryFrom<&str> for Match {
type Error = Cow<'static, str>;
fn try_from(line: &str) -> Result<Self, Self::Error> {
let msg = serde_json::from_str::<Message>(line)
.map_err(|e| format!("deserialize error: {e:?}"))?;
if let Message::Match(mat) = msg {
Ok(mat)
} else {
Err("Not Message::Match type".into())
}
}
}
impl Match {
/// Returns a pair of the formatted `String` and the offset of origin match indices.
///
/// The formatted String is same with the output line using rg's -vimgrep option.
fn grep_line_format(&self, enable_icon: bool) -> (String, usize) {
let path = self.path();
let line_number = self.line_number();
let column = self.column();
let pattern = self.pattern();
let pattern = pattern.trim_end();
// filepath:line_number:column:text, 3 extra `:` in the formatted String.
let mut offset =
path.len() + display_width(line_number as usize) + display_width(column) + 3;
let formatted_line = if enable_icon {
let icon = icon::file_icon(&path);
offset += icon.len_utf8() + 1;
format!("{icon} {path}:{line_number}:{column}:{pattern}")
} else {
format!("{path}:{line_number}:{column}:{pattern}")
};
(formatted_line, offset)
}
pub fn build_grep_line(&self, enable_icon: bool) -> (String, Vec<usize>) {
let (formatted, offset) = self.grep_line_format(enable_icon);
let indices = self.match_indices(offset);
(formatted, indices)
}
#[inline]
pub fn pattern(&self) -> Cow<str> {
self.lines.text()
}
pub fn pattern_priority(&self) -> dumb_analyzer::Priority {
self.path()
.rsplit_once('.')
.and_then(|(_, file_ext)| {
dumb_analyzer::calculate_pattern_priority(self.pattern(), file_ext)
})
.unwrap_or_default()
}
/// Returns a pair of the formatted `String` and the offset of matches for dumb_jump provider.
///
/// NOTE: [`pattern::DUMB_JUMP_LINE`] must be updated accordingly once the format is changed.
fn jump_line_format(&self, kind: &str) -> (String, usize) {
let path = self.path();
let line_number = self.line_number();
let column = self.column();
let pattern = self.pattern();
let pattern = pattern.trim_end();
let formatted_line = format!("[r{kind}]{path}:{line_number}:{column}:{pattern}",);
let offset = kind.len()
+ path.len()
+ display_width(line_number as usize)
+ display_width(column)
+ 6; // `[r]` + 3 `:`
(formatted_line, offset)
}
pub fn build_jump_line(&self, kind: &str, word: &Word) -> (String, Vec<usize>) {
let (formatted, offset) = self.jump_line_format(kind);
let indices = self.match_indices_for_dumb_jump(offset, word);
(formatted, indices)
}
fn jump_line_format_bare(&self) -> (String, usize) {
let line_number = self.line_number();
let column = self.column();
let pattern = self.pattern();
let pattern = pattern.trim_end();
let formatted_string = format!(" {line_number}:{column}:{pattern}");
let offset = display_width(line_number as usize) + display_width(column) + 2 + 2;
(formatted_string, offset)
}
pub fn build_jump_line_bare(&self, word: &Word) -> (String, Vec<usize>) {
let (formatted, offset) = self.jump_line_format_bare();
let indices = self.match_indices_for_dumb_jump(offset, word);
(formatted, indices)
}
}
const RG_ARGS: &[&str] = &[
"rg",
"--column",
"--line-number",
"--no-heading",
"--color=never",
"--smart-case",
"",
".",
];
// Ref https://github.com/liuchengxu/vim-clap/issues/533
// Now `.` is pushed to the end for all platforms due to https://github.com/liuchengxu/vim-clap/issues/711.
pub const RG_EXEC_CMD: &str =
"rg --column --line-number --no-heading --color=never --smart-case '' .";
// Used for creating the cache in async context.
#[derive(Debug, Clone, Hash)]
pub struct RgTokioCommand {
shell_cmd: ShellCommand,
}
impl RgTokioCommand {
pub fn new(dir: PathBuf) -> Self {
let shell_cmd = ShellCommand::new(RG_EXEC_CMD.into(), dir);
Self { shell_cmd }
}
pub fn cache_digest(&self) -> Option<Digest> {
self.shell_cmd.cache_digest()
}
pub async fn create_cache(self) -> Result<Digest> {
let cache_file = self.shell_cmd.cache_file_path()?;
let std_cmd = rg_command(&self.shell_cmd.dir);
let mut tokio_cmd = tokio::process::Command::from(std_cmd);
crate::process::tokio::write_stdout_to_file(&mut tokio_cmd, &cache_file).await?;
let digest = crate::cache::store_cache_digest(self.shell_cmd.clone(), cache_file)?;
Ok(digest)
}
}
pub fn rg_command<P: AsRef<Path>>(dir: P) -> Command {
// Can not use StdCommand as it joins the args which does not work somehow.
let mut cmd = Command::new(RG_ARGS[0]);
// Do not use --vimgrep here.
cmd.args(&RG_ARGS[1..]).current_dir(dir);
cmd
}
pub fn refresh_cache(dir: impl AsRef<Path>) -> Result<Digest> {
let shell_cmd = rg_shell_command(dir.as_ref());
let cache_file_path = shell_cmd.cache_file_path()?;
let mut cmd = rg_command(dir.as_ref());
crate::process::write_stdout_to_file(&mut cmd, &cache_file_path)?;
let digest = crate::cache::store_cache_digest(shell_cmd, cache_file_path)?;
Ok(digest)
}
#[inline]
pub fn rg_shell_command<P: AsRef<Path>>(dir: P) -> ShellCommand | {
ShellCommand::new(RG_EXEC_CMD.into(), PathBuf::from(dir.as_ref()))
} | identifier_body | |
mod.rs | ()
.map(|exit_status| exit_status.success())
.unwrap_or(false)
});
/// Map of file extension to ripgrep language.
///
/// https://github.com/BurntSushi/ripgrep/blob/20534fad04/crates/ignore/src/default_types.rs
static RG_LANGUAGE_EXT_TABLE: Lazy<HashMap<&str, &str>> = Lazy::new(|| {
default_types::DEFAULT_TYPES
.iter()
.flat_map(|(lang, values)| {
values.iter().filter_map(|v| {
v.split('.').last().and_then(|ext| {
// Simply ignore the abnormal cases.
if ext.contains('[') || ext.contains('*') {
None
} else {
Some((ext, *lang))
}
})
})
})
.collect()
});
/// Finds the ripgrep language given the file extension `ext`.
pub fn get_language(file_extension: &str) -> Option<&&str> {
RG_LANGUAGE_EXT_TABLE.get(file_extension)
}
/// Word represents the input query around by word boundries.
#[derive(Clone, Debug)]
pub struct Word {
pub raw: String,
pub len: usize, | pub re: regex::Regex,
}
impl Word {
pub fn new(re_word: String, re: regex::Regex) -> Word {
Self {
len: re_word.len(),
raw: re_word,
re,
}
}
pub fn find(&self, line: &str) -> Option<usize> {
self.re.find(line).map(|mat| mat.start())
}
}
#[inline]
fn range(start: usize, end: usize, offset: usize) -> Range<usize> {
start + offset..end + offset
}
impl SubMatch {
pub fn match_indices(&self, offset: usize) -> Range<usize> {
range(self.start, self.end, offset)
}
// FIXME find the word in non-utf8?
pub fn match_indices_for_dumb_jump(&self, offset: usize, search_word: &Word) -> Range<usize> {
// The text in SubMatch is not exactly the search word itself in some cases,
// we need to first find the offset of search word in the SubMatch text manually.
match search_word.find(&self.m.text()) {
Some(search_word_offset) => {
let start = self.start + search_word_offset;
range(start, start + search_word.len, offset)
}
None => Default::default(),
}
}
}
impl PartialEq for Match {
fn eq(&self, other: &Match) -> bool {
// Ignore the `submatches` field.
//
// Given a certain search word, if all the other fields are same, especially the
// `absolute_offset` equals, these two Match can be considered the same.
self.path == other.path
&& self.lines == other.lines
&& self.line_number == other.line_number
&& self.absolute_offset == other.absolute_offset
}
}
impl Eq for Match {}
impl Match {
pub fn path(&self) -> Cow<str> {
self.path.text()
}
pub fn line_number(&self) -> u64 {
self.line_number.unwrap_or_default()
}
pub fn column(&self) -> usize {
self.submatches.get(0).map(|x| x.start).unwrap_or_default()
}
/// Returns true if the text line starts with `pat`.
pub fn line_starts_with(&self, pat: &str) -> bool {
self.lines.text().trim_start().starts_with(pat)
}
pub fn match_indices(&self, offset: usize) -> Vec<usize> {
self.submatches
.iter()
.flat_map(|s| s.match_indices(offset))
.collect()
}
pub fn match_indices_for_dumb_jump(&self, offset: usize, search_word: &Word) -> Vec<usize> {
self.submatches
.iter()
.flat_map(|s| s.match_indices_for_dumb_jump(offset, search_word))
.collect()
}
}
impl TryFrom<&[u8]> for Match {
type Error = Cow<'static, str>;
fn try_from(byte_line: &[u8]) -> Result<Self, Self::Error> {
let msg = serde_json::from_slice::<Message>(byte_line)
.map_err(|e| format!("deserialize error: {e:?}"))?;
if let Message::Match(mat) = msg {
Ok(mat)
} else {
Err("Not Message::Match type".into())
}
}
}
impl TryFrom<&str> for Match {
type Error = Cow<'static, str>;
fn try_from(line: &str) -> Result<Self, Self::Error> {
let msg = serde_json::from_str::<Message>(line)
.map_err(|e| format!("deserialize error: {e:?}"))?;
if let Message::Match(mat) = msg {
Ok(mat)
} else {
Err("Not Message::Match type".into())
}
}
}
impl Match {
/// Returns a pair of the formatted `String` and the offset of origin match indices.
///
/// The formatted String is same with the output line using rg's -vimgrep option.
fn grep_line_format(&self, enable_icon: bool) -> (String, usize) {
let path = self.path();
let line_number = self.line_number();
let column = self.column();
let pattern = self.pattern();
let pattern = pattern.trim_end();
// filepath:line_number:column:text, 3 extra `:` in the formatted String.
let mut offset =
path.len() + display_width(line_number as usize) + display_width(column) + 3;
let formatted_line = if enable_icon {
let icon = icon::file_icon(&path);
offset += icon.len_utf8() + 1;
format!("{icon} {path}:{line_number}:{column}:{pattern}")
} else {
format!("{path}:{line_number}:{column}:{pattern}")
};
(formatted_line, offset)
}
pub fn build_grep_line(&self, enable_icon: bool) -> (String, Vec<usize>) {
let (formatted, offset) = self.grep_line_format(enable_icon);
let indices = self.match_indices(offset);
(formatted, indices)
}
#[inline]
pub fn pattern(&self) -> Cow<str> {
self.lines.text()
}
pub fn pattern_priority(&self) -> dumb_analyzer::Priority {
self.path()
.rsplit_once('.')
.and_then(|(_, file_ext)| {
dumb_analyzer::calculate_pattern_priority(self.pattern(), file_ext)
})
.unwrap_or_default()
}
/// Returns a pair of the formatted `String` and the offset of matches for dumb_jump provider.
///
/// NOTE: [`pattern::DUMB_JUMP_LINE`] must be updated accordingly once the format is changed.
fn jump_line_format(&self, kind: &str) -> (String, usize) {
let path = self.path();
let line_number = self.line_number();
let column = self.column();
let pattern = self.pattern();
let pattern = pattern.trim_end();
let formatted_line = format!("[r{kind}]{path}:{line_number}:{column}:{pattern}",);
let offset = kind.len()
+ path.len()
+ display_width(line_number as usize)
+ display_width(column)
+ 6; // `[r]` + 3 `:`
(formatted_line, offset)
}
pub fn build_jump_line(&self, kind: &str, word: &Word) -> (String, Vec<usize>) {
let (formatted, offset) = self.jump_line_format(kind);
let indices = self.match_indices_for_dumb_jump(offset, word);
(formatted, indices)
}
fn jump_line_format_bare(&self) -> (String, usize) {
let line_number = self.line_number();
let column = self.column();
let pattern = self.pattern();
let pattern = pattern.trim_end();
let formatted_string = format!(" {line_number}:{column}:{pattern}");
let offset = display_width(line_number as usize) + display_width(column) + 2 + 2;
(formatted_string, offset)
}
pub fn build_jump_line_bare(&self, word: &Word) -> (String, Vec<usize>) {
let (formatted, offset) = self.jump_line_format_bare();
let indices = self.match_indices_for_dumb_jump(offset, word);
(formatted, indices)
}
}
const RG_ARGS: &[&str] = &[
"rg",
"--column",
"--line-number",
"--no-heading",
"--color=never",
"--smart-case",
"",
".",
];
// Ref https://github.com/liuchengxu/vim-clap/issues/533
// Now `.` is pushed to the end for all platforms due to https://github.com/liuchengxu/vim-clap/issues/711.
pub const RG_EXEC_CMD: &str =
"rg --column --line-number --no-heading --color=never --smart-case '' .";
// Used for creating the cache in async context.
#[derive(Debug, Clone, Hash)]
pub struct RgTokioCommand {
shell_cmd: ShellCommand,
}
impl RgTokioCommand {
pub fn new(dir: PathBuf) -> Self {
let shell_cmd = ShellCommand::new(RG_EXEC_CMD.into(), dir);
Self | random_line_split | |
mod.rs | ()
.map(|exit_status| exit_status.success())
.unwrap_or(false)
});
/// Map of file extension to ripgrep language.
///
/// https://github.com/BurntSushi/ripgrep/blob/20534fad04/crates/ignore/src/default_types.rs
static RG_LANGUAGE_EXT_TABLE: Lazy<HashMap<&str, &str>> = Lazy::new(|| {
default_types::DEFAULT_TYPES
.iter()
.flat_map(|(lang, values)| {
values.iter().filter_map(|v| {
v.split('.').last().and_then(|ext| {
// Simply ignore the abnormal cases.
if ext.contains('[') || ext.contains('*') {
None
} else {
Some((ext, *lang))
}
})
})
})
.collect()
});
/// Finds the ripgrep language given the file extension `ext`.
pub fn get_language(file_extension: &str) -> Option<&&str> {
RG_LANGUAGE_EXT_TABLE.get(file_extension)
}
/// Word represents the input query around by word boundries.
#[derive(Clone, Debug)]
pub struct Word {
pub raw: String,
pub len: usize,
pub re: regex::Regex,
}
impl Word {
pub fn new(re_word: String, re: regex::Regex) -> Word {
Self {
len: re_word.len(),
raw: re_word,
re,
}
}
pub fn find(&self, line: &str) -> Option<usize> {
self.re.find(line).map(|mat| mat.start())
}
}
#[inline]
fn range(start: usize, end: usize, offset: usize) -> Range<usize> {
start + offset..end + offset
}
impl SubMatch {
pub fn match_indices(&self, offset: usize) -> Range<usize> {
range(self.start, self.end, offset)
}
// FIXME find the word in non-utf8?
pub fn match_indices_for_dumb_jump(&self, offset: usize, search_word: &Word) -> Range<usize> {
// The text in SubMatch is not exactly the search word itself in some cases,
// we need to first find the offset of search word in the SubMatch text manually.
match search_word.find(&self.m.text()) {
Some(search_word_offset) => {
let start = self.start + search_word_offset;
range(start, start + search_word.len, offset)
}
None => Default::default(),
}
}
}
impl PartialEq for Match {
fn eq(&self, other: &Match) -> bool {
// Ignore the `submatches` field.
//
// Given a certain search word, if all the other fields are same, especially the
// `absolute_offset` equals, these two Match can be considered the same.
self.path == other.path
&& self.lines == other.lines
&& self.line_number == other.line_number
&& self.absolute_offset == other.absolute_offset
}
}
impl Eq for Match {}
impl Match {
pub fn path(&self) -> Cow<str> {
self.path.text()
}
pub fn line_number(&self) -> u64 {
self.line_number.unwrap_or_default()
}
pub fn column(&self) -> usize {
self.submatches.get(0).map(|x| x.start).unwrap_or_default()
}
/// Returns true if the text line starts with `pat`.
pub fn line_starts_with(&self, pat: &str) -> bool {
self.lines.text().trim_start().starts_with(pat)
}
pub fn match_indices(&self, offset: usize) -> Vec<usize> {
self.submatches
.iter()
.flat_map(|s| s.match_indices(offset))
.collect()
}
pub fn match_indices_for_dumb_jump(&self, offset: usize, search_word: &Word) -> Vec<usize> {
self.submatches
.iter()
.flat_map(|s| s.match_indices_for_dumb_jump(offset, search_word))
.collect()
}
}
impl TryFrom<&[u8]> for Match {
type Error = Cow<'static, str>;
fn try_from(byte_line: &[u8]) -> Result<Self, Self::Error> {
let msg = serde_json::from_slice::<Message>(byte_line)
.map_err(|e| format!("deserialize error: {e:?}"))?;
if let Message::Match(mat) = msg {
Ok(mat)
} else {
Err("Not Message::Match type".into())
}
}
}
impl TryFrom<&str> for Match {
type Error = Cow<'static, str>;
fn try_from(line: &str) -> Result<Self, Self::Error> {
let msg = serde_json::from_str::<Message>(line)
.map_err(|e| format!("deserialize error: {e:?}"))?;
if let Message::Match(mat) = msg {
Ok(mat)
} else |
}
}
impl Match {
/// Returns a pair of the formatted `String` and the offset of origin match indices.
///
/// The formatted String is same with the output line using rg's -vimgrep option.
fn grep_line_format(&self, enable_icon: bool) -> (String, usize) {
let path = self.path();
let line_number = self.line_number();
let column = self.column();
let pattern = self.pattern();
let pattern = pattern.trim_end();
// filepath:line_number:column:text, 3 extra `:` in the formatted String.
let mut offset =
path.len() + display_width(line_number as usize) + display_width(column) + 3;
let formatted_line = if enable_icon {
let icon = icon::file_icon(&path);
offset += icon.len_utf8() + 1;
format!("{icon} {path}:{line_number}:{column}:{pattern}")
} else {
format!("{path}:{line_number}:{column}:{pattern}")
};
(formatted_line, offset)
}
pub fn build_grep_line(&self, enable_icon: bool) -> (String, Vec<usize>) {
let (formatted, offset) = self.grep_line_format(enable_icon);
let indices = self.match_indices(offset);
(formatted, indices)
}
#[inline]
pub fn pattern(&self) -> Cow<str> {
self.lines.text()
}
pub fn pattern_priority(&self) -> dumb_analyzer::Priority {
self.path()
.rsplit_once('.')
.and_then(|(_, file_ext)| {
dumb_analyzer::calculate_pattern_priority(self.pattern(), file_ext)
})
.unwrap_or_default()
}
/// Returns a pair of the formatted `String` and the offset of matches for dumb_jump provider.
///
/// NOTE: [`pattern::DUMB_JUMP_LINE`] must be updated accordingly once the format is changed.
fn jump_line_format(&self, kind: &str) -> (String, usize) {
let path = self.path();
let line_number = self.line_number();
let column = self.column();
let pattern = self.pattern();
let pattern = pattern.trim_end();
let formatted_line = format!("[r{kind}]{path}:{line_number}:{column}:{pattern}",);
let offset = kind.len()
+ path.len()
+ display_width(line_number as usize)
+ display_width(column)
+ 6; // `[r]` + 3 `:`
(formatted_line, offset)
}
pub fn build_jump_line(&self, kind: &str, word: &Word) -> (String, Vec<usize>) {
let (formatted, offset) = self.jump_line_format(kind);
let indices = self.match_indices_for_dumb_jump(offset, word);
(formatted, indices)
}
fn jump_line_format_bare(&self) -> (String, usize) {
let line_number = self.line_number();
let column = self.column();
let pattern = self.pattern();
let pattern = pattern.trim_end();
let formatted_string = format!(" {line_number}:{column}:{pattern}");
let offset = display_width(line_number as usize) + display_width(column) + 2 + 2;
(formatted_string, offset)
}
pub fn build_jump_line_bare(&self, word: &Word) -> (String, Vec<usize>) {
let (formatted, offset) = self.jump_line_format_bare();
let indices = self.match_indices_for_dumb_jump(offset, word);
(formatted, indices)
}
}
const RG_ARGS: &[&str] = &[
"rg",
"--column",
"--line-number",
"--no-heading",
"--color=never",
"--smart-case",
"",
".",
];
// Ref https://github.com/liuchengxu/vim-clap/issues/533
// Now `.` is pushed to the end for all platforms due to https://github.com/liuchengxu/vim-clap/issues/711.
pub const RG_EXEC_CMD: &str =
"rg --column --line-number --no-heading --color=never --smart-case '' .";
// Used for creating the cache in async context.
#[derive(Debug, Clone, Hash)]
pub struct RgTokioCommand {
shell_cmd: ShellCommand,
}
impl RgTokioCommand {
pub fn new(dir: PathBuf) -> Self {
let shell_cmd = ShellCommand::new(RG_EXEC_CMD.into(), dir);
| {
Err("Not Message::Match type".into())
} | conditional_block |
mod.rs | ()
.map(|exit_status| exit_status.success())
.unwrap_or(false)
});
/// Map of file extension to ripgrep language.
///
/// https://github.com/BurntSushi/ripgrep/blob/20534fad04/crates/ignore/src/default_types.rs
static RG_LANGUAGE_EXT_TABLE: Lazy<HashMap<&str, &str>> = Lazy::new(|| {
default_types::DEFAULT_TYPES
.iter()
.flat_map(|(lang, values)| {
values.iter().filter_map(|v| {
v.split('.').last().and_then(|ext| {
// Simply ignore the abnormal cases.
if ext.contains('[') || ext.contains('*') {
None
} else {
Some((ext, *lang))
}
})
})
})
.collect()
});
/// Finds the ripgrep language given the file extension `ext`.
pub fn get_language(file_extension: &str) -> Option<&&str> {
RG_LANGUAGE_EXT_TABLE.get(file_extension)
}
/// Word represents the input query around by word boundries.
#[derive(Clone, Debug)]
pub struct Word {
pub raw: String,
pub len: usize,
pub re: regex::Regex,
}
impl Word {
pub fn new(re_word: String, re: regex::Regex) -> Word {
Self {
len: re_word.len(),
raw: re_word,
re,
}
}
pub fn find(&self, line: &str) -> Option<usize> {
self.re.find(line).map(|mat| mat.start())
}
}
#[inline]
fn range(start: usize, end: usize, offset: usize) -> Range<usize> {
start + offset..end + offset
}
impl SubMatch {
pub fn match_indices(&self, offset: usize) -> Range<usize> {
range(self.start, self.end, offset)
}
// FIXME find the word in non-utf8?
pub fn match_indices_for_dumb_jump(&self, offset: usize, search_word: &Word) -> Range<usize> {
// The text in SubMatch is not exactly the search word itself in some cases,
// we need to first find the offset of search word in the SubMatch text manually.
match search_word.find(&self.m.text()) {
Some(search_word_offset) => {
let start = self.start + search_word_offset;
range(start, start + search_word.len, offset)
}
None => Default::default(),
}
}
}
impl PartialEq for Match {
fn eq(&self, other: &Match) -> bool {
// Ignore the `submatches` field.
//
// Given a certain search word, if all the other fields are same, especially the
// `absolute_offset` equals, these two Match can be considered the same.
self.path == other.path
&& self.lines == other.lines
&& self.line_number == other.line_number
&& self.absolute_offset == other.absolute_offset
}
}
impl Eq for Match {}
impl Match {
pub fn path(&self) -> Cow<str> {
self.path.text()
}
pub fn line_number(&self) -> u64 {
self.line_number.unwrap_or_default()
}
pub fn column(&self) -> usize {
self.submatches.get(0).map(|x| x.start).unwrap_or_default()
}
/// Returns true if the text line starts with `pat`.
pub fn line_starts_with(&self, pat: &str) -> bool {
self.lines.text().trim_start().starts_with(pat)
}
pub fn match_indices(&self, offset: usize) -> Vec<usize> {
self.submatches
.iter()
.flat_map(|s| s.match_indices(offset))
.collect()
}
pub fn match_indices_for_dumb_jump(&self, offset: usize, search_word: &Word) -> Vec<usize> {
self.submatches
.iter()
.flat_map(|s| s.match_indices_for_dumb_jump(offset, search_word))
.collect()
}
}
impl TryFrom<&[u8]> for Match {
type Error = Cow<'static, str>;
fn try_from(byte_line: &[u8]) -> Result<Self, Self::Error> {
let msg = serde_json::from_slice::<Message>(byte_line)
.map_err(|e| format!("deserialize error: {e:?}"))?;
if let Message::Match(mat) = msg {
Ok(mat)
} else {
Err("Not Message::Match type".into())
}
}
}
impl TryFrom<&str> for Match {
type Error = Cow<'static, str>;
fn try_from(line: &str) -> Result<Self, Self::Error> {
let msg = serde_json::from_str::<Message>(line)
.map_err(|e| format!("deserialize error: {e:?}"))?;
if let Message::Match(mat) = msg {
Ok(mat)
} else {
Err("Not Message::Match type".into())
}
}
}
impl Match {
/// Returns a pair of the formatted `String` and the offset of origin match indices.
///
/// The formatted String is same with the output line using rg's -vimgrep option.
fn grep_line_format(&self, enable_icon: bool) -> (String, usize) {
let path = self.path();
let line_number = self.line_number();
let column = self.column();
let pattern = self.pattern();
let pattern = pattern.trim_end();
// filepath:line_number:column:text, 3 extra `:` in the formatted String.
let mut offset =
path.len() + display_width(line_number as usize) + display_width(column) + 3;
let formatted_line = if enable_icon {
let icon = icon::file_icon(&path);
offset += icon.len_utf8() + 1;
format!("{icon} {path}:{line_number}:{column}:{pattern}")
} else {
format!("{path}:{line_number}:{column}:{pattern}")
};
(formatted_line, offset)
}
pub fn build_grep_line(&self, enable_icon: bool) -> (String, Vec<usize>) {
let (formatted, offset) = self.grep_line_format(enable_icon);
let indices = self.match_indices(offset);
(formatted, indices)
}
#[inline]
pub fn pattern(&self) -> Cow<str> {
self.lines.text()
}
pub fn | (&self) -> dumb_analyzer::Priority {
self.path()
.rsplit_once('.')
.and_then(|(_, file_ext)| {
dumb_analyzer::calculate_pattern_priority(self.pattern(), file_ext)
})
.unwrap_or_default()
}
/// Returns a pair of the formatted `String` and the offset of matches for dumb_jump provider.
///
/// NOTE: [`pattern::DUMB_JUMP_LINE`] must be updated accordingly once the format is changed.
fn jump_line_format(&self, kind: &str) -> (String, usize) {
let path = self.path();
let line_number = self.line_number();
let column = self.column();
let pattern = self.pattern();
let pattern = pattern.trim_end();
let formatted_line = format!("[r{kind}]{path}:{line_number}:{column}:{pattern}",);
let offset = kind.len()
+ path.len()
+ display_width(line_number as usize)
+ display_width(column)
+ 6; // `[r]` + 3 `:`
(formatted_line, offset)
}
pub fn build_jump_line(&self, kind: &str, word: &Word) -> (String, Vec<usize>) {
let (formatted, offset) = self.jump_line_format(kind);
let indices = self.match_indices_for_dumb_jump(offset, word);
(formatted, indices)
}
fn jump_line_format_bare(&self) -> (String, usize) {
let line_number = self.line_number();
let column = self.column();
let pattern = self.pattern();
let pattern = pattern.trim_end();
let formatted_string = format!(" {line_number}:{column}:{pattern}");
let offset = display_width(line_number as usize) + display_width(column) + 2 + 2;
(formatted_string, offset)
}
pub fn build_jump_line_bare(&self, word: &Word) -> (String, Vec<usize>) {
let (formatted, offset) = self.jump_line_format_bare();
let indices = self.match_indices_for_dumb_jump(offset, word);
(formatted, indices)
}
}
const RG_ARGS: &[&str] = &[
"rg",
"--column",
"--line-number",
"--no-heading",
"--color=never",
"--smart-case",
"",
".",
];
// Ref https://github.com/liuchengxu/vim-clap/issues/533
// Now `.` is pushed to the end for all platforms due to https://github.com/liuchengxu/vim-clap/issues/711.
pub const RG_EXEC_CMD: &str =
"rg --column --line-number --no-heading --color=never --smart-case '' .";
// Used for creating the cache in async context.
#[derive(Debug, Clone, Hash)]
pub struct RgTokioCommand {
shell_cmd: ShellCommand,
}
impl RgTokioCommand {
pub fn new(dir: PathBuf) -> Self {
let shell_cmd = ShellCommand::new(RG_EXEC_CMD.into(), dir);
| pattern_priority | identifier_name |
tls_accept.rs | fn plaintext() {
let (client_result, server_result) = run_test(
Conditional::None(tls::ReasonForNoIdentity::Disabled),
|conn| write_then_read(conn, PING),
Conditional::None(tls::ReasonForNoIdentity::Disabled),
|(_, conn)| read_then_write(conn, PING.len(), PONG),
);
assert_eq!(client_result.is_tls(), false);
assert_eq!(&client_result.result.expect("pong")[..], PONG);
assert_eq!(server_result.is_tls(), false);
assert_eq!(&server_result.result.expect("ping")[..], PING);
}
#[test]
fn proxy_to_proxy_tls_works() {
let server_tls = test_util::FOO_NS1.validate().unwrap();
let client_tls = test_util::BAR_NS1.validate().unwrap();
let (client_result, server_result) = run_test(
Conditional::Some((client_tls, server_tls.tls_server_name())),
|conn| write_then_read(conn, PING),
Conditional::Some(server_tls),
|(_, conn)| read_then_write(conn, PING.len(), PONG),
);
assert_eq!(client_result.is_tls(), true);
assert_eq!(&client_result.result.expect("pong")[..], PONG);
assert_eq!(server_result.is_tls(), true);
assert_eq!(&server_result.result.expect("ping")[..], PING);
}
#[test]
fn proxy_to_proxy_tls_pass_through_when_identity_does_not_match() {
let server_tls = test_util::FOO_NS1.validate().unwrap();
// Misuse the client's identity instead of the server's identity. Any
// identity other than `server_tls.server_identity` would work.
let client_tls = test_util::BAR_NS1.validate().expect("valid client cert");
let client_target = test_util::BAR_NS1.crt().name().clone();
let (client_result, server_result) = run_test(
Conditional::Some((client_tls, client_target)),
|conn| write_then_read(conn, PING),
Conditional::Some(server_tls),
|(_, conn)| read_then_write(conn, START_OF_TLS.len(), PONG),
);
// The server's connection will succeed with the TLS client hello passed
// through, because the SNI doesn't match its identity.
assert_eq!(client_result.is_tls(), false);
assert!(client_result.result.is_err());
assert_eq!(server_result.is_tls(), false);
assert_eq!(&server_result.result.unwrap()[..], START_OF_TLS);
}
struct Transported<R> {
/// The value of `Connection::peer_identity()` for the established connection.
///
/// This will be `None` if we never even get a `Connection`.
peer_identity: Option<tls::PeerIdentity>,
/// The connection's result.
result: Result<R, io::Error>,
}
impl<R> Transported<R> {
fn is_tls(&self) -> bool {
self.peer_identity
.as_ref()
.map(|i| i.is_some())
.unwrap_or(false)
}
}
/// Runs a test for a single TCP connection. `client` processes the connection
/// on the client side and `server` processes the connection on the server
/// side.
fn run_test<C, CF, CR, S, SF, SR>(
client_tls: tls::Conditional<(CrtKey, Name)>,
client: C,
server_tls: tls::Conditional<CrtKey>,
server: S,
) -> (Transported<CR>, Transported<SR>)
where
// Client
C: FnOnce(ClientConnection) -> CF + Clone + Send + 'static,
CF: Future<Item = CR, Error = io::Error> + Send + 'static,
CR: Send + 'static,
// Server
S: Fn(ServerConnection) -> SF + Clone + Send + 'static,
SF: Future<Item = SR, Error = io::Error> + Send + 'static,
SR: Send + 'static,
{
{
use tracing_subscriber::{fmt, EnvFilter};
let sub = fmt::Subscriber::builder()
.with_env_filter(EnvFilter::from_default_env())
.finish();
let _ = tracing::subscriber::set_global_default(sub);
}
let (client_tls, client_target_name) = match client_tls {
Conditional::Some((crtkey, name)) => (
Conditional::Some(ClientTls(crtkey)),
Conditional::Some(name),
),
Conditional::None(reason) => (Conditional::None(reason.clone()), Conditional::None(reason)),
};
// A future that will receive a single connection.
let (server, server_addr, server_result) = {
// Saves the result of every connection.
let (sender, receiver) = mpsc::channel::<Transported<SR>>();
// Let the OS decide the port number and then return the resulting
// `SocketAddr` so the client can connect to it. This allows multiple
// tests to run at once, which wouldn't work if they all were bound on
// a fixed port.
let addr = "127.0.0.1:0".parse::<SocketAddr>().unwrap();
let listen = Bind::new(addr, None).bind().expect("must bind");
let listen_addr = listen.listen_addr();
let sender = service_fn(move |(meta, conn): ServerConnection| {
let sender = sender.clone();
let peer_identity = Some(meta.peer_identity.clone());
let server = Box::new(server((meta, conn)).then(move |result| {
sender
.send(Transported {
peer_identity,
result,
})
.expect("send result");
future::ok::<(), Never>(())
}));
Box::new(future::ok::<_, Never>(server))
});
let accept = AcceptTls::new(server_tls, sender);
let server = Server::Init { listen, accept };
(server, listen_addr, receiver)
};
// A future that will open a single connection to the server.
let (client, client_result) = {
// Saves the result of the single connection. This could be a simpler
// type, e.g. `Arc<Mutex>`, but using a channel simplifies the code and
// parallels the server side.
let (sender, receiver) = mpsc::channel::<Transported<CR>>();
let sender_clone = sender.clone();
let peer_identity = Some(client_target_name.clone());
let client = tls::ConnectLayer::new(client_tls)
.layer(connect::Connect::new(None))
.oneshot(Target(server_addr, client_target_name))
.map_err(move |e| {
sender_clone
.send(Transported {
peer_identity: None,
result: Err(e),
})
.expect("send result");
()
})
.and_then(move |conn| {
client(conn).then(move |result| {
sender
.send(Transported {
peer_identity,
result,
})
.expect("send result");
Ok(())
})
});
(client, receiver)
};
tokio::run(server.join(client).map(|_| ()));
let client_result = client_result.try_recv().expect("client complete");
// XXX: This assumes that only one connection is accepted. TODO: allow the
// caller to observe the results for every connection, once we have tests
// that allow accepting multiple connections.
let server_result = server_result.try_recv().expect("server complete");
(client_result, server_result)
}
/// Writes `to_write` and shuts down the write side, then reads until EOF,
/// returning the bytes read.
fn write_then_read(
conn: impl AsyncRead + AsyncWrite,
to_write: &'static [u8],
) -> impl Future<Item = Vec<u8>, Error = io::Error> {
write_and_shutdown(conn, to_write)
.and_then(|conn| io::read_to_end(conn, Vec::new()))
.map(|(_conn, r)| r)
}
/// Reads until EOF then writes `to_write` and shuts down the write side,
/// returning the bytes read.
fn read_then_write(
conn: impl AsyncRead + AsyncWrite,
read_prefix_len: usize,
to_write: &'static [u8],
) -> impl Future<Item = Vec<u8>, Error = io::Error> {
io::read_exact(conn, vec![0; read_prefix_len])
.and_then(move |(conn, r)| write_and_shutdown(conn, to_write).map(|_conn| r))
}
/// writes `to_write` to `conn` and then shuts down the write side of `conn`.
fn write_and_shutdown<T: AsyncRead + AsyncWrite>(
conn: T,
to_write: &'static [u8],
) -> impl Future<Item = T, Error = io::Error> {
io::write_all(conn, to_write).and_then(|(mut conn, _)| {
conn.shutdown()?;
Ok(conn)
})
}
const PING: &[u8] = b"ping";
const PONG: &[u8] = b"pong";
const START_OF_TLS: &[u8] = &[22, 3, 1]; // ContentType::handshake version 3.1
enum Server<A: Accept<ServerConnection>>
where | AcceptTls<A, CrtKey>: Accept<<Listen as CoreListen>::Connection>,
{
Init {
listen: Listen,
accept: AcceptTls<A, CrtKey>, | random_line_split | |
tls_accept.rs | plaintext() {
let (client_result, server_result) = run_test(
Conditional::None(tls::ReasonForNoIdentity::Disabled),
|conn| write_then_read(conn, PING),
Conditional::None(tls::ReasonForNoIdentity::Disabled),
|(_, conn)| read_then_write(conn, PING.len(), PONG),
);
assert_eq!(client_result.is_tls(), false);
assert_eq!(&client_result.result.expect("pong")[..], PONG);
assert_eq!(server_result.is_tls(), false);
assert_eq!(&server_result.result.expect("ping")[..], PING);
}
#[test]
fn proxy_to_proxy_tls_works() {
let server_tls = test_util::FOO_NS1.validate().unwrap();
let client_tls = test_util::BAR_NS1.validate().unwrap();
let (client_result, server_result) = run_test(
Conditional::Some((client_tls, server_tls.tls_server_name())),
|conn| write_then_read(conn, PING),
Conditional::Some(server_tls),
|(_, conn)| read_then_write(conn, PING.len(), PONG),
);
assert_eq!(client_result.is_tls(), true);
assert_eq!(&client_result.result.expect("pong")[..], PONG);
assert_eq!(server_result.is_tls(), true);
assert_eq!(&server_result.result.expect("ping")[..], PING);
}
#[test]
fn proxy_to_proxy_tls_pass_through_when_identity_does_not_match() {
let server_tls = test_util::FOO_NS1.validate().unwrap();
// Misuse the client's identity instead of the server's identity. Any
// identity other than `server_tls.server_identity` would work.
let client_tls = test_util::BAR_NS1.validate().expect("valid client cert");
let client_target = test_util::BAR_NS1.crt().name().clone();
let (client_result, server_result) = run_test(
Conditional::Some((client_tls, client_target)),
|conn| write_then_read(conn, PING),
Conditional::Some(server_tls),
|(_, conn)| read_then_write(conn, START_OF_TLS.len(), PONG),
);
// The server's connection will succeed with the TLS client hello passed
// through, because the SNI doesn't match its identity.
assert_eq!(client_result.is_tls(), false);
assert!(client_result.result.is_err());
assert_eq!(server_result.is_tls(), false);
assert_eq!(&server_result.result.unwrap()[..], START_OF_TLS);
}
struct Transported<R> {
/// The value of `Connection::peer_identity()` for the established connection.
///
/// This will be `None` if we never even get a `Connection`.
peer_identity: Option<tls::PeerIdentity>,
/// The connection's result.
result: Result<R, io::Error>,
}
impl<R> Transported<R> {
fn is_tls(&self) -> bool |
}
/// Runs a test for a single TCP connection. `client` processes the connection
/// on the client side and `server` processes the connection on the server
/// side.
fn run_test<C, CF, CR, S, SF, SR>(
client_tls: tls::Conditional<(CrtKey, Name)>,
client: C,
server_tls: tls::Conditional<CrtKey>,
server: S,
) -> (Transported<CR>, Transported<SR>)
where
// Client
C: FnOnce(ClientConnection) -> CF + Clone + Send + 'static,
CF: Future<Item = CR, Error = io::Error> + Send + 'static,
CR: Send + 'static,
// Server
S: Fn(ServerConnection) -> SF + Clone + Send + 'static,
SF: Future<Item = SR, Error = io::Error> + Send + 'static,
SR: Send + 'static,
{
{
use tracing_subscriber::{fmt, EnvFilter};
let sub = fmt::Subscriber::builder()
.with_env_filter(EnvFilter::from_default_env())
.finish();
let _ = tracing::subscriber::set_global_default(sub);
}
let (client_tls, client_target_name) = match client_tls {
Conditional::Some((crtkey, name)) => (
Conditional::Some(ClientTls(crtkey)),
Conditional::Some(name),
),
Conditional::None(reason) => (Conditional::None(reason.clone()), Conditional::None(reason)),
};
// A future that will receive a single connection.
let (server, server_addr, server_result) = {
// Saves the result of every connection.
let (sender, receiver) = mpsc::channel::<Transported<SR>>();
// Let the OS decide the port number and then return the resulting
// `SocketAddr` so the client can connect to it. This allows multiple
// tests to run at once, which wouldn't work if they all were bound on
// a fixed port.
let addr = "127.0.0.1:0".parse::<SocketAddr>().unwrap();
let listen = Bind::new(addr, None).bind().expect("must bind");
let listen_addr = listen.listen_addr();
let sender = service_fn(move |(meta, conn): ServerConnection| {
let sender = sender.clone();
let peer_identity = Some(meta.peer_identity.clone());
let server = Box::new(server((meta, conn)).then(move |result| {
sender
.send(Transported {
peer_identity,
result,
})
.expect("send result");
future::ok::<(), Never>(())
}));
Box::new(future::ok::<_, Never>(server))
});
let accept = AcceptTls::new(server_tls, sender);
let server = Server::Init { listen, accept };
(server, listen_addr, receiver)
};
// A future that will open a single connection to the server.
let (client, client_result) = {
// Saves the result of the single connection. This could be a simpler
// type, e.g. `Arc<Mutex>`, but using a channel simplifies the code and
// parallels the server side.
let (sender, receiver) = mpsc::channel::<Transported<CR>>();
let sender_clone = sender.clone();
let peer_identity = Some(client_target_name.clone());
let client = tls::ConnectLayer::new(client_tls)
.layer(connect::Connect::new(None))
.oneshot(Target(server_addr, client_target_name))
.map_err(move |e| {
sender_clone
.send(Transported {
peer_identity: None,
result: Err(e),
})
.expect("send result");
()
})
.and_then(move |conn| {
client(conn).then(move |result| {
sender
.send(Transported {
peer_identity,
result,
})
.expect("send result");
Ok(())
})
});
(client, receiver)
};
tokio::run(server.join(client).map(|_| ()));
let client_result = client_result.try_recv().expect("client complete");
// XXX: This assumes that only one connection is accepted. TODO: allow the
// caller to observe the results for every connection, once we have tests
// that allow accepting multiple connections.
let server_result = server_result.try_recv().expect("server complete");
(client_result, server_result)
}
/// Writes `to_write` and shuts down the write side, then reads until EOF,
/// returning the bytes read.
fn write_then_read(
conn: impl AsyncRead + AsyncWrite,
to_write: &'static [u8],
) -> impl Future<Item = Vec<u8>, Error = io::Error> {
write_and_shutdown(conn, to_write)
.and_then(|conn| io::read_to_end(conn, Vec::new()))
.map(|(_conn, r)| r)
}
/// Reads until EOF then writes `to_write` and shuts down the write side,
/// returning the bytes read.
fn read_then_write(
conn: impl AsyncRead + AsyncWrite,
read_prefix_len: usize,
to_write: &'static [u8],
) -> impl Future<Item = Vec<u8>, Error = io::Error> {
io::read_exact(conn, vec![0; read_prefix_len])
.and_then(move |(conn, r)| write_and_shutdown(conn, to_write).map(|_conn| r))
}
/// writes `to_write` to `conn` and then shuts down the write side of `conn`.
fn write_and_shutdown<T: AsyncRead + AsyncWrite>(
conn: T,
to_write: &'static [u8],
) -> impl Future<Item = T, Error = io::Error> {
io::write_all(conn, to_write).and_then(|(mut conn, _)| {
conn.shutdown()?;
Ok(conn)
})
}
const PING: &[u8] = b"ping";
const PONG: &[u8] = b"pong";
const START_OF_TLS: &[u8] = &[22, 3, 1]; // ContentType::handshake version 3.1
enum Server<A: Accept<ServerConnection>>
where
AcceptTls<A, CrtKey>: Accept<<Listen as CoreListen>::Connection>,
{
Init {
listen: Listen,
accept: AcceptTls<A, Crt | {
self.peer_identity
.as_ref()
.map(|i| i.is_some())
.unwrap_or(false)
} | identifier_body |
tls_accept.rs | assert_eq!(client_result.is_tls(), false);
assert_eq!(&client_result.result.expect("pong")[..], PONG);
assert_eq!(server_result.is_tls(), false);
assert_eq!(&server_result.result.expect("ping")[..], PING);
}
#[test]
fn proxy_to_proxy_tls_works() {
let server_tls = test_util::FOO_NS1.validate().unwrap();
let client_tls = test_util::BAR_NS1.validate().unwrap();
let (client_result, server_result) = run_test(
Conditional::Some((client_tls, server_tls.tls_server_name())),
|conn| write_then_read(conn, PING),
Conditional::Some(server_tls),
|(_, conn)| read_then_write(conn, PING.len(), PONG),
);
assert_eq!(client_result.is_tls(), true);
assert_eq!(&client_result.result.expect("pong")[..], PONG);
assert_eq!(server_result.is_tls(), true);
assert_eq!(&server_result.result.expect("ping")[..], PING);
}
#[test]
fn proxy_to_proxy_tls_pass_through_when_identity_does_not_match() {
let server_tls = test_util::FOO_NS1.validate().unwrap();
// Misuse the client's identity instead of the server's identity. Any
// identity other than `server_tls.server_identity` would work.
let client_tls = test_util::BAR_NS1.validate().expect("valid client cert");
let client_target = test_util::BAR_NS1.crt().name().clone();
let (client_result, server_result) = run_test(
Conditional::Some((client_tls, client_target)),
|conn| write_then_read(conn, PING),
Conditional::Some(server_tls),
|(_, conn)| read_then_write(conn, START_OF_TLS.len(), PONG),
);
// The server's connection will succeed with the TLS client hello passed
// through, because the SNI doesn't match its identity.
assert_eq!(client_result.is_tls(), false);
assert!(client_result.result.is_err());
assert_eq!(server_result.is_tls(), false);
assert_eq!(&server_result.result.unwrap()[..], START_OF_TLS);
}
struct Transported<R> {
/// The value of `Connection::peer_identity()` for the established connection.
///
/// This will be `None` if we never even get a `Connection`.
peer_identity: Option<tls::PeerIdentity>,
/// The connection's result.
result: Result<R, io::Error>,
}
impl<R> Transported<R> {
fn is_tls(&self) -> bool {
self.peer_identity
.as_ref()
.map(|i| i.is_some())
.unwrap_or(false)
}
}
/// Runs a test for a single TCP connection. `client` processes the connection
/// on the client side and `server` processes the connection on the server
/// side.
fn run_test<C, CF, CR, S, SF, SR>(
client_tls: tls::Conditional<(CrtKey, Name)>,
client: C,
server_tls: tls::Conditional<CrtKey>,
server: S,
) -> (Transported<CR>, Transported<SR>)
where
// Client
C: FnOnce(ClientConnection) -> CF + Clone + Send + 'static,
CF: Future<Item = CR, Error = io::Error> + Send + 'static,
CR: Send + 'static,
// Server
S: Fn(ServerConnection) -> SF + Clone + Send + 'static,
SF: Future<Item = SR, Error = io::Error> + Send + 'static,
SR: Send + 'static,
{
{
use tracing_subscriber::{fmt, EnvFilter};
let sub = fmt::Subscriber::builder()
.with_env_filter(EnvFilter::from_default_env())
.finish();
let _ = tracing::subscriber::set_global_default(sub);
}
let (client_tls, client_target_name) = match client_tls {
Conditional::Some((crtkey, name)) => (
Conditional::Some(ClientTls(crtkey)),
Conditional::Some(name),
),
Conditional::None(reason) => (Conditional::None(reason.clone()), Conditional::None(reason)),
};
// A future that will receive a single connection.
let (server, server_addr, server_result) = {
// Saves the result of every connection.
let (sender, receiver) = mpsc::channel::<Transported<SR>>();
// Let the OS decide the port number and then return the resulting
// `SocketAddr` so the client can connect to it. This allows multiple
// tests to run at once, which wouldn't work if they all were bound on
// a fixed port.
let addr = "127.0.0.1:0".parse::<SocketAddr>().unwrap();
let listen = Bind::new(addr, None).bind().expect("must bind");
let listen_addr = listen.listen_addr();
let sender = service_fn(move |(meta, conn): ServerConnection| {
let sender = sender.clone();
let peer_identity = Some(meta.peer_identity.clone());
let server = Box::new(server((meta, conn)).then(move |result| {
sender
.send(Transported {
peer_identity,
result,
})
.expect("send result");
future::ok::<(), Never>(())
}));
Box::new(future::ok::<_, Never>(server))
});
let accept = AcceptTls::new(server_tls, sender);
let server = Server::Init { listen, accept };
(server, listen_addr, receiver)
};
// A future that will open a single connection to the server.
let (client, client_result) = {
// Saves the result of the single connection. This could be a simpler
// type, e.g. `Arc<Mutex>`, but using a channel simplifies the code and
// parallels the server side.
let (sender, receiver) = mpsc::channel::<Transported<CR>>();
let sender_clone = sender.clone();
let peer_identity = Some(client_target_name.clone());
let client = tls::ConnectLayer::new(client_tls)
.layer(connect::Connect::new(None))
.oneshot(Target(server_addr, client_target_name))
.map_err(move |e| {
sender_clone
.send(Transported {
peer_identity: None,
result: Err(e),
})
.expect("send result");
()
})
.and_then(move |conn| {
client(conn).then(move |result| {
sender
.send(Transported {
peer_identity,
result,
})
.expect("send result");
Ok(())
})
});
(client, receiver)
};
tokio::run(server.join(client).map(|_| ()));
let client_result = client_result.try_recv().expect("client complete");
// XXX: This assumes that only one connection is accepted. TODO: allow the
// caller to observe the results for every connection, once we have tests
// that allow accepting multiple connections.
let server_result = server_result.try_recv().expect("server complete");
(client_result, server_result)
}
/// Writes `to_write` and shuts down the write side, then reads until EOF,
/// returning the bytes read.
fn write_then_read(
conn: impl AsyncRead + AsyncWrite,
to_write: &'static [u8],
) -> impl Future<Item = Vec<u8>, Error = io::Error> {
write_and_shutdown(conn, to_write)
.and_then(|conn| io::read_to_end(conn, Vec::new()))
.map(|(_conn, r)| r)
}
/// Reads until EOF then writes `to_write` and shuts down the write side,
/// returning the bytes read.
fn read_then_write(
conn: impl AsyncRead + AsyncWrite,
read_prefix_len: usize,
to_write: &'static [u8],
) -> impl Future<Item = Vec<u8>, Error = io::Error> {
io::read_exact(conn, vec![0; read_prefix_len])
.and_then(move |(conn, r)| write_and_shutdown(conn, to_write).map(|_conn| r))
}
/// writes `to_write` to `conn` and then shuts down the write side of `conn`.
fn write_and_shutdown<T: AsyncRead + AsyncWrite>(
conn: T,
to_write: &'static [u8],
) -> impl Future<Item = T, Error = io::Error> {
io::write_all(conn, to_write).and_then(|(mut conn, _)| {
conn.shutdown()?;
Ok(conn)
})
}
const PING: &[u8] = b"ping";
const PONG: &[u8] = b"pong";
const START_OF_TLS: &[u8] = &[22, 3, 1]; // ContentType::handshake version 3.1
enum Server<A: Accept<ServerConnection>>
where
AcceptTls<A, CrtKey>: Accept<<Listen as CoreListen>::Connection>,
{
Init {
listen: Listen,
accept: AcceptTls<A, CrtKey>,
},
Accepting(<AcceptTls<A, CrtKey> as Accept<<Listen as CoreListen>::Connection>>::Future),
Serving(<AcceptTls<A, CrtKey> as Accept<<Listen as CoreListen>::Connection>>::ConnectionFuture),
}
#[derive(Clone)]
struct Target(SocketAddr, Conditional<Name>);
#[derive(Clone)]
struct | ClientTls | identifier_name | |
AutomobilesOnSale.py | df_auto[df_auto.monthOfRegistration != 12]
# Univariate Analysis of : Sellers
sns.barplot(df_auto.seller.value_counts().index, df_auto.seller.value_counts().values, alpha=0.9)
plt.xlabel('Sellers')
plt.ylabel('Count')
plt.title('Distribution Of Car Sellers');
# As almost all of the Sellers are from private we can drop this feature
df_auto = df_auto.drop(['seller'], axis=1)
# Univariate Analysis of : Offer Type
sns.barplot(df_auto.offerType.value_counts().index, df_auto.offerType.value_counts().values, alpha=0.9)
plt.xlabel('Offer Type')
plt.ylabel('Count')
plt.title('Distribution Of Car Offers');
# As almost all of the Offers are from Angebot we can drop this feature
df_auto = df_auto.drop(['offerType'], axis=1)
print('Number of observation where price is 0 : ', df_auto[df_auto.price == 0]['price'].count())
# Number of observation where price is > 200000
df_auto[df_auto.price > 200000]['price'].count()
# Number of observation where price is < 200
df_auto[df_auto.price < 200]['price'].count()
# Considering outlier, selecting observations in between $200 & $200000
df_auto = df_auto[(df_auto.price > 200) & (df_auto.price < 200000)]
# Distribution of Price
sns.distplot(df_auto.price)
plt.xlabel("Price")
plt.ylabel('Frequency')
plt.title("Distribution of Car's Price");
# Logarithm of Price Distribution
sns.distplot(np.log(df_auto.price))
plt.xlabel("Logarithm of Car's Price")
plt.ylabel('Frequency')
plt.title("Distribution Log of Car's Price");
# Univariate Analysis of : AB Testing
sns.barplot(df_auto.abtest.value_counts().index, df_auto.abtest.value_counts().values, alpha=0.9)
plt.xlabel('Type of Testing')
plt.ylabel('Count')
plt.title('Distribution Of Car Testing');
# Univariate Analysis of : Vehicle Type
plt.figure(figsize=(12,6))
sns.barplot(df_auto.vehicleType.value_counts().index, df_auto.vehicleType.value_counts().values, alpha=0.9)
plt.xlabel('Type of Vehicle')
plt.ylabel('Count')
plt.title('Distribution Of Vehicle Types');
# Univariate Analysis of : Gear Type
sns.barplot(df_auto.gearbox.value_counts().index, df_auto.gearbox.value_counts().values, alpha=0.9)
plt.xlabel('Type of Gears')
plt.ylabel('Count')
plt.title('Distribution Of Types of Gears');
print('No of PowerPS is having value of 0 : ', df_auto[df_auto.powerPS == 0]['powerPS'].count())
print('No of PowerPS is having value of more than 662 is : ', df_auto[df_auto.powerPS > 662]['powerPS'].count())
# Removng cars having HP of 662 as the latest technology doesn't have HP > 662
# Removing observations having HP of 0 - as its meaningless
df_auto = df_auto[(df_auto.powerPS > 0) & (df_auto.powerPS < 663)]
# Distribution of Top 10 Horse Powered car sold
plt.figure(figsize=(16,6))
sns.lineplot(df_auto[df_auto.powerPS > 0].powerPS.value_counts()[:10].index,
df_auto[df_auto.powerPS > 0].powerPS.value_counts()[:10].values)
plt.xticks(df_auto[df_auto.powerPS > 0].powerPS.value_counts()[:10].index)
plt.xlabel('Horse Power')
plt.ylabel('No. of Car Sold With Available Horse Power')
plt.title('Top 10 Car Sold with Horse Power Variation');
# Distribution of Top 10 car's moel sold
sns.lineplot(df_auto.model.value_counts()[:10].index, df_auto.model.value_counts()[:10].values)
plt.xticks(df_auto.model.value_counts()[:10].index)
plt.xlabel('Cars Model')
plt.ylabel('Frequency')
plt.title('Top 10 Cars Model Sold');
# Ditribution of Mesurement of KM a car ran before coming for sale
plt.figure(figsize=(12,6))
sns.distplot(df_auto.kilometer)
plt.xlabel("KM's Car Ran")
plt.ylabel('Frequency')
plt.title('Car was Driven in KM');
# No. of car registerd in a month for sale
plt.figure(figsize=(12,6))
sns.lineplot(df_auto.monthOfRegistration.value_counts().index, df_auto.monthOfRegistration.value_counts().values)
plt.xticks(df_auto.monthOfRegistration.value_counts().index.sort_values(),
['Jan', 'Feb', 'Mar', 'Apr', 'May', 'June', 'July', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'])
plt.xlabel("Month Of Registration")
plt.ylabel('Frequency')
plt.title('No. Of Cars Sold In Month');
# Univariate Analysis of : fuel Type
plt.figure(figsize=(12,6))
sns.barplot(df_auto.fuelType.value_counts().index, df_auto.fuelType.value_counts().values, alpha=0.9)
plt.xlabel('Types of Fuel')
plt.ylabel('Frequency')
plt.title('Distribution Of Car with Types of Fuel');
# Univariate Analysis of : Top 10 Car's Brand
plt.figure(figsize=(12,6))
sns.barplot(df_auto.brand.value_counts()[:10].index, df_auto.brand.value_counts()[:10].values, alpha=0.9)
plt.xlabel("Car's Brand")
plt.ylabel('Frequency')
plt.title("Top 10 Car's Brand Sold");
# Univariate Analysis of : Car was Repaired: yes/no before sale
sns.barplot(df_auto.notRepairedDamage.value_counts().index, df_auto.notRepairedDamage.value_counts().values, alpha=0.9)
plt.xlabel('Repaired Post Damage')
plt.ylabel('Frequency')
plt.title('Distribution Of Car Not Repaired Damaged');
# Investigating overall structure of feature : yearOfRegistration
df_auto.yearOfRegistration.describe()
# Observation which is older than 1989
df_auto[df_auto.yearOfRegistration < 1989]['yearOfRegistration'].count()
# Observation which is more than 2019
df_auto[df_auto.yearOfRegistration > 2019]['yearOfRegistration'].count()
# Taking into considearion which is in the year of between 1989 & 2019
df_auto = df_auto[(df_auto.yearOfRegistration >= 1989) & (df_auto.yearOfRegistration <= 2019)]
# No of car was registered for sale throughout the year | plt.xlabel('Years of Registration')
plt.ylabel('Price')
plt.title('Variation Of Price with Year');
# No of days it took to sold while purchasing from E-bay
days = []
for time1, time2 in zip(df_auto['dateCrawled'], df_auto['lastSeen']):
time = datetime.strptime(time2, '%Y-%m-%d %H:%M:%S') - datetime.strptime(time1, '%Y-%m-%d %H:%M:%S')
days.append(time.days)
df_auto['Sold_In_Days'] = days
# Investigating the feature : Sold_In_Days
df_auto.Sold_In_Days.describe()
# Removing the observations having negative values as it doesn't make any sense
df_auto = df_auto[df_auto.Sold_In_Days >= 0]
# Distribution of no. of cars sold in days
plt.figure(figsize=(12,6))
sns.barplot(df_auto.Sold_In_Days.value_counts().index, df_auto.Sold_In_Days.value_counts().values, alpha=0.9)
plt.xlabel('Sold In Days')
plt.ylabel('Frequency')
plt.title('No. Of Cars Sold in Days');
# Dropping the below mentioned features as they are unnecesary now while building models
# All the postal code is from Germany only
df_auto = df_auto.drop(['dateCrawled', 'lastSeen', 'dateCreated', 'nrOfPictures', 'model', 'abtest', 'postalCode'], axis=1)
# Corelation matrix with Heatmap annotation
sns.heatmap(df_auto.corr(), annot=True);
# Function to get the Chi Square value & P value
def chi_p_value(cat1, cat2):
table = pd.crosstab(df_auto[cat1], df_auto[cat2])
chi2, p, dof, expected = chi2_contingency(table.values)
if p < 0.05:
print("Chi Square Statistics and p value of {} and {} is {}, {}".format(cat1, cat2, chi2, p))
# Extracting Chi Square value & p value
for i in range(len(df_auto.select_dtypes(include=['object']).columns)):
for cat2 in df_auto.select_dtypes(include=['object']).columns[df_auto.select_dtypes(include=['object']).columns !=
df_auto.select_dtypes(include=['object']).columns[i]]:
chi_p_value(df_auto.select_dtypes(include=['object']).columns[i], cat2)
# Taking into consideration of Sold_In_Dyas which is <= 5 days for bi-var | sns.lineplot(df_auto.groupby('yearOfRegistration')['price'].count().index,
df_auto.groupby('yearOfRegistration')['price'].count().values,
data=df_auto) | random_line_split |
AutomobilesOnSale.py | values, alpha=0.9)
plt.xlabel('Type of Vehicle')
plt.ylabel('Count')
plt.title('Distribution Of Vehicle Types');
# Univariate Analysis of : Gear Type
sns.barplot(df_auto.gearbox.value_counts().index, df_auto.gearbox.value_counts().values, alpha=0.9)
plt.xlabel('Type of Gears')
plt.ylabel('Count')
plt.title('Distribution Of Types of Gears');
print('No of PowerPS is having value of 0 : ', df_auto[df_auto.powerPS == 0]['powerPS'].count())
print('No of PowerPS is having value of more than 662 is : ', df_auto[df_auto.powerPS > 662]['powerPS'].count())
# Removng cars having HP of 662 as the latest technology doesn't have HP > 662
# Removing observations having HP of 0 - as its meaningless
df_auto = df_auto[(df_auto.powerPS > 0) & (df_auto.powerPS < 663)]
# Distribution of Top 10 Horse Powered car sold
plt.figure(figsize=(16,6))
sns.lineplot(df_auto[df_auto.powerPS > 0].powerPS.value_counts()[:10].index,
df_auto[df_auto.powerPS > 0].powerPS.value_counts()[:10].values)
plt.xticks(df_auto[df_auto.powerPS > 0].powerPS.value_counts()[:10].index)
plt.xlabel('Horse Power')
plt.ylabel('No. of Car Sold With Available Horse Power')
plt.title('Top 10 Car Sold with Horse Power Variation');
# Distribution of Top 10 car's moel sold
sns.lineplot(df_auto.model.value_counts()[:10].index, df_auto.model.value_counts()[:10].values)
plt.xticks(df_auto.model.value_counts()[:10].index)
plt.xlabel('Cars Model')
plt.ylabel('Frequency')
plt.title('Top 10 Cars Model Sold');
# Ditribution of Mesurement of KM a car ran before coming for sale
plt.figure(figsize=(12,6))
sns.distplot(df_auto.kilometer)
plt.xlabel("KM's Car Ran")
plt.ylabel('Frequency')
plt.title('Car was Driven in KM');
# No. of car registerd in a month for sale
plt.figure(figsize=(12,6))
sns.lineplot(df_auto.monthOfRegistration.value_counts().index, df_auto.monthOfRegistration.value_counts().values)
plt.xticks(df_auto.monthOfRegistration.value_counts().index.sort_values(),
['Jan', 'Feb', 'Mar', 'Apr', 'May', 'June', 'July', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'])
plt.xlabel("Month Of Registration")
plt.ylabel('Frequency')
plt.title('No. Of Cars Sold In Month');
# Univariate Analysis of : fuel Type
plt.figure(figsize=(12,6))
sns.barplot(df_auto.fuelType.value_counts().index, df_auto.fuelType.value_counts().values, alpha=0.9)
plt.xlabel('Types of Fuel')
plt.ylabel('Frequency')
plt.title('Distribution Of Car with Types of Fuel');
# Univariate Analysis of : Top 10 Car's Brand
plt.figure(figsize=(12,6))
sns.barplot(df_auto.brand.value_counts()[:10].index, df_auto.brand.value_counts()[:10].values, alpha=0.9)
plt.xlabel("Car's Brand")
plt.ylabel('Frequency')
plt.title("Top 10 Car's Brand Sold");
# Univariate Analysis of : Car was Repaired: yes/no before sale
sns.barplot(df_auto.notRepairedDamage.value_counts().index, df_auto.notRepairedDamage.value_counts().values, alpha=0.9)
plt.xlabel('Repaired Post Damage')
plt.ylabel('Frequency')
plt.title('Distribution Of Car Not Repaired Damaged');
# Investigating overall structure of feature : yearOfRegistration
df_auto.yearOfRegistration.describe()
# Observation which is older than 1989
df_auto[df_auto.yearOfRegistration < 1989]['yearOfRegistration'].count()
# Observation which is more than 2019
df_auto[df_auto.yearOfRegistration > 2019]['yearOfRegistration'].count()
# Taking into considearion which is in the year of between 1989 & 2019
df_auto = df_auto[(df_auto.yearOfRegistration >= 1989) & (df_auto.yearOfRegistration <= 2019)]
# No of car was registered for sale throughout the year
sns.lineplot(df_auto.groupby('yearOfRegistration')['price'].count().index,
df_auto.groupby('yearOfRegistration')['price'].count().values,
data=df_auto)
plt.xlabel('Years of Registration')
plt.ylabel('Price')
plt.title('Variation Of Price with Year');
# No of days it took to sold while purchasing from E-bay
days = []
for time1, time2 in zip(df_auto['dateCrawled'], df_auto['lastSeen']):
time = datetime.strptime(time2, '%Y-%m-%d %H:%M:%S') - datetime.strptime(time1, '%Y-%m-%d %H:%M:%S')
days.append(time.days)
df_auto['Sold_In_Days'] = days
# Investigating the feature : Sold_In_Days
df_auto.Sold_In_Days.describe()
# Removing the observations having negative values as it doesn't make any sense
df_auto = df_auto[df_auto.Sold_In_Days >= 0]
# Distribution of no. of cars sold in days
plt.figure(figsize=(12,6))
sns.barplot(df_auto.Sold_In_Days.value_counts().index, df_auto.Sold_In_Days.value_counts().values, alpha=0.9)
plt.xlabel('Sold In Days')
plt.ylabel('Frequency')
plt.title('No. Of Cars Sold in Days');
# Dropping the below mentioned features as they are unnecesary now while building models
# All the postal code is from Germany only
df_auto = df_auto.drop(['dateCrawled', 'lastSeen', 'dateCreated', 'nrOfPictures', 'model', 'abtest', 'postalCode'], axis=1)
# Corelation matrix with Heatmap annotation
sns.heatmap(df_auto.corr(), annot=True);
# Function to get the Chi Square value & P value
def chi_p_value(cat1, cat2):
table = pd.crosstab(df_auto[cat1], df_auto[cat2])
chi2, p, dof, expected = chi2_contingency(table.values)
if p < 0.05:
print("Chi Square Statistics and p value of {} and {} is {}, {}".format(cat1, cat2, chi2, p))
# Extracting Chi Square value & p value
for i in range(len(df_auto.select_dtypes(include=['object']).columns)):
for cat2 in df_auto.select_dtypes(include=['object']).columns[df_auto.select_dtypes(include=['object']).columns !=
df_auto.select_dtypes(include=['object']).columns[i]]:
chi_p_value(df_auto.select_dtypes(include=['object']).columns[i], cat2)
# Taking into consideration of Sold_In_Dyas which is <= 5 days for bi-variate analysis
# It will give us the top most sold cars in first consecutive 5 days
df_auto_sold = df_auto[df_auto.Sold_In_Days < 5]
# Function to visualize bivariate analysis
def bivariate_analysis(param, xlabel):
df_auto_sold.groupby([param, 'Sold_In_Days'])['price'].count().unstack().plot(kind='bar')
plt.xticks(rotation=360)
plt.xlabel(xlabel)
plt.ylabel('Price')
plt.title('Price Distribution of ' + xlabel + ' Sold within 0-4 days');
bivariate_analysis('vehicleType', 'Types Of Vehicle')
bivariate_analysis('gearbox', 'Types Of Gear')
bivariate_analysis('fuelType', 'Types Of Fuel')
print("No. Of cars sold on the day the ad was published : ", df_auto[df_auto.Sold_In_Days == 0].count()[0])
print("No. Of cars sold on the 1st day the ad was published : ", df_auto[df_auto.Sold_In_Days == 1].count()[0])
print("No. Of cars sold on the 2nd day the ad was published : ", df_auto[df_auto.Sold_In_Days == 2].count()[0])
df_auto.head()
# Investigating the count of individual Categorical Features
for col in df_auto.select_dtypes(include=['object']).columns:
print(col, len(df_auto[col].unique()))
# Interactive Distribution of Horsepower with Price
# Visualization possible among year/month/days/gearbox/damage
def plot_year(year, month, days, gearbox, damage):
| data = df_auto[(df_auto.yearOfRegistration == year) & (df_auto.monthOfRegistration == month) &
(df_auto.Sold_In_Days == days) & (df_auto.gearbox == gearbox) &
(df_auto.notRepairedDamage == damage)]
area = 2 * df_auto.powerPS
data.plot.scatter('powerPS', 'price', s = area, linewidth = 1, edgecolor='k', figsize=(12,8), alpha=0.7)
plt.xlabel('Horse Power')
plt.ylabel('Price')
title = 'Variation of Price with Horse Power in ' + str(year)
plt.title(title) | identifier_body | |
AutomobilesOnSale.py | .price)
plt.xlabel("Price")
plt.ylabel('Frequency')
plt.title("Distribution of Car's Price");
# Logarithm of Price Distribution
sns.distplot(np.log(df_auto.price))
plt.xlabel("Logarithm of Car's Price")
plt.ylabel('Frequency')
plt.title("Distribution Log of Car's Price");
# Univariate Analysis of : AB Testing
sns.barplot(df_auto.abtest.value_counts().index, df_auto.abtest.value_counts().values, alpha=0.9)
plt.xlabel('Type of Testing')
plt.ylabel('Count')
plt.title('Distribution Of Car Testing');
# Univariate Analysis of : Vehicle Type
plt.figure(figsize=(12,6))
sns.barplot(df_auto.vehicleType.value_counts().index, df_auto.vehicleType.value_counts().values, alpha=0.9)
plt.xlabel('Type of Vehicle')
plt.ylabel('Count')
plt.title('Distribution Of Vehicle Types');
# Univariate Analysis of : Gear Type
sns.barplot(df_auto.gearbox.value_counts().index, df_auto.gearbox.value_counts().values, alpha=0.9)
plt.xlabel('Type of Gears')
plt.ylabel('Count')
plt.title('Distribution Of Types of Gears');
print('No of PowerPS is having value of 0 : ', df_auto[df_auto.powerPS == 0]['powerPS'].count())
print('No of PowerPS is having value of more than 662 is : ', df_auto[df_auto.powerPS > 662]['powerPS'].count())
# Removng cars having HP of 662 as the latest technology doesn't have HP > 662
# Removing observations having HP of 0 - as its meaningless
df_auto = df_auto[(df_auto.powerPS > 0) & (df_auto.powerPS < 663)]
# Distribution of Top 10 Horse Powered car sold
plt.figure(figsize=(16,6))
sns.lineplot(df_auto[df_auto.powerPS > 0].powerPS.value_counts()[:10].index,
df_auto[df_auto.powerPS > 0].powerPS.value_counts()[:10].values)
plt.xticks(df_auto[df_auto.powerPS > 0].powerPS.value_counts()[:10].index)
plt.xlabel('Horse Power')
plt.ylabel('No. of Car Sold With Available Horse Power')
plt.title('Top 10 Car Sold with Horse Power Variation');
# Distribution of Top 10 car's moel sold
sns.lineplot(df_auto.model.value_counts()[:10].index, df_auto.model.value_counts()[:10].values)
plt.xticks(df_auto.model.value_counts()[:10].index)
plt.xlabel('Cars Model')
plt.ylabel('Frequency')
plt.title('Top 10 Cars Model Sold');
# Ditribution of Mesurement of KM a car ran before coming for sale
plt.figure(figsize=(12,6))
sns.distplot(df_auto.kilometer)
plt.xlabel("KM's Car Ran")
plt.ylabel('Frequency')
plt.title('Car was Driven in KM');
# No. of car registerd in a month for sale
plt.figure(figsize=(12,6))
sns.lineplot(df_auto.monthOfRegistration.value_counts().index, df_auto.monthOfRegistration.value_counts().values)
plt.xticks(df_auto.monthOfRegistration.value_counts().index.sort_values(),
['Jan', 'Feb', 'Mar', 'Apr', 'May', 'June', 'July', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'])
plt.xlabel("Month Of Registration")
plt.ylabel('Frequency')
plt.title('No. Of Cars Sold In Month');
# Univariate Analysis of : fuel Type
plt.figure(figsize=(12,6))
sns.barplot(df_auto.fuelType.value_counts().index, df_auto.fuelType.value_counts().values, alpha=0.9)
plt.xlabel('Types of Fuel')
plt.ylabel('Frequency')
plt.title('Distribution Of Car with Types of Fuel');
# Univariate Analysis of : Top 10 Car's Brand
plt.figure(figsize=(12,6))
sns.barplot(df_auto.brand.value_counts()[:10].index, df_auto.brand.value_counts()[:10].values, alpha=0.9)
plt.xlabel("Car's Brand")
plt.ylabel('Frequency')
plt.title("Top 10 Car's Brand Sold");
# Univariate Analysis of : Car was Repaired: yes/no before sale
sns.barplot(df_auto.notRepairedDamage.value_counts().index, df_auto.notRepairedDamage.value_counts().values, alpha=0.9)
plt.xlabel('Repaired Post Damage')
plt.ylabel('Frequency')
plt.title('Distribution Of Car Not Repaired Damaged');
# Investigating overall structure of feature : yearOfRegistration
df_auto.yearOfRegistration.describe()
# Observation which is older than 1989
df_auto[df_auto.yearOfRegistration < 1989]['yearOfRegistration'].count()
# Observation which is more than 2019
df_auto[df_auto.yearOfRegistration > 2019]['yearOfRegistration'].count()
# Taking into considearion which is in the year of between 1989 & 2019
df_auto = df_auto[(df_auto.yearOfRegistration >= 1989) & (df_auto.yearOfRegistration <= 2019)]
# No of car was registered for sale throughout the year
sns.lineplot(df_auto.groupby('yearOfRegistration')['price'].count().index,
df_auto.groupby('yearOfRegistration')['price'].count().values,
data=df_auto)
plt.xlabel('Years of Registration')
plt.ylabel('Price')
plt.title('Variation Of Price with Year');
# No of days it took to sold while purchasing from E-bay
days = []
for time1, time2 in zip(df_auto['dateCrawled'], df_auto['lastSeen']):
time = datetime.strptime(time2, '%Y-%m-%d %H:%M:%S') - datetime.strptime(time1, '%Y-%m-%d %H:%M:%S')
days.append(time.days)
df_auto['Sold_In_Days'] = days
# Investigating the feature : Sold_In_Days
df_auto.Sold_In_Days.describe()
# Removing the observations having negative values as it doesn't make any sense
df_auto = df_auto[df_auto.Sold_In_Days >= 0]
# Distribution of no. of cars sold in days
plt.figure(figsize=(12,6))
sns.barplot(df_auto.Sold_In_Days.value_counts().index, df_auto.Sold_In_Days.value_counts().values, alpha=0.9)
plt.xlabel('Sold In Days')
plt.ylabel('Frequency')
plt.title('No. Of Cars Sold in Days');
# Dropping the below mentioned features as they are unnecesary now while building models
# All the postal code is from Germany only
df_auto = df_auto.drop(['dateCrawled', 'lastSeen', 'dateCreated', 'nrOfPictures', 'model', 'abtest', 'postalCode'], axis=1)
# Corelation matrix with Heatmap annotation
sns.heatmap(df_auto.corr(), annot=True);
# Function to get the Chi Square value & P value
def chi_p_value(cat1, cat2):
table = pd.crosstab(df_auto[cat1], df_auto[cat2])
chi2, p, dof, expected = chi2_contingency(table.values)
if p < 0.05:
print("Chi Square Statistics and p value of {} and {} is {}, {}".format(cat1, cat2, chi2, p))
# Extracting Chi Square value & p value
for i in range(len(df_auto.select_dtypes(include=['object']).columns)):
for cat2 in df_auto.select_dtypes(include=['object']).columns[df_auto.select_dtypes(include=['object']).columns !=
df_auto.select_dtypes(include=['object']).columns[i]]:
chi_p_value(df_auto.select_dtypes(include=['object']).columns[i], cat2)
# Taking into consideration of Sold_In_Dyas which is <= 5 days for bi-variate analysis
# It will give us the top most sold cars in first consecutive 5 days
df_auto_sold = df_auto[df_auto.Sold_In_Days < 5]
# Function to visualize bivariate analysis
def bivariate_analysis(param, xlabel):
df_auto_sold.groupby([param, 'Sold_In_Days'])['price'].count().unstack().plot(kind='bar')
plt.xticks(rotation=360)
plt.xlabel(xlabel)
plt.ylabel('Price')
plt.title('Price Distribution of ' + xlabel + ' Sold within 0-4 days');
bivariate_analysis('vehicleType', 'Types Of Vehicle')
bivariate_analysis('gearbox', 'Types Of Gear')
bivariate_analysis('fuelType', 'Types Of Fuel')
print("No. Of cars sold on the day the ad was published : ", df_auto[df_auto.Sold_In_Days == 0].count()[0])
print("No. Of cars sold on the 1st day the ad was published : ", df_auto[df_auto.Sold_In_Days == 1].count()[0])
print("No. Of cars sold on the 2nd day the ad was published : ", df_auto[df_auto.Sold_In_Days == 2].count()[0])
df_auto.head()
# Investigating the count of individual Categorical Features
for col in df_auto.select_dtypes(include=['object']).columns:
print(col, len(df_auto[col].unique()))
# Interactive Distribution of Horsepower with Price
# Visualization possible among year/month/days/gearbox/damage
def | plot_year | identifier_name | |
AutomobilesOnSale.py | 00) & (df_auto.price < 200000)]
# Distribution of Price
sns.distplot(df_auto.price)
plt.xlabel("Price")
plt.ylabel('Frequency')
plt.title("Distribution of Car's Price");
# Logarithm of Price Distribution
sns.distplot(np.log(df_auto.price))
plt.xlabel("Logarithm of Car's Price")
plt.ylabel('Frequency')
plt.title("Distribution Log of Car's Price");
# Univariate Analysis of : AB Testing
sns.barplot(df_auto.abtest.value_counts().index, df_auto.abtest.value_counts().values, alpha=0.9)
plt.xlabel('Type of Testing')
plt.ylabel('Count')
plt.title('Distribution Of Car Testing');
# Univariate Analysis of : Vehicle Type
plt.figure(figsize=(12,6))
sns.barplot(df_auto.vehicleType.value_counts().index, df_auto.vehicleType.value_counts().values, alpha=0.9)
plt.xlabel('Type of Vehicle')
plt.ylabel('Count')
plt.title('Distribution Of Vehicle Types');
# Univariate Analysis of : Gear Type
sns.barplot(df_auto.gearbox.value_counts().index, df_auto.gearbox.value_counts().values, alpha=0.9)
plt.xlabel('Type of Gears')
plt.ylabel('Count')
plt.title('Distribution Of Types of Gears');
print('No of PowerPS is having value of 0 : ', df_auto[df_auto.powerPS == 0]['powerPS'].count())
print('No of PowerPS is having value of more than 662 is : ', df_auto[df_auto.powerPS > 662]['powerPS'].count())
# Removng cars having HP of 662 as the latest technology doesn't have HP > 662
# Removing observations having HP of 0 - as its meaningless
df_auto = df_auto[(df_auto.powerPS > 0) & (df_auto.powerPS < 663)]
# Distribution of Top 10 Horse Powered car sold
plt.figure(figsize=(16,6))
sns.lineplot(df_auto[df_auto.powerPS > 0].powerPS.value_counts()[:10].index,
df_auto[df_auto.powerPS > 0].powerPS.value_counts()[:10].values)
plt.xticks(df_auto[df_auto.powerPS > 0].powerPS.value_counts()[:10].index)
plt.xlabel('Horse Power')
plt.ylabel('No. of Car Sold With Available Horse Power')
plt.title('Top 10 Car Sold with Horse Power Variation');
# Distribution of Top 10 car's moel sold
sns.lineplot(df_auto.model.value_counts()[:10].index, df_auto.model.value_counts()[:10].values)
plt.xticks(df_auto.model.value_counts()[:10].index)
plt.xlabel('Cars Model')
plt.ylabel('Frequency')
plt.title('Top 10 Cars Model Sold');
# Ditribution of Mesurement of KM a car ran before coming for sale
plt.figure(figsize=(12,6))
sns.distplot(df_auto.kilometer)
plt.xlabel("KM's Car Ran")
plt.ylabel('Frequency')
plt.title('Car was Driven in KM');
# No. of car registerd in a month for sale
plt.figure(figsize=(12,6))
sns.lineplot(df_auto.monthOfRegistration.value_counts().index, df_auto.monthOfRegistration.value_counts().values)
plt.xticks(df_auto.monthOfRegistration.value_counts().index.sort_values(),
['Jan', 'Feb', 'Mar', 'Apr', 'May', 'June', 'July', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'])
plt.xlabel("Month Of Registration")
plt.ylabel('Frequency')
plt.title('No. Of Cars Sold In Month');
# Univariate Analysis of : fuel Type
plt.figure(figsize=(12,6))
sns.barplot(df_auto.fuelType.value_counts().index, df_auto.fuelType.value_counts().values, alpha=0.9)
plt.xlabel('Types of Fuel')
plt.ylabel('Frequency')
plt.title('Distribution Of Car with Types of Fuel');
# Univariate Analysis of : Top 10 Car's Brand
plt.figure(figsize=(12,6))
sns.barplot(df_auto.brand.value_counts()[:10].index, df_auto.brand.value_counts()[:10].values, alpha=0.9)
plt.xlabel("Car's Brand")
plt.ylabel('Frequency')
plt.title("Top 10 Car's Brand Sold");
# Univariate Analysis of : Car was Repaired: yes/no before sale
sns.barplot(df_auto.notRepairedDamage.value_counts().index, df_auto.notRepairedDamage.value_counts().values, alpha=0.9)
plt.xlabel('Repaired Post Damage')
plt.ylabel('Frequency')
plt.title('Distribution Of Car Not Repaired Damaged');
# Investigating overall structure of feature : yearOfRegistration
df_auto.yearOfRegistration.describe()
# Observation which is older than 1989
df_auto[df_auto.yearOfRegistration < 1989]['yearOfRegistration'].count()
# Observation which is more than 2019
df_auto[df_auto.yearOfRegistration > 2019]['yearOfRegistration'].count()
# Taking into considearion which is in the year of between 1989 & 2019
df_auto = df_auto[(df_auto.yearOfRegistration >= 1989) & (df_auto.yearOfRegistration <= 2019)]
# No of car was registered for sale throughout the year
sns.lineplot(df_auto.groupby('yearOfRegistration')['price'].count().index,
df_auto.groupby('yearOfRegistration')['price'].count().values,
data=df_auto)
plt.xlabel('Years of Registration')
plt.ylabel('Price')
plt.title('Variation Of Price with Year');
# No of days it took to sold while purchasing from E-bay
days = []
for time1, time2 in zip(df_auto['dateCrawled'], df_auto['lastSeen']):
time = datetime.strptime(time2, '%Y-%m-%d %H:%M:%S') - datetime.strptime(time1, '%Y-%m-%d %H:%M:%S')
days.append(time.days)
df_auto['Sold_In_Days'] = days
# Investigating the feature : Sold_In_Days
df_auto.Sold_In_Days.describe()
# Removing the observations having negative values as it doesn't make any sense
df_auto = df_auto[df_auto.Sold_In_Days >= 0]
# Distribution of no. of cars sold in days
plt.figure(figsize=(12,6))
sns.barplot(df_auto.Sold_In_Days.value_counts().index, df_auto.Sold_In_Days.value_counts().values, alpha=0.9)
plt.xlabel('Sold In Days')
plt.ylabel('Frequency')
plt.title('No. Of Cars Sold in Days');
# Dropping the below mentioned features as they are unnecesary now while building models
# All the postal code is from Germany only
df_auto = df_auto.drop(['dateCrawled', 'lastSeen', 'dateCreated', 'nrOfPictures', 'model', 'abtest', 'postalCode'], axis=1)
# Corelation matrix with Heatmap annotation
sns.heatmap(df_auto.corr(), annot=True);
# Function to get the Chi Square value & P value
def chi_p_value(cat1, cat2):
table = pd.crosstab(df_auto[cat1], df_auto[cat2])
chi2, p, dof, expected = chi2_contingency(table.values)
if p < 0.05:
print("Chi Square Statistics and p value of {} and {} is {}, {}".format(cat1, cat2, chi2, p))
# Extracting Chi Square value & p value
for i in range(len(df_auto.select_dtypes(include=['object']).columns)):
for cat2 in df_auto.select_dtypes(include=['object']).columns[df_auto.select_dtypes(include=['object']).columns !=
df_auto.select_dtypes(include=['object']).columns[i]]:
chi_p_value(df_auto.select_dtypes(include=['object']).columns[i], cat2)
# Taking into consideration of Sold_In_Dyas which is <= 5 days for bi-variate analysis
# It will give us the top most sold cars in first consecutive 5 days
df_auto_sold = df_auto[df_auto.Sold_In_Days < 5]
# Function to visualize bivariate analysis
def bivariate_analysis(param, xlabel):
df_auto_sold.groupby([param, 'Sold_In_Days'])['price'].count().unstack().plot(kind='bar')
plt.xticks(rotation=360)
plt.xlabel(xlabel)
plt.ylabel('Price')
plt.title('Price Distribution of ' + xlabel + ' Sold within 0-4 days');
bivariate_analysis('vehicleType', 'Types Of Vehicle')
bivariate_analysis('gearbox', 'Types Of Gear')
bivariate_analysis('fuelType', 'Types Of Fuel')
print("No. Of cars sold on the day the ad was published : ", df_auto[df_auto.Sold_In_Days == 0].count()[0])
print("No. Of cars sold on the 1st day the ad was published : ", df_auto[df_auto.Sold_In_Days == 1].count()[0])
print("No. Of cars sold on the 2nd day the ad was published : ", df_auto[df_auto.Sold_In_Days == 2].count()[0])
df_auto.head()
# Investigating the count of individual Categorical Features
for col in df_auto.select_dtypes(include=['object']).columns:
| print(col, len(df_auto[col].unique())) | conditional_block | |
metapipeline.go | the generation of the effective jenkins-x pipeline config
createEffectivePipelineStepName = "create-effective-pipeline"
// createTektonCRDsStepName is the meta pipeline step name for the Tekton CRD creation
createTektonCRDsStepName = "create-tekton-crds"
tektonBaseDir = "/workspace"
)
// CRDCreationParameters are the parameters needed to create the Tekton CRDs
type CRDCreationParameters struct {
Namespace string
Context string
PipelineName string
ResourceName string
PipelineKind string
BuildNumber string
GitInfo gits.GitRepository
BranchIdentifier string
PullRef prow.PullRefs
SourceDir string
PodTemplates map[string]*corev1.Pod
ServiceAccount string
Labels []string
EnvVars []string
DefaultImage string
Apps []jenkinsv1.App
VersionsDir string
}
// CreateMetaPipelineCRDs creates the Tekton CRDs needed to execute the meta pipeline.
// The meta pipeline is responsible to checkout the source repository at the right revision, allows Jenkins-X Apps
// to modify the pipeline (via modifying the configuration on the file system) and finally triggering the actual
// pipeline build.
// An error is returned in case the creation of the Tekton CRDs fails.
func CreateMetaPipelineCRDs(params CRDCreationParameters) (*tekton.CRDWrapper, error) {
parsedPipeline, err := createPipeline(params)
if err != nil {
return nil, err
}
labels, err := buildLabels(params)
if err != nil {
return nil, err
}
pipeline, tasks, structure, err := parsedPipeline.GenerateCRDs(params.PipelineName, params.BuildNumber, params.ResourceName, params.Namespace, params.PodTemplates, params.VersionsDir, nil, params.SourceDir, labels, params.DefaultImage)
if err != nil {
return nil, err
}
revision := params.PullRef.BaseSha
if revision == "" {
revision = params.PullRef.BaseBranch
}
resources := []*pipelineapi.PipelineResource{tekton.GenerateSourceRepoResource(params.ResourceName, ¶ms.GitInfo, revision)}
run := tekton.CreatePipelineRun(resources, pipeline.Name, pipeline.APIVersion, labels, params.ServiceAccount, nil, nil, nil)
tektonCRDs, err := tekton.NewCRDWrapper(pipeline, tasks, resources, structure, run)
if err != nil {
return nil, err
}
return tektonCRDs, nil
}
// GetExtendingApps returns the list of apps which are installed in the cluster registered for extending the pipeline.
// An app registers its interest in extending the pipeline by having the 'pipeline-extension' label set.
func GetExtendingApps(jxClient versioned.Interface, namespace string) ([]jenkinsv1.App, error) {
listOptions := metav1.ListOptions{}
listOptions.LabelSelector = fmt.Sprintf(apps.AppTypeLabel+" in (%s)", apps.PipelineExtension)
appsList, err := jxClient.JenkinsV1().Apps(namespace).List(listOptions)
if err != nil {
return nil, errors.Wrap(err, "error retrieving pipeline contributor apps")
}
return appsList.Items, nil
}
// createPipeline builds the parsed/typed pipeline which servers as input for the Tekton CRD creation.
func createPipeline(params CRDCreationParameters) (*syntax.ParsedPipeline, error) {
steps, err := buildSteps(params)
if err != nil {
return nil, errors.Wrap(err, "unable to create app extending pipeline steps")
}
stage := syntax.Stage{
Name: appExtensionStageName,
Steps: steps,
Agent: &syntax.Agent{
Image: determineDefaultStepImage(params.DefaultImage),
},
}
parsedPipeline := &syntax.ParsedPipeline{
Stages: []syntax.Stage{stage},
}
env := buildEnvParams(params)
parsedPipeline.AddContainerEnvVarsToPipeline(env)
return parsedPipeline, nil
}
// buildSteps builds the meta pipeline steps.
// The tasks of the meta pipeline are:
// 1) make sure the right commits are merged
// 2) create the effective pipeline and write it to disk
// 3) one step for each extending app
// 4) create Tekton CRDs for the meta pipeline
func | (params CRDCreationParameters) ([]syntax.Step, error) {
var steps []syntax.Step
// 1)
step := stepMergePullRefs(params.PullRef)
steps = append(steps, step)
// 2)
step = stepEffectivePipeline(params)
steps = append(steps, step)
log.Logger().Debugf("creating pipeline steps for extending apps")
// 3)
for _, app := range params.Apps {
if app.Spec.PipelineExtension == nil {
log.Logger().Warnf("Skipping app %s in meta pipeline. It contains label %s with value %s, but does not contain PipelineExtension fields.", app.Name, apps.AppTypeLabel, apps.PipelineExtension)
continue
}
extension := app.Spec.PipelineExtension
step := syntax.Step{
Name: extension.Name,
Image: extension.Image,
Command: extension.Command,
Arguments: extension.Args,
}
log.Logger().Debugf("App %s contributes with step %s", app.Name, util.PrettyPrint(step))
steps = append(steps, step)
}
// 4)
step = stepCreateTektonCRDs(params)
steps = append(steps, step)
return steps, nil
}
func stepMergePullRefs(pullRefs prow.PullRefs) syntax.Step {
// we only need to run the merge step in case there is anything to merge
// Tekton has at this stage the base branch already checked out
if len(pullRefs.ToMerge) == 0 {
return stepSkip(mergePullRefsStepName, "Nothing to merge")
}
args := []string{"--verbose", "--baseBranch", pullRefs.BaseBranch, "--baseSHA", pullRefs.BaseSha}
for _, mergeSha := range pullRefs.ToMerge {
args = append(args, "--sha", mergeSha)
}
step := syntax.Step{
Name: mergePullRefsStepName,
Comment: "Pipeline step merging pull refs",
Command: "jx step git merge",
Arguments: args,
}
return step
}
func stepEffectivePipeline(params CRDCreationParameters) syntax.Step {
args := []string{"--output-dir", "."}
if params.Context != "" {
args = append(args, "--context", params.Context)
}
step := syntax.Step{
Name: createEffectivePipelineStepName,
Comment: "Pipeline step creating the effective pipeline configuration",
Command: "jx step syntax effective",
Arguments: args,
}
return step
}
func stepCreateTektonCRDs(params CRDCreationParameters) syntax.Step {
args := []string{"--clone-dir", filepath.Join(tektonBaseDir, params.SourceDir)}
args = append(args, "--kind", params.PipelineKind)
for prID := range params.PullRef.ToMerge {
args = append(args, "--pr-number", prID)
// there might be a batch build building multiple PRs, in which case we just use the first in this case
break
}
args = append(args, "--service-account", params.ServiceAccount)
args = append(args, "--source", params.SourceDir)
args = append(args, "--branch", params.BranchIdentifier)
args = append(args, "--build-number", params.BuildNumber)
if params.Context != "" {
args = append(args, "--context", params.Context)
}
for _, l := range params.Labels {
args = append(args, "--label", l)
}
for _, e := range params.EnvVars {
args = append(args, "--env", e)
}
step := syntax.Step{
Name: createTektonCRDsStepName,
Comment: "Pipeline step to create the Tekton CRDs for the actual pipeline run",
Command: "jx step create task",
Arguments: args,
}
return step
}
func stepSkip(stepName string, msg string) syntax.Step {
skipMsg := fmt.Sprintf("SKIP %s: %s", stepName, msg)
step := syntax.Step{
Name: stepName,
Comment: skipMsg,
Command: "echo",
Arguments: []string{fmt.Sprintf("'%s'", skipMsg)},
}
return step
}
func determineDefaultStepImage(defaultImage string) string {
if defaultImage != "" {
return defaultImage
}
return syntax.DefaultContainerImage
}
func buildEnvParams(params CRDCreationParameters) []corev1.EnvVar {
var envVars []corev1.EnvVar
envVars = append(envVars, corev1.EnvVar{
Name: "JX_LOG_FORMAT",
Value: "json",
})
envVars = append(envVars, corev1.EnvVar{
Name: "BUILD_NUMBER",
Value: params.BuildNumber,
})
envVars = append(envVars, corev1.EnvVar{
Name: "PIPELINE_KIND",
Value: params.PipelineKind,
})
envVars = append(envVars, corev1.EnvVar{
Name: "PULL_REFS",
Value: params.PullRef.String(),
})
context := params.Context
if context != "" {
envVars = append(envVars, corev1.EnvVar{
Name: "PIPELINE_CONTEXT",
| buildSteps | identifier_name |
metapipeline.go | generation of the effective jenkins-x pipeline config
createEffectivePipelineStepName = "create-effective-pipeline"
// createTektonCRDsStepName is the meta pipeline step name for the Tekton CRD creation
createTektonCRDsStepName = "create-tekton-crds"
tektonBaseDir = "/workspace"
)
// CRDCreationParameters are the parameters needed to create the Tekton CRDs
type CRDCreationParameters struct {
Namespace string
Context string
PipelineName string
ResourceName string
PipelineKind string
BuildNumber string
GitInfo gits.GitRepository
BranchIdentifier string
PullRef prow.PullRefs
SourceDir string
PodTemplates map[string]*corev1.Pod
ServiceAccount string
Labels []string
EnvVars []string
DefaultImage string
Apps []jenkinsv1.App
VersionsDir string
}
// CreateMetaPipelineCRDs creates the Tekton CRDs needed to execute the meta pipeline.
// The meta pipeline is responsible to checkout the source repository at the right revision, allows Jenkins-X Apps
// to modify the pipeline (via modifying the configuration on the file system) and finally triggering the actual
// pipeline build.
// An error is returned in case the creation of the Tekton CRDs fails.
func CreateMetaPipelineCRDs(params CRDCreationParameters) (*tekton.CRDWrapper, error) {
// Build the typed meta pipeline (merge refs, effective pipeline, app steps, CRD creation).
parsedPipeline, err := createPipeline(params)
if err != nil {
return nil, err
}
labels, err := buildLabels(params)
if err != nil {
return nil, err
}
// Translate the parsed pipeline into concrete Tekton Pipeline/Task objects plus structure.
pipeline, tasks, structure, err := parsedPipeline.GenerateCRDs(params.PipelineName, params.BuildNumber, params.ResourceName, params.Namespace, params.PodTemplates, params.VersionsDir, nil, params.SourceDir, labels, params.DefaultImage)
if err != nil {
return nil, err
}
// Prefer the exact base SHA; fall back to the branch name when no SHA is given.
revision := params.PullRef.BaseSha
if revision == "" {
revision = params.PullRef.BaseBranch
}
resources := []*pipelineapi.PipelineResource{tekton.GenerateSourceRepoResource(params.ResourceName, &params.GitInfo, revision)}
run := tekton.CreatePipelineRun(resources, pipeline.Name, pipeline.APIVersion, labels, params.ServiceAccount, nil, nil, nil)
// Bundle everything into a single wrapper for the caller to apply.
tektonCRDs, err := tekton.NewCRDWrapper(pipeline, tasks, resources, structure, run)
if err != nil {
return nil, err
}
return tektonCRDs, nil
}
// GetExtendingApps returns the list of apps which are installed in the cluster registered for extending the pipeline.
// An app registers its interest in extending the pipeline by having the 'pipeline-extension' label set.
func GetExtendingApps(jxClient versioned.Interface, namespace string) ([]jenkinsv1.App, error) {
listOptions := metav1.ListOptions{}
// Select only apps whose app-type label marks them as pipeline extensions.
listOptions.LabelSelector = fmt.Sprintf(apps.AppTypeLabel+" in (%s)", apps.PipelineExtension)
appsList, err := jxClient.JenkinsV1().Apps(namespace).List(listOptions)
if err != nil {
return nil, errors.Wrap(err, "error retrieving pipeline contributor apps")
}
return appsList.Items, nil
}
// createPipeline builds the parsed/typed pipeline which serves as input for the Tekton CRD creation.
func createPipeline(params CRDCreationParameters) (*syntax.ParsedPipeline, error) |
return parsedPipeline, nil
}
// buildSteps builds the meta pipeline steps.
// The tasks of the meta pipeline are:
// 1) make sure the right commits are merged
// 2) create the effective pipeline and write it to disk
// 3) one step for each extending app
// 4) create Tekton CRDs for the meta pipeline
func buildSteps(params CRDCreationParameters) ([]syntax.Step, error) {
var steps []syntax.Step
// 1) merge the pull refs (or a skip step when there is nothing to merge)
step := stepMergePullRefs(params.PullRef)
steps = append(steps, step)
// 2) write the effective pipeline configuration to disk
step = stepEffectivePipeline(params)
steps = append(steps, step)
log.Logger().Debugf("creating pipeline steps for extending apps")
// 3) each registered app with a PipelineExtension contributes one step
for _, app := range params.Apps {
if app.Spec.PipelineExtension == nil {
log.Logger().Warnf("Skipping app %s in meta pipeline. It contains label %s with value %s, but does not contain PipelineExtension fields.", app.Name, apps.AppTypeLabel, apps.PipelineExtension)
continue
}
extension := app.Spec.PipelineExtension
step := syntax.Step{
Name: extension.Name,
Image: extension.Image,
Command: extension.Command,
Arguments: extension.Args,
}
log.Logger().Debugf("App %s contributes with step %s", app.Name, util.PrettyPrint(step))
steps = append(steps, step)
}
// 4) finally create the Tekton CRDs for the actual pipeline run
step = stepCreateTektonCRDs(params)
steps = append(steps, step)
return steps, nil
}
// stepMergePullRefs builds the step that runs 'jx step git merge' to merge
// the pull refs into the already checked-out base branch.
func stepMergePullRefs(pullRefs prow.PullRefs) syntax.Step {
// we only need to run the merge step in case there is anything to merge
// Tekton has at this stage the base branch already checked out
if len(pullRefs.ToMerge) == 0 {
return stepSkip(mergePullRefsStepName, "Nothing to merge")
}
args := []string{"--verbose", "--baseBranch", pullRefs.BaseBranch, "--baseSHA", pullRefs.BaseSha}
for _, mergeSha := range pullRefs.ToMerge {
args = append(args, "--sha", mergeSha)
}
step := syntax.Step{
Name: mergePullRefsStepName,
Comment: "Pipeline step merging pull refs",
Command: "jx step git merge",
Arguments: args,
}
return step
}
// stepEffectivePipeline builds the step that writes the effective pipeline
// configuration to the working directory via 'jx step syntax effective'.
func stepEffectivePipeline(params CRDCreationParameters) syntax.Step {
	args := []string{"--output-dir", "."}
	if ctx := params.Context; ctx != "" {
		args = append(args, "--context", ctx)
	}
	return syntax.Step{
		Name:      createEffectivePipelineStepName,
		Comment:   "Pipeline step creating the effective pipeline configuration",
		Command:   "jx step syntax effective",
		Arguments: args,
	}
}
// stepCreateTektonCRDs builds the step that runs 'jx step create task' to
// generate the Tekton CRDs for the actual pipeline run.
func stepCreateTektonCRDs(params CRDCreationParameters) syntax.Step {
args := []string{"--clone-dir", filepath.Join(tektonBaseDir, params.SourceDir)}
args = append(args, "--kind", params.PipelineKind)
// NOTE(review): Go map iteration order is random, so with several PRs an
// arbitrary one is picked here — confirm that is acceptable for batch builds.
for prID := range params.PullRef.ToMerge {
args = append(args, "--pr-number", prID)
// there might be a batch build building multiple PRs, in which case we just use the first in this case
break
}
args = append(args, "--service-account", params.ServiceAccount)
args = append(args, "--source", params.SourceDir)
args = append(args, "--branch", params.BranchIdentifier)
args = append(args, "--build-number", params.BuildNumber)
if params.Context != "" {
args = append(args, "--context", params.Context)
}
for _, l := range params.Labels {
args = append(args, "--label", l)
}
for _, e := range params.EnvVars {
args = append(args, "--env", e)
}
step := syntax.Step{
Name: createTektonCRDsStepName,
Comment: "Pipeline step to create the Tekton CRDs for the actual pipeline run",
Command: "jx step create task",
Arguments: args,
}
return step
}
// stepSkip returns a no-op step that merely echoes why stepName was skipped.
func stepSkip(stepName string, msg string) syntax.Step {
	reason := fmt.Sprintf("SKIP %s: %s", stepName, msg)
	return syntax.Step{
		Name:      stepName,
		Comment:   reason,
		Command:   "echo",
		Arguments: []string{fmt.Sprintf("'%s'", reason)},
	}
}
// determineDefaultStepImage returns defaultImage when set, falling back to
// the syntax package's default container image otherwise.
func determineDefaultStepImage(defaultImage string) string {
if defaultImage != "" {
return defaultImage
}
return syntax.DefaultContainerImage
}
func buildEnvParams(params CRDCreationParameters) []corev1.EnvVar {
var envVars []corev1.EnvVar
envVars = append(envVars, corev1.EnvVar{
Name: "JX_LOG_FORMAT",
Value: "json",
})
envVars = append(envVars, corev1.EnvVar{
Name: "BUILD_NUMBER",
Value: params.BuildNumber,
})
envVars = append(envVars, corev1.EnvVar{
Name: "PIPELINE_KIND",
Value: params.PipelineKind,
})
envVars = append(envVars, corev1.EnvVar{
Name: "PULL_REFS",
Value: params.PullRef.String(),
})
context := params.Context
if context != "" {
envVars = append(envVars, corev1.EnvVar{
Name: "PIPELINE_CONTEXT",
| {
steps, err := buildSteps(params)
if err != nil {
return nil, errors.Wrap(err, "unable to create app extending pipeline steps")
}
stage := syntax.Stage{
Name: appExtensionStageName,
Steps: steps,
Agent: &syntax.Agent{
Image: determineDefaultStepImage(params.DefaultImage),
},
}
parsedPipeline := &syntax.ParsedPipeline{
Stages: []syntax.Stage{stage},
}
env := buildEnvParams(params)
parsedPipeline.AddContainerEnvVarsToPipeline(env) | identifier_body |
metapipeline.go | the generation of the effective jenkins-x pipeline config
createEffectivePipelineStepName = "create-effective-pipeline"
// createTektonCRDsStepName is the meta pipeline step name for the Tekton CRD creation
createTektonCRDsStepName = "create-tekton-crds"
tektonBaseDir = "/workspace"
)
// CRDCreationParameters are the parameters needed to create the Tekton CRDs
type CRDCreationParameters struct {
Namespace string
Context string
PipelineName string | BranchIdentifier string
PullRef prow.PullRefs
SourceDir string
PodTemplates map[string]*corev1.Pod
ServiceAccount string
Labels []string
EnvVars []string
DefaultImage string
Apps []jenkinsv1.App
VersionsDir string
}
// CreateMetaPipelineCRDs creates the Tekton CRDs needed to execute the meta pipeline.
// The meta pipeline is responsible to checkout the source repository at the right revision, allows Jenkins-X Apps
// to modify the pipeline (via modifying the configuration on the file system) and finally triggering the actual
// pipeline build.
// An error is returned in case the creation of the Tekton CRDs fails.
func CreateMetaPipelineCRDs(params CRDCreationParameters) (*tekton.CRDWrapper, error) {
parsedPipeline, err := createPipeline(params)
if err != nil {
return nil, err
}
labels, err := buildLabels(params)
if err != nil {
return nil, err
}
// Generate the concrete Tekton Pipeline/Tasks plus the pipeline structure.
pipeline, tasks, structure, err := parsedPipeline.GenerateCRDs(params.PipelineName, params.BuildNumber, params.ResourceName, params.Namespace, params.PodTemplates, params.VersionsDir, nil, params.SourceDir, labels, params.DefaultImage)
if err != nil {
return nil, err
}
// Checkout revision: the exact base SHA when known, otherwise the base branch.
revision := params.PullRef.BaseSha
if revision == "" {
revision = params.PullRef.BaseBranch
}
resources := []*pipelineapi.PipelineResource{tekton.GenerateSourceRepoResource(params.ResourceName, &params.GitInfo, revision)}
run := tekton.CreatePipelineRun(resources, pipeline.Name, pipeline.APIVersion, labels, params.ServiceAccount, nil, nil, nil)
tektonCRDs, err := tekton.NewCRDWrapper(pipeline, tasks, resources, structure, run)
if err != nil {
return nil, err
}
return tektonCRDs, nil
}
// GetExtendingApps returns the list of apps which are installed in the cluster registered for extending the pipeline.
// An app registers its interest in extending the pipeline by having the 'pipeline-extension' label set.
func GetExtendingApps(jxClient versioned.Interface, namespace string) ([]jenkinsv1.App, error) {
	// Only apps labelled as pipeline extensions are of interest here.
	selector := fmt.Sprintf(apps.AppTypeLabel+" in (%s)", apps.PipelineExtension)
	opts := metav1.ListOptions{LabelSelector: selector}
	appList, err := jxClient.JenkinsV1().Apps(namespace).List(opts)
	if err != nil {
		return nil, errors.Wrap(err, "error retrieving pipeline contributor apps")
	}
	return appList.Items, nil
}
// createPipeline builds the parsed/typed pipeline which serves as input for the Tekton CRD creation.
func createPipeline(params CRDCreationParameters) (*syntax.ParsedPipeline, error) {
steps, err := buildSteps(params)
if err != nil {
return nil, errors.Wrap(err, "unable to create app extending pipeline steps")
}
// All meta pipeline steps run inside a single stage using the default image.
stage := syntax.Stage{
Name: appExtensionStageName,
Steps: steps,
Agent: &syntax.Agent{
Image: determineDefaultStepImage(params.DefaultImage),
},
}
parsedPipeline := &syntax.ParsedPipeline{
Stages: []syntax.Stage{stage},
}
// Propagate the build metadata environment variables to every container.
env := buildEnvParams(params)
parsedPipeline.AddContainerEnvVarsToPipeline(env)
return parsedPipeline, nil
}
// buildSteps builds the meta pipeline steps.
// The tasks of the meta pipeline are:
// 1) make sure the right commits are merged
// 2) create the effective pipeline and write it to disk
// 3) one step for each extending app
// 4) create Tekton CRDs for the meta pipeline
func buildSteps(params CRDCreationParameters) ([]syntax.Step, error) {
var steps []syntax.Step
// 1) merge pull refs (degrades to a skip step when nothing is to be merged)
step := stepMergePullRefs(params.PullRef)
steps = append(steps, step)
// 2) materialise the effective pipeline configuration
step = stepEffectivePipeline(params)
steps = append(steps, step)
log.Logger().Debugf("creating pipeline steps for extending apps")
// 3) one step per app that declares a PipelineExtension
for _, app := range params.Apps {
if app.Spec.PipelineExtension == nil {
log.Logger().Warnf("Skipping app %s in meta pipeline. It contains label %s with value %s, but does not contain PipelineExtension fields.", app.Name, apps.AppTypeLabel, apps.PipelineExtension)
continue
}
extension := app.Spec.PipelineExtension
step := syntax.Step{
Name: extension.Name,
Image: extension.Image,
Command: extension.Command,
Arguments: extension.Args,
}
log.Logger().Debugf("App %s contributes with step %s", app.Name, util.PrettyPrint(step))
steps = append(steps, step)
}
// 4) create the Tekton CRDs for the real pipeline run
step = stepCreateTektonCRDs(params)
steps = append(steps, step)
return steps, nil
}
// stepMergePullRefs builds the step merging the pull refs into the
// checked-out base branch; with nothing to merge it returns a skip step.
func stepMergePullRefs(pullRefs prow.PullRefs) syntax.Step {
	// Tekton already has the base branch checked out, so only merge if needed.
	if len(pullRefs.ToMerge) == 0 {
		return stepSkip(mergePullRefsStepName, "Nothing to merge")
	}
	args := []string{"--verbose", "--baseBranch", pullRefs.BaseBranch, "--baseSHA", pullRefs.BaseSha}
	for _, sha := range pullRefs.ToMerge {
		args = append(args, "--sha", sha)
	}
	return syntax.Step{
		Name:      mergePullRefsStepName,
		Comment:   "Pipeline step merging pull refs",
		Command:   "jx step git merge",
		Arguments: args,
	}
}
// stepEffectivePipeline builds the step running 'jx step syntax effective'
// which writes the effective pipeline configuration to the current directory.
func stepEffectivePipeline(params CRDCreationParameters) syntax.Step {
args := []string{"--output-dir", "."}
if params.Context != "" {
args = append(args, "--context", params.Context)
}
step := syntax.Step{
Name: createEffectivePipelineStepName,
Comment: "Pipeline step creating the effective pipeline configuration",
Command: "jx step syntax effective",
Arguments: args,
}
return step
}
// stepCreateTektonCRDs builds the 'jx step create task' step that creates
// the Tekton CRDs for the actual pipeline run.
func stepCreateTektonCRDs(params CRDCreationParameters) syntax.Step {
args := []string{"--clone-dir", filepath.Join(tektonBaseDir, params.SourceDir)}
args = append(args, "--kind", params.PipelineKind)
// NOTE(review): map iteration order is unspecified; an arbitrary PR number
// is chosen when several are present — verify this is intended.
for prID := range params.PullRef.ToMerge {
args = append(args, "--pr-number", prID)
// there might be a batch build building multiple PRs, in which case we just use the first in this case
break
}
args = append(args, "--service-account", params.ServiceAccount)
args = append(args, "--source", params.SourceDir)
args = append(args, "--branch", params.BranchIdentifier)
args = append(args, "--build-number", params.BuildNumber)
if params.Context != "" {
args = append(args, "--context", params.Context)
}
for _, l := range params.Labels {
args = append(args, "--label", l)
}
for _, e := range params.EnvVars {
args = append(args, "--env", e)
}
step := syntax.Step{
Name: createTektonCRDsStepName,
Comment: "Pipeline step to create the Tekton CRDs for the actual pipeline run",
Command: "jx step create task",
Arguments: args,
}
return step
}
// stepSkip returns a placeholder step that just echoes why stepName was skipped.
func stepSkip(stepName string, msg string) syntax.Step {
skipMsg := fmt.Sprintf("SKIP %s: %s", stepName, msg)
step := syntax.Step{
Name: stepName,
Comment: skipMsg,
Command: "echo",
Arguments: []string{fmt.Sprintf("'%s'", skipMsg)},
}
return step
}
// determineDefaultStepImage returns the configured defaultImage if set,
// otherwise the built-in default container image.
func determineDefaultStepImage(defaultImage string) string {
if defaultImage != "" {
return defaultImage
}
return syntax.DefaultContainerImage
}
func buildEnvParams(params CRDCreationParameters) []corev1.EnvVar {
var envVars []corev1.EnvVar
envVars = append(envVars, corev1.EnvVar{
Name: "JX_LOG_FORMAT",
Value: "json",
})
envVars = append(envVars, corev1.EnvVar{
Name: "BUILD_NUMBER",
Value: params.BuildNumber,
})
envVars = append(envVars, corev1.EnvVar{
Name: "PIPELINE_KIND",
Value: params.PipelineKind,
})
envVars = append(envVars, corev1.EnvVar{
Name: "PULL_REFS",
Value: params.PullRef.String(),
})
context := params.Context
if context != "" {
envVars = append(envVars, corev1.EnvVar{
Name: "PIPELINE_CONTEXT",
Value: context | ResourceName string
PipelineKind string
BuildNumber string
GitInfo gits.GitRepository | random_line_split |
metapipeline.go | generation of the effective jenkins-x pipeline config
createEffectivePipelineStepName = "create-effective-pipeline"
// createTektonCRDsStepName is the meta pipeline step name for the Tekton CRD creation
createTektonCRDsStepName = "create-tekton-crds"
tektonBaseDir = "/workspace"
)
// CRDCreationParameters are the parameters needed to create the Tekton CRDs
type CRDCreationParameters struct {
Namespace string // namespace the CRDs are created in
Context string // optional pipeline context (empty for the default context)
PipelineName string
ResourceName string
PipelineKind string
BuildNumber string
GitInfo gits.GitRepository // source repository information
BranchIdentifier string
PullRef prow.PullRefs // pull refs to merge before the build
SourceDir string // directory the source is checked out into
PodTemplates map[string]*corev1.Pod
ServiceAccount string
Labels []string
EnvVars []string
DefaultImage string // default container image for meta pipeline steps
Apps []jenkinsv1.App // apps registered to extend the pipeline
VersionsDir string
}
// CreateMetaPipelineCRDs creates the Tekton CRDs needed to execute the meta pipeline.
// The meta pipeline is responsible to checkout the source repository at the right revision, allows Jenkins-X Apps
// to modify the pipeline (via modifying the configuration on the file system) and finally triggering the actual
// pipeline build.
// An error is returned in case the creation of the Tekton CRDs fails.
func CreateMetaPipelineCRDs(params CRDCreationParameters) (*tekton.CRDWrapper, error) {
parsedPipeline, err := createPipeline(params)
if err != nil {
return nil, err
}
labels, err := buildLabels(params)
if err != nil {
return nil, err
}
// Turn the parsed pipeline into the Tekton Pipeline, Tasks and structure.
pipeline, tasks, structure, err := parsedPipeline.GenerateCRDs(params.PipelineName, params.BuildNumber, params.ResourceName, params.Namespace, params.PodTemplates, params.VersionsDir, nil, params.SourceDir, labels, params.DefaultImage)
if err != nil {
return nil, err
}
// Use the exact base SHA when available, otherwise fall back to the branch.
revision := params.PullRef.BaseSha
if revision == "" {
revision = params.PullRef.BaseBranch
}
resources := []*pipelineapi.PipelineResource{tekton.GenerateSourceRepoResource(params.ResourceName, &params.GitInfo, revision)}
run := tekton.CreatePipelineRun(resources, pipeline.Name, pipeline.APIVersion, labels, params.ServiceAccount, nil, nil, nil)
tektonCRDs, err := tekton.NewCRDWrapper(pipeline, tasks, resources, structure, run)
if err != nil {
return nil, err
}
return tektonCRDs, nil
}
// GetExtendingApps returns the list of apps which are installed in the cluster registered for extending the pipeline.
// An app registers its interest in extending the pipeline by having the 'pipeline-extension' label set.
func GetExtendingApps(jxClient versioned.Interface, namespace string) ([]jenkinsv1.App, error) {
listOptions := metav1.ListOptions{}
// Filter to apps whose app-type label declares them as pipeline extensions.
listOptions.LabelSelector = fmt.Sprintf(apps.AppTypeLabel+" in (%s)", apps.PipelineExtension)
appsList, err := jxClient.JenkinsV1().Apps(namespace).List(listOptions)
if err != nil {
return nil, errors.Wrap(err, "error retrieving pipeline contributor apps")
}
return appsList.Items, nil
}
// createPipeline builds the parsed/typed pipeline which serves as input for the Tekton CRD creation.
func createPipeline(params CRDCreationParameters) (*syntax.ParsedPipeline, error) {
	steps, err := buildSteps(params)
	if err != nil {
		return nil, errors.Wrap(err, "unable to create app extending pipeline steps")
	}
	// A single stage carries all meta pipeline steps, run on the default image.
	pipeline := &syntax.ParsedPipeline{
		Stages: []syntax.Stage{{
			Name:  appExtensionStageName,
			Steps: steps,
			Agent: &syntax.Agent{
				Image: determineDefaultStepImage(params.DefaultImage),
			},
		}},
	}
	// Expose the build metadata environment variables to every container.
	pipeline.AddContainerEnvVarsToPipeline(buildEnvParams(params))
	return pipeline, nil
}
// buildSteps builds the meta pipeline steps.
// The tasks of the meta pipeline are:
// 1) make sure the right commits are merged
// 2) create the effective pipeline and write it to disk
// 3) one step for each extending app
// 4) create Tekton CRDs for the meta pipeline
func buildSteps(params CRDCreationParameters) ([]syntax.Step, error) {
var steps []syntax.Step
// 1)
step := stepMergePullRefs(params.PullRef)
steps = append(steps, step)
// 2)
step = stepEffectivePipeline(params)
steps = append(steps, step)
log.Logger().Debugf("creating pipeline steps for extending apps")
// 3)
for _, app := range params.Apps |
// 4)
step = stepCreateTektonCRDs(params)
steps = append(steps, step)
return steps, nil
}
// stepMergePullRefs builds the 'jx step git merge' step for the pull refs.
func stepMergePullRefs(pullRefs prow.PullRefs) syntax.Step {
// we only need to run the merge step in case there is anything to merge
// Tekton has at this stage the base branch already checked out
if len(pullRefs.ToMerge) == 0 {
return stepSkip(mergePullRefsStepName, "Nothing to merge")
}
args := []string{"--verbose", "--baseBranch", pullRefs.BaseBranch, "--baseSHA", pullRefs.BaseSha}
for _, mergeSha := range pullRefs.ToMerge {
args = append(args, "--sha", mergeSha)
}
step := syntax.Step{
Name: mergePullRefsStepName,
Comment: "Pipeline step merging pull refs",
Command: "jx step git merge",
Arguments: args,
}
return step
}
// stepEffectivePipeline builds the step that writes the effective pipeline
// configuration into the working directory.
func stepEffectivePipeline(params CRDCreationParameters) syntax.Step {
args := []string{"--output-dir", "."}
if params.Context != "" {
args = append(args, "--context", params.Context)
}
step := syntax.Step{
Name: createEffectivePipelineStepName,
Comment: "Pipeline step creating the effective pipeline configuration",
Command: "jx step syntax effective",
Arguments: args,
}
return step
}
// stepCreateTektonCRDs builds the final step that creates the Tekton CRDs
// for the actual pipeline run via 'jx step create task'.
func stepCreateTektonCRDs(params CRDCreationParameters) syntax.Step {
args := []string{"--clone-dir", filepath.Join(tektonBaseDir, params.SourceDir)}
args = append(args, "--kind", params.PipelineKind)
// NOTE(review): iterating a map yields an arbitrary element, so with more
// than one PR the chosen --pr-number is nondeterministic — confirm intent.
for prID := range params.PullRef.ToMerge {
args = append(args, "--pr-number", prID)
// there might be a batch build building multiple PRs, in which case we just use the first in this case
break
}
args = append(args, "--service-account", params.ServiceAccount)
args = append(args, "--source", params.SourceDir)
args = append(args, "--branch", params.BranchIdentifier)
args = append(args, "--build-number", params.BuildNumber)
if params.Context != "" {
args = append(args, "--context", params.Context)
}
for _, l := range params.Labels {
args = append(args, "--label", l)
}
for _, e := range params.EnvVars {
args = append(args, "--env", e)
}
step := syntax.Step{
Name: createTektonCRDsStepName,
Comment: "Pipeline step to create the Tekton CRDs for the actual pipeline run",
Command: "jx step create task",
Arguments: args,
}
return step
}
// stepSkip returns a dummy echo step recording that stepName was skipped and why.
func stepSkip(stepName string, msg string) syntax.Step {
skipMsg := fmt.Sprintf("SKIP %s: %s", stepName, msg)
step := syntax.Step{
Name: stepName,
Comment: skipMsg,
Command: "echo",
Arguments: []string{fmt.Sprintf("'%s'", skipMsg)},
}
return step
}
// determineDefaultStepImage resolves the image used for meta pipeline steps,
// preferring the configured defaultImage over the built-in default.
func determineDefaultStepImage(defaultImage string) string {
	image := syntax.DefaultContainerImage
	if defaultImage != "" {
		image = defaultImage
	}
	return image
}
func buildEnvParams(params CRDCreationParameters) []corev1.EnvVar {
var envVars []corev1.EnvVar
envVars = append(envVars, corev1.EnvVar{
Name: "JX_LOG_FORMAT",
Value: "json",
})
envVars = append(envVars, corev1.EnvVar{
Name: "BUILD_NUMBER",
Value: params.BuildNumber,
})
envVars = append(envVars, corev1.EnvVar{
Name: "PIPELINE_KIND",
Value: params.PipelineKind,
})
envVars = append(envVars, corev1.EnvVar{
Name: "PULL_REFS",
Value: params.PullRef.String(),
})
context := params.Context
if context != "" {
envVars = append(envVars, corev1.EnvVar{
Name: "PIPELINE_CONTEXT",
| {
if app.Spec.PipelineExtension == nil {
log.Logger().Warnf("Skipping app %s in meta pipeline. It contains label %s with value %s, but does not contain PipelineExtension fields.", app.Name, apps.AppTypeLabel, apps.PipelineExtension)
continue
}
extension := app.Spec.PipelineExtension
step := syntax.Step{
Name: extension.Name,
Image: extension.Image,
Command: extension.Command,
Arguments: extension.Args,
}
log.Logger().Debugf("App %s contributes with step %s", app.Name, util.PrettyPrint(step))
steps = append(steps, step)
} | conditional_block |
distributed_dqn_v2.py | dqn_model import _DQNModel
from memory import ReplayBuffer
from memory_remote import ReplayBuffer_remote
import matplotlib.pyplot as plt
from custom_cartpole import CartPoleEnv
FloatTensor = torch.FloatTensor
# =================== Helper Function ===================
def plot_result(total_rewards, learning_num, legend):
    """Plot evaluation rewards against training episodes.

    Args:
        total_rewards: list of average rewards, one entry per evaluation interval.
        learning_num: number of training episodes between two evaluations.
        legend: labels passed to the plot legend.
    """
    print("\nLearning Performance:\n")
    # x-axis: the episode index at which each evaluation happened
    episodes = [i * learning_num + 1 for i in range(len(total_rewards))]
    # NOTE: the original also called plt.figure(num=1) here, which was dead
    # code — plt.subplots() creates and selects a fresh figure immediately.
    fig, ax = plt.subplots()
    plt.plot(episodes, total_rewards)
    plt.title('performance')
    plt.legend(legend)
    plt.xlabel("Episodes")
    plt.ylabel("total rewards")
    plt.show()
# =================== Hyperparams ===================
hyperparams_CartPole = {
'epsilon_decay_steps' : 100000,  # steps over which epsilon decays linearly to final_epsilon
'final_epsilon' : 0.1,  # exploration floor after decay
'batch_size' : 32,  # mini-batch size for model updates
'update_steps' : 10,  # train the model every N steps
'memory_size' : 2000,  # replay buffer capacity
'beta' : 0.99,  # Q-value discount factor
'model_replace_freq' : 2000,  # sync target model every N steps
'learning_rate' : 0.0003,
'use_target_model': True  # bootstrap from a frozen target network
}
# =================== Initialize Environment ===================
# Set the Env name and action space for CartPole
ENV_NAME = 'CartPole_distributed'
# Move left, Move right
ACTION_DICT = {
"LEFT": 0,
"RIGHT":1
}
# Register the environment
env_CartPole = CartPoleEnv()
# =================== Ray Init ===================
# Start a fresh Ray runtime; shut down first in case one is already running.
ray.shutdown()
# ray.init(include_webui=False, ignore_reinit_error=True, redis_max_memory=500000000, object_store_memory=5000000000)
ray.init()
# =================== DQN ===================
class DQN_agent(object):
| learning: The trigger of agent learning. It is on while training agent. It is off while testing agent.
action_space: The action space of the current environment, e.g 2.
"""
self.episode = 0
self.steps = 0
self.best_reward = 0
self.learning = True
self.action_space = action_space
"""
input_len: The input length of the neural network. It equals to the length of the state vector.
output_len: The output length of the neural network. It is equal to the action space.
eval_model: The model for predicting action for the agent.
target_model: The model for calculating Q-value of next_state to update 'eval_model'.
use_target_model: Trigger for turn 'target_model' on/off
"""
state = env.reset()
input_len = len(state)
output_len = action_space
self.eval_model = DQNModel(input_len, output_len, learning_rate = hyper_params['learning_rate'])
self.use_target_model = hyper_params['use_target_model']
if self.use_target_model:
self.target_model = DQNModel(input_len, output_len)
# memory: Store and sample experience replay.
# self.memory = ReplayBuffer(hyper_params['memory_size'])
"""
batch_size: Mini batch size for training model.
update_steps: The frequence of traning model
model_replace_freq: The frequence of replacing 'target_model' by 'eval_model'
"""
self.batch_size = hyper_params['batch_size']
self.update_steps = hyper_params['update_steps']
self.model_replace_freq = hyper_params['model_replace_freq']
def linear_decrease(self, initial_value, final_value, curr_steps, final_decay_steps):
    """Linearly interpolate from initial_value to final_value as curr_steps
    goes from 0 to final_decay_steps, clamping at final_value afterwards."""
    fraction = min(curr_steps / final_decay_steps, 1)
    return initial_value - (initial_value - final_value) * fraction
def explore_or_exploit_policy(self, state):
    # Epsilon-greedy: with probability epsilon take a uniformly random
    # action, otherwise act greedily w.r.t. the eval model.
    p = uniform(0, 1)
    # Get decreased epsilon
    epsilon = self.linear_decrease(self.initial_epsilon,
                                   self.final_epsilon,
                                   self.steps,
                                   self.epsilon_decay_steps)
    if p < epsilon:
        #return action
        return randint(0, self.action_space - 1)
    else:
        #return action
        return self.greedy_policy(state)
def greedy_policy(self, state):
    # Greedy action: let the eval model pick the best action for `state`.
    return self.eval_model.predict(state)
# =================== Ray Servers ===================
@ray.remote
class DQNModel_server(DQN_agent):
    """Ray actor holding the learner: owns the eval/target networks and
    performs gradient updates from batches sampled off the remote replay buffer."""

    def __init__(self, env, hyper_params, memory):
        super().__init__(env, hyper_params)
        # Remote replay-buffer actor shared with the collecting workers.
        self.memory_server = memory

    def update_batch(self):
        """Sample one mini-batch from the replay server and fit the eval model."""
        batch = ray.get(self.memory_server.sample.remote(self.batch_size))
        if not batch:
            # Not enough experience collected yet.
            return
        (states, actions, reward, next_states,
         is_terminal) = batch
        # 1 keeps the bootstrap term, 0 drops it for terminal transitions.
        terminal = FloatTensor([0 if t else 1 for t in is_terminal])
        reward = FloatTensor(reward)
        batch_index = torch.arange(self.batch_size,
                                   dtype=torch.long)
        # Current Q-values for the actions actually taken.
        _, q_values = self.eval_model.predict_batch(states)
        q_values = q_values[batch_index, actions]
        # Bootstrap target, from the frozen target network when enabled.
        if self.use_target_model:
            actions, q_next = self.target_model.predict_batch(next_states)
        else:
            actions, q_next = self.eval_model.predict_batch(next_states)
        q_target = reward + self.beta * torch.max(q_next, dim=1)[0] * terminal
        # update model
        self.eval_model.fit(q_values, q_target)

    def getReturn(self):
        # Expose the current eval model and global step counter to workers.
        return self.eval_model, self.steps

    def learn(self):
        """Advance the global step counter; periodically train and sync the target."""
        self.steps += 1
        if self.steps % self.update_steps == 0:
            self.update_batch()
        if self.steps % self.model_replace_freq == 0 and self.use_target_model:
            self.target_model.replace(self.eval_model)
# =================== Workers ===================
@ray.remote
def collecting_worker(model, env, hyper_params, max_episode_steps, memory, tasks_num):
    """Run `tasks_num` episodes, pushing every transition to the remote replay
    buffer and ticking the learner once per environment step.

    The original fetched `model.getReturn` at the start of each episode but
    never used the result — that blocking round-trip has been removed.
    """
    for _ in range(tasks_num):
        state = env.reset()
        done = False
        steps = 0
        while steps < max_episode_steps and not done:
            steps += 1
            # Ask the model server for an epsilon-greedy action.
            a = ray.get(model.explore_or_exploit_policy.remote(state))
            s_, reward, done, _ = env.step(a)
            # Fire-and-forget: store the transition in the remote buffer.
            memory.add.remote(state, a, reward, s_, done)
            state = s_
            # Tick the learner; it trains/syncs on its own schedule.
            model.learn.remote()
@ray.remote
def evaluation_worker(model, env, max_episode_steps, tasks_num):
    # Run `tasks_num` purely greedy episodes and return the average reward.
    total_reward = 0
    for _ in range(tasks_num):
        state = env.reset()
        done = False
        steps = 0
        while steps < max_episode_steps and not done:
            steps += 1
            action = ray.get(model.greedy_policy.remote(state))
            state, reward, done, _ = env.step(action)
            total_reward += reward
    return total_reward / tasks_num
# =================== Agent ===================
class distributed_DQN_agent():
def __init__(self, env, hyper_params, training_episodes, test_interval, cw_num = 4, ew_num = 4, trials = 30):
    # Coordinator wiring: a remote replay buffer shared by collectors and
    # a single model-server actor that owns the networks.
    self.memory_server = ReplayBuffer_remote.remote(hyper_params['memory_size'])
    self.model_server = DQNModel_server.remote(env, hyper_params, self.memory_server)
    self.env = env
    self.max_episode_steps = env._max_episode_steps
    # cw_num/ew_num: number of collecting / evaluation workers.
    self.cw_num = cw_num
    self.ew_num = ew_num
    self.hyper_params = hyper_params
    self.training_episodes = training_episodes
    self.test_interval = test_interval
    # trials: total greedy evaluation episodes, split across ew_num workers.
    self.trials = trials
def learn_and_evaluate(self):
results = []
num = 0
for _ in range(self.training_episodes // self.test_interval):
num += 1
collector_ids, evaluators_ids = [], []
# learn
for _ in range(self.cw_num):
collector_ids.append(collecting_worker.remote(self.model_server, self.env, self.hyper_params,
self.max_episode_steps, self.memory_server, test_interval//self.cw_num))
# evaluate
for _ in range(self.ew_num):
evaluators_ids.append(evaluation_worker.remote(self.model_server, self.env, self.max_episode_steps,
self.trials//self.ew_num))
total_reward = sum(ray.get(evaluators_ids))
avg_reward = total | def __init__(self, env, hyper_params, action_space = len(ACTION_DICT)):
self.env = env
self.max_episode_steps = env._max_episode_steps
"""
beta: The discounted factor of Q-value function
(epsilon): The explore or exploit policy epsilon.
initial_epsilon: When the 'steps' is 0, the epsilon is initial_epsilon, 1
final_epsilon: After the number of 'steps' reach 'epsilon_decay_steps',
The epsilon set to the 'final_epsilon' determinately.
epsilon_decay_steps: The epsilon will decrease linearly along with the steps from 0 to 'epsilon_decay_steps'.
"""
self.beta = hyper_params['beta']
self.initial_epsilon = 1
self.final_epsilon = hyper_params['final_epsilon']
self.epsilon_decay_steps = hyper_params['epsilon_decay_steps']
"""
episode: Record training episode
steps: Add 1 when predicting an action | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.