file_name
large_stringlengths
4
140
prefix
large_stringlengths
0
39k
suffix
large_stringlengths
0
36.1k
middle
large_stringlengths
0
29.4k
fim_type
large_stringclasses
4 values
role_conversion.py
#!/usr/bin/env python2.5 # # Copyright 2011 the Melange authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The role conversion updates are defined in this module. """ __authors__ = [ '"Daniel Hans" <daniel.m.hans@gmail.com>', ] import gae_django from google.appengine.ext import db from google.appengine.ext import deferred from google.appengine.runtime import DeadlineExceededError from django import http from soc.models.host import Host from soc.models.linkable import Linkable from soc.models.mentor import Mentor from soc.models.org_admin import OrgAdmin from soc.models.role import StudentInfo from soc.modules.gsoc.models.mentor import GSoCMentor from soc.modules.gsoc.models.organization import GSoCOrganization from soc.modules.gsoc.models.org_admin import GSoCOrgAdmin from soc.modules.gsoc.models.profile import GSoCProfile from soc.modules.gsoc.models.program import GSoCProgram from soc.modules.gsoc.models.student import GSoCStudent from soc.modules.gsoc.models.student_project import StudentProject from soc.modules.gsoc.models.student_proposal import StudentProposal ROLE_MODELS = [GSoCMentor, GSoCOrgAdmin, GSoCStudent] POPULATED_PROFILE_PROPS = set( GSoCProfile.properties()) - set(Linkable.properties()) POPULATED_STUDENT_PROPS = StudentInfo.properties() def getDjangoURLPatterns(): """Returns the URL patterns for the tasks in this module. 
""" patterns = [ (r'^tasks/role_conversion/update_references', 'soc.tasks.updates.role_conversion.updateReferences'), (r'^tasks/role_conversion/update_project_references', 'soc.tasks.updates.role_conversion.updateStudentProjectReferences'), (r'^tasks/role_conversion/update_proposal_references', 'soc.tasks.updates.role_conversion.updateStudentProposalReferences'), (r'^tasks/role_conversion/update_roles$', 'soc.tasks.updates.role_conversion.updateRoles'), (r'^tasks/role_conversion/update_mentors$', 'soc.tasks.updates.role_conversion.updateMentors'), (r'^tasks/role_conversion/update_org_admins$', 'soc.tasks.updates.role_conversion.updateOrgAdmins'), (r'^tasks/role_conversion/update_students$', 'soc.tasks.updates.role_conversion.updateStudents'), (r'^tasks/role_conversion/update_hosts$', 'soc.tasks.updates.role_conversion.updateHosts'), ] return patterns class HostUpdater(object): """Class which is responsible for updating Host entities. """ def run(self, batch_size=25): """Starts the updater. """ self._process(None, batch_size) def _process(self, start_key, batch_size): """Retrieves Host entities and updates them. """ query = Host.all() if start_key: query.filter('__key__ > ', start_key) try: entities = query.fetch(batch_size) if not entities: # all entities has already been processed return for entity in entities:
# process the next batch of entities start_key = entities[-1].key() deferred.defer(self._process, start_key, batch_size) except DeadlineExceededError: # here we should probably be more careful deferred.defer(self._process, start_key, batch_size) class RoleUpdater(object): """Class which is responsible for updating the entities. """ def __init__(self, model, profile_model, program_field, role_field=None): self.MODEL = model self.PROFILE_MODEL = profile_model self.PROGRAM_FIELD = program_field self.ROLE_FIELD = role_field def run(self, batch_size=25): """Starts the updater. """ self._process(None, batch_size) def _processEntity(self, entity): program = getattr(entity, self.PROGRAM_FIELD) user = entity.user # try to find an existing Profile entity or create a new one key_name = program.key().name() + '/' + user.link_id properties = { 'link_id': entity.link_id, 'scope_path': program.key().name(), 'scope': program, 'parent': user, } for prop in POPULATED_PROFILE_PROPS: properties[prop] = getattr(entity, prop) profile = self.PROFILE_MODEL.get_or_insert( key_name=key_name, **properties) # do not update anything if the role is already in the profile if profile.student_info and self.MODEL == GSoCStudent: return elif self.ROLE_FIELD: if entity.scope.key() in getattr(profile, self.ROLE_FIELD): return to_put = [profile] # a non-invalid role is found, we should re-populate the profile if profile.status == 'invalid' and entity.status != 'invalid': for prop_name in entity.properties(): value = getattr(entity, prop_name) setattr(profile, prop_name, value) if profile.student_info: profile.student_info = None if self.ROLE_FIELD: # the role is either Mentor or OrgAdmin getattr(profile, self.ROLE_FIELD).append(entity.scope.key()) else: # the role is certainly Student; we have to create a new StudentInfo properties = {} for prop in POPULATED_STUDENT_PROPS: properties[prop] = getattr(entity, prop) key_name = profile.key().name() student_info = StudentInfo(key_name=key_name, 
parent=profile, **properties) profile.student_info = student_info to_put.append(student_info) db.run_in_transaction(db.put, to_put) def _process(self, start_key, batch_size): """Retrieves entities and creates or updates a corresponding Profile entity. """ query = self.MODEL.all() if start_key: query.filter('__key__ > ', start_key) try: entities = query.fetch(batch_size) if not entities: # all entities has already been processed return for entity in entities: try: self._processEntity(entity) except db.Error, e: import logging logging.exception(e) logging.error("Broke on %s: %s" % (entity.key().name(), self.MODEL)) # process the next batch of entities start_key = entities[-1].key() deferred.defer(self._process, start_key, batch_size) except DeadlineExceededError: # here we should probably be more careful deferred.defer(self._process, start_key, batch_size) def updateHosts(request): """Starts a task which updates Host entities. """ updater = HostUpdater() updater.run() return http.HttpResponse("Ok") def updateRole(role_name): """Starts a task which updates a particular role. """ if role_name == 'gsoc_mentor': updater = RoleUpdater(GSoCMentor, GSoCProfile, 'program', 'mentor_for') elif role_name == 'gsoc_org_admin': updater = RoleUpdater( GSoCOrgAdmin, GSoCProfile, 'program', 'org_admin_for') elif role_name == 'gsoc_student': updater = RoleUpdater(GSoCStudent, GSoCProfile, 'scope') updater.run() return http.HttpResponse("Ok") def updateRoles(request): """Starts a bunch of iterative tasks which update particular roles. In order to prevent issues with concurrent access to entities, we set ETA so that each role is processed in separation. """ # update org admins #updateRole('gsoc_org_admin') # update mentors #updateRole('gsoc_mentor') # update students # we can assume that students cannot have any other roles, so we do not # need to set ETA updateRole('gsoc_student') def updateMentors(request): """Starts an iterative task which update mentors. 
""" return updateRole('gsoc_mentor') def updateOrgAdmins(request): """Starts an iterative task which update org admins. """ return updateRole('gsoc_org_admin') def updateStudents(request): """Starts an iterative task which update students. """ return updateRole('gsoc_student') def _getProfileForRole(entity, profile_model): """Returns GSoCProfile or GCIProfile which corresponds to the specified entity. """ if isinstance(entity, profile_model): return entity if isinstance(entity, OrgAdmin) or isinstance(entity, Mentor): key_name = entity.program.key().name() + '/' + entity.user.key().name() else: key_name = entity.key().name() parent = entity.user return profile_model.get_by_key_name(key_name, parent=parent) def _getProfileKeyForRoleKey(key, profile_model): """Returns Key instance of the Profile which corresponds to the Role which is represented by the specified Key. """ entity = db.get(key) profile = _getProfileForRole(entity, profile_model) return profile.key() class ReferenceUpdater(object): """Class which is responsible for updating references to Profile in the specified model. """ def __init__(self, model, profile_model, fields_to_update, lists_to_update=[]): self.MODEL = model self.PROFILE_MODEL = profile_model self.FIELDS_TO_UPDATE = fields_to_update self.LISTS_TO_UPDATE = lists_to_update def run(self, batch_size=25): """Starts the updater. """ self._process(None, batch_size) def _process(self, start_key, batch_size): """Iterates through the entities and updates the references. 
""" query = self.MODEL.all() if start_key: query.filter('__key__ > ', start_key) try: entities = query.fetch(batch_size) if not entities: # all entities has already been processed return for entity in entities: for field in self.FIELDS_TO_UPDATE: old_reference = getattr(entity, field) if not old_reference: continue # check if the field has not been updated if isinstance(old_reference, self.PROFILE_MODEL): continue profile = _getProfileForRole(old_reference, self.PROFILE_MODEL) setattr(entity, field, profile) for list_property in self.LISTS_TO_UPDATE: l = getattr(entity, list_property) new_l = [] for key in l: new_l.append(_getProfileKeyForRoleKey(key, self.PROFILE_MODEL)) setattr(entity, list_property, new_l) db.put(entities) start_key = entities[-1].key() deferred.defer(self._process, start_key, batch_size) except DeadlineExceededError: # here we should probably be more careful deferred.defer(self._process, start_key, batch_size) def updateReferencesForModel(model): """Starts a task which updates references for a particular model. """ if model == 'student_proposal': updater = ReferenceUpdater(StudentProposal, GSoCProfile, ['scope', 'mentor'], ['possible_mentors']) elif model == 'student_project': updater = ReferenceUpdater(StudentProject, GSoCProfile, ['mentor', 'student'], ['additional_mentors']) updater.run() return http.HttpResponse("Ok") def updateStudentProjectReferences(request): """Starts a bunch of iterative tasks which update references in StudentProjects. """ return updateReferencesForModel('student_project') def updateStudentProposalReferences(request): """Starts a bunch of iterative tasks which update references in StudentProposals. """ return updateReferencesForModel('student_proposal') def updateReferences(request): """Starts a bunch of iterative tasks which update references to various roles. 
""" # updates student proposals updateReferencesForModel('student_proposal') # updates student projects updateReferencesForModel('student_project') return http.HttpResponse("Ok")
sponsor = entity.scope host_for = entity.user.host_for if not host_for: host_for = [] user = entity.user if sponsor.key() not in host_for: host_for.append(sponsor.key()) user.host_for = host_for db.put(user)
conditional_block
http.rs
//! This example uses [hyper][] to create a http server which handles requests asynchronously in //! gluon. To do this we define a few types and functions in Rust with which we register in gluon //! so that we can communicate with `hyper`. The rest of the implementation is done in gluon, //! routing the requests and constructing the responses. //! //! [hyper]:https://hyper.rs extern crate gluon; extern crate gluon_base as base; #[macro_use] extern crate gluon_vm as vm; #[macro_use] extern crate collect_mac; extern crate env_logger; extern crate futures; extern crate hyper; #[macro_use] extern crate log; use std::env; use std::fmt; use std::error::Error as StdError; use std::fs::File; use std::io::{stderr, Read, Write}; use std::marker::PhantomData; use std::sync::{Arc, Mutex}; use hyper::{Chunk, Method, StatusCode}; use hyper::server::Service; use futures::Async; use futures::future::Future; use futures::sink::Sink; use futures::stream::Stream; use futures::sync::mpsc::Sender; use base::types::{ArcType, Type}; use vm::{Error as VmError, ExternModule, Result as VmResult}; use vm::thread::ThreadInternal; use vm::thread::{Context, RootedThread, Thread}; use vm::Variants; use vm::api::{Function, FunctionRef, FutureResult, Getable, OpaqueValue, PushAsRef, Pushable, Userdata, ValueRef, VmType, WithVM, IO}; use vm::gc::{Gc, Traverseable}; use gluon::import::add_extern_module; use vm::internal::Value; use gluon::{new_vm, Compiler}; // `Handler` is a type defined in http.glu but since we need to refer to it in the signature of // listen we define a phantom type which we can use with `OpaqueValue` to store a `Handler` in Rust struct Handler<T>(PhantomData<T>); impl<T: VmType + 'static> VmType for Handler<T> { type Type = Self; fn make_type(vm: &Thread) -> ArcType { let typ = (*vm.global_env() .get_env() .find_type_info("examples.http_types.Handler") .unwrap()) .clone() .into_type(); Type::app(typ, collect![T::make_type(vm)]) } } // Rust does not let us define traits on types 
defined in a different crate such as `hyper`. We can // however work around this by defining a wrapper type which we are then able to define the traits // on. struct Wrap<T>(T); macro_rules! define_vmtype { ($name: ident) => { impl VmType for Wrap<$name> { type Type = $name; fn make_type(vm: &Thread) -> ArcType { let typ = concat!("examples.http_types.", stringify!($name)); (*vm.global_env().get_env().find_type_info(typ).unwrap()) .clone() .into_type() } } } } define_vmtype! { Method } impl<'vm> Pushable<'vm> for Wrap<Method> { fn push(self, _: &'vm Thread, context: &mut Context) -> VmResult<()> { use hyper::Method::*; context.stack.push(Value::tag(match self.0 { Get => 0, Post => 1, Delete => 2, _ => { return Err(VmError::Message(format!( "Method `{:?}` does not exist in gluon", self.0 )).into()) } })); Ok(()) } } define_vmtype! { StatusCode } impl<'vm> Getable<'vm> for Wrap<StatusCode> { fn from_value(_: &'vm Thread, value: Variants) -> Self { use hyper::StatusCode::*; match value.as_ref() { ValueRef::Data(data) => Wrap(match data.tag() { 0 => Ok, 1 => NotFound, 2 => InternalServerError, _ => panic!("Unexpected tag"), }), _ => panic!(), } } } // Representation of a http body that is in the prograss of being read pub struct Body(Arc<Mutex<Box<Stream<Item = PushAsRef<Chunk, [u8]>, Error = VmError> + Send>>>); // By implementing `Userdata` on `Body` it can be automatically pushed and retrieved from gluon // threads impl Userdata for Body {} // Types implementing `Userdata` requires a `std::fmt::Debug` implementation so it can be displayed impl fmt::Debug for Body { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "hyper::Body") } } // `Traverseable` is required by `Userdata` so that the garbage collector knows how to scan the // value for garbage collected references. 
Normally objects do not contain any references so this // can be empty impl Traverseable for Body { fn traverse(&self, _: &mut Gc) {} } // `VmType` is the last trait required for a type to implement `Userdata` and defines the type used // in gluon for this Rust type. For opaque `Userdata` values this minimal implementation is enough // as the default implementation of `make_type` will lookup `VmType::Type` from the virtual machine // which should have been registered earlier with `Thread::register_type` impl VmType for Body { type Type = Self; } // Since `Body` implements `Userdata` gluon will automatically marshal the gluon representation // into `&Body` argument fn read_chunk( body: &Body, ) -> FutureResult< Box<Future<Item = IO<Option<PushAsRef<Chunk, [u8]>>>, Error = VmError> + Send + 'static>, > { use futures::future::poll_fn; let body = body.0.clone(); // `FutureResult` is a wrapper type around `Future` which when returned to the interpreter is
stream.poll().map(|async| async.map(IO::Value)) }))) } // A http body that is being written pub struct ResponseBody(Arc<Mutex<Option<Sender<Result<Chunk, hyper::Error>>>>>); impl fmt::Debug for ResponseBody { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "hyper::Response") } } impl Userdata for ResponseBody {} impl Traverseable for ResponseBody { fn traverse(&self, _: &mut Gc) {} } impl VmType for ResponseBody { type Type = Self; } fn write_response( response: &ResponseBody, bytes: &[u8], ) -> FutureResult<Box<Future<Item = IO<()>, Error = VmError> + Send + 'static>> { use futures::future::poll_fn; use futures::AsyncSink; // Turn `bytes´ into a `Chunk` which can be sent to the http body let mut unsent_chunk = Some(Ok(bytes.to_owned().into())); let response = response.0.clone(); FutureResult(Box::new(poll_fn(move || { info!("Starting response send"); let mut sender = response.lock().unwrap(); let sender = sender .as_mut() .expect("Sender has been dropped while still in use"); if let Some(chunk) = unsent_chunk.take() { match sender.start_send(chunk) { Ok(AsyncSink::NotReady(chunk)) => { unsent_chunk = Some(chunk); return Ok(Async::NotReady); } Ok(AsyncSink::Ready) => (), Err(_) => { info!("Could not send http response"); return Ok(Async::Ready(IO::Value(()))); } } } match sender.poll_complete() { Ok(async) => Ok(async.map(IO::Value)), Err(_) => { info!("Could not send http response"); Ok(Async::Ready(IO::Value(()))) } } }))) } // Next we define some record types which are marshalled to and from gluon. These have equivalent // definitions in http_types.glu field_decl! 
{ method, uri, status, body, request, response } type Request = record_type!{ method => Wrap<Method>, uri => String, body => Body }; type Response = record_type!{ status => Wrap<StatusCode> }; type HttpState = record_type!{ request => Request, response => ResponseBody }; fn listen(port: i32, value: WithVM<OpaqueValue<RootedThread, Handler<Response>>>) -> IO<()> { let WithVM { value: handler, vm: thread, } = value; use hyper::server::{Http, Request as HyperRequest, Response as HyperResponse}; // Retrieve the `handle` function from the http module which we use to evaluate values of type // `Handler Response` type ListenFn = fn(OpaqueValue<RootedThread, Handler<Response>>, HttpState) -> IO<Response>; let handle: Function<RootedThread, ListenFn> = thread .get_global("examples.http.handle") .unwrap_or_else(|err| panic!("{}", err)); struct Listen { handle: Function<RootedThread, ListenFn>, handler: OpaqueValue<RootedThread, Handler<Response>>, } impl Service for Listen { type Request = HyperRequest; type Response = HyperResponse; type Error = hyper::Error; type Future = Box<Future<Item = HyperResponse, Error = hyper::Error> + Send + 'static>; fn call(&self, request: HyperRequest) -> Self::Future { let gluon_request = record_no_decl! 
{ // Here we use to `Wrap` type to make `hyper::Request` into a type that can be // pushed to gluon method => Wrap(request.method().clone()), uri => request.uri().to_string(), // Since `Body` implements `Userdata` it can be directly pushed to gluon body => Body(Arc::new(Mutex::new(Box::new(request.body() .map_err(|err| VmError::Message(format!("{}", err))) // `PushAsRef` makes the `body` parameter act as a `&[u8]` which means it is // marshalled to `Array Byte` in gluon .map(PushAsRef::<_, [u8]>::new))))) }; let (response_sender, response_body) = hyper::Body::pair(); let response_sender = Arc::new(Mutex::new(Some(response_sender))); let http_state = record_no_decl!{ request => gluon_request, response => ResponseBody(response_sender.clone()) }; Box::new( self.handle .clone() .call_async(self.handler.clone(), http_state) .then(move |result| match result { Ok(value) => { match value { IO::Value(record_p!{ status }) => { // Drop the sender to so that it the receiver stops waiting for // more chunks *response_sender.lock().unwrap() = None; Ok( HyperResponse::new() .with_status(status.0) .with_body(response_body), ) } IO::Exception(err) => { let _ = stderr().write(err.as_bytes()); Ok( HyperResponse::new() .with_status(StatusCode::InternalServerError), ) } } } Err(err) => { let _ = stderr().write(format!("{}", err).as_bytes()); Ok(HyperResponse::new().with_status(StatusCode::InternalServerError)) } }), ) } } let addr = format!("127.0.0.1:{}", port).parse().unwrap(); let result = Http::new() .bind(&addr, move || { Ok(Listen { handle: handle.clone(), handler: handler.clone(), }) }) .and_then(|server| server.run()); match result { Ok(()) => IO::Value(()), Err(err) => IO::Exception(format!("{}", err)), } } // To let the `http_types` module refer to `Body` and `ResponseBody` we register these types in a // separate function which is called before loading `http_types` pub fn load_types(vm: &Thread) -> VmResult<()> { vm.register_type::<Body>("Body", &[])?; 
vm.register_type::<ResponseBody>("ResponseBody", &[])?; Ok(()) } pub fn load(vm: &Thread) -> VmResult<ExternModule> { ExternModule::new( vm, record! { listen => primitive!(2 listen), read_chunk => primitive!(1 read_chunk), write_response => primitive!(2 write_response) }, ) } fn main() { if let Err(err) = main_() { panic!("{}", err) } } fn main_() -> Result<(), Box<StdError>> { let _ = env_logger::try_init(); let port = env::args() .nth(1) .map(|port| port.parse::<i32>().expect("port")) .unwrap_or(80); let thread = new_vm(); // First load all the http types so we can refer to them from gluon load_types(&thread)?; Compiler::new().run_expr::<()>( &thread, "", r#"let _ = import! "examples/http_types.glu" in () "#, )?; // Load the primitive functions we define in this module add_extern_module(&thread, "http.prim", load); // Last we run our `http_server.glu` module which returns a function which starts listening // on the port we passed from the command line let mut expr = String::new(); { let mut file = File::open("examples/http_server.glu")?; file.read_to_string(&mut expr)?; } let (mut listen, _) = Compiler::new().run_expr::<FunctionRef<fn(i32) -> IO<()>>>(&thread, "http_test", &expr)?; listen.call(port)?; Ok(()) }
// polled until completion. After `poll` returns `Ready` the value is then returned to the // gluon function which called `read_chunk` FutureResult(Box::new(poll_fn(move || { let mut stream = body.lock().unwrap();
random_line_split
http.rs
//! This example uses [hyper][] to create a http server which handles requests asynchronously in //! gluon. To do this we define a few types and functions in Rust with which we register in gluon //! so that we can communicate with `hyper`. The rest of the implementation is done in gluon, //! routing the requests and constructing the responses. //! //! [hyper]:https://hyper.rs extern crate gluon; extern crate gluon_base as base; #[macro_use] extern crate gluon_vm as vm; #[macro_use] extern crate collect_mac; extern crate env_logger; extern crate futures; extern crate hyper; #[macro_use] extern crate log; use std::env; use std::fmt; use std::error::Error as StdError; use std::fs::File; use std::io::{stderr, Read, Write}; use std::marker::PhantomData; use std::sync::{Arc, Mutex}; use hyper::{Chunk, Method, StatusCode}; use hyper::server::Service; use futures::Async; use futures::future::Future; use futures::sink::Sink; use futures::stream::Stream; use futures::sync::mpsc::Sender; use base::types::{ArcType, Type}; use vm::{Error as VmError, ExternModule, Result as VmResult}; use vm::thread::ThreadInternal; use vm::thread::{Context, RootedThread, Thread}; use vm::Variants; use vm::api::{Function, FunctionRef, FutureResult, Getable, OpaqueValue, PushAsRef, Pushable, Userdata, ValueRef, VmType, WithVM, IO}; use vm::gc::{Gc, Traverseable}; use gluon::import::add_extern_module; use vm::internal::Value; use gluon::{new_vm, Compiler}; // `Handler` is a type defined in http.glu but since we need to refer to it in the signature of // listen we define a phantom type which we can use with `OpaqueValue` to store a `Handler` in Rust struct Handler<T>(PhantomData<T>); impl<T: VmType + 'static> VmType for Handler<T> { type Type = Self; fn make_type(vm: &Thread) -> ArcType { let typ = (*vm.global_env() .get_env() .find_type_info("examples.http_types.Handler") .unwrap()) .clone() .into_type(); Type::app(typ, collect![T::make_type(vm)]) } } // Rust does not let us define traits on types 
defined in a different crate such as `hyper`. We can // however work around this by defining a wrapper type which we are then able to define the traits // on. struct Wrap<T>(T); macro_rules! define_vmtype { ($name: ident) => { impl VmType for Wrap<$name> { type Type = $name; fn make_type(vm: &Thread) -> ArcType { let typ = concat!("examples.http_types.", stringify!($name)); (*vm.global_env().get_env().find_type_info(typ).unwrap()) .clone() .into_type() } } } } define_vmtype! { Method } impl<'vm> Pushable<'vm> for Wrap<Method> { fn push(self, _: &'vm Thread, context: &mut Context) -> VmResult<()>
} define_vmtype! { StatusCode } impl<'vm> Getable<'vm> for Wrap<StatusCode> { fn from_value(_: &'vm Thread, value: Variants) -> Self { use hyper::StatusCode::*; match value.as_ref() { ValueRef::Data(data) => Wrap(match data.tag() { 0 => Ok, 1 => NotFound, 2 => InternalServerError, _ => panic!("Unexpected tag"), }), _ => panic!(), } } } // Representation of a http body that is in the prograss of being read pub struct Body(Arc<Mutex<Box<Stream<Item = PushAsRef<Chunk, [u8]>, Error = VmError> + Send>>>); // By implementing `Userdata` on `Body` it can be automatically pushed and retrieved from gluon // threads impl Userdata for Body {} // Types implementing `Userdata` requires a `std::fmt::Debug` implementation so it can be displayed impl fmt::Debug for Body { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "hyper::Body") } } // `Traverseable` is required by `Userdata` so that the garbage collector knows how to scan the // value for garbage collected references. Normally objects do not contain any references so this // can be empty impl Traverseable for Body { fn traverse(&self, _: &mut Gc) {} } // `VmType` is the last trait required for a type to implement `Userdata` and defines the type used // in gluon for this Rust type. For opaque `Userdata` values this minimal implementation is enough // as the default implementation of `make_type` will lookup `VmType::Type` from the virtual machine // which should have been registered earlier with `Thread::register_type` impl VmType for Body { type Type = Self; } // Since `Body` implements `Userdata` gluon will automatically marshal the gluon representation // into `&Body` argument fn read_chunk( body: &Body, ) -> FutureResult< Box<Future<Item = IO<Option<PushAsRef<Chunk, [u8]>>>, Error = VmError> + Send + 'static>, > { use futures::future::poll_fn; let body = body.0.clone(); // `FutureResult` is a wrapper type around `Future` which when returned to the interpreter is // polled until completion. 
After `poll` returns `Ready` the value is then returned to the // gluon function which called `read_chunk` FutureResult(Box::new(poll_fn(move || { let mut stream = body.lock().unwrap(); stream.poll().map(|async| async.map(IO::Value)) }))) } // A http body that is being written pub struct ResponseBody(Arc<Mutex<Option<Sender<Result<Chunk, hyper::Error>>>>>); impl fmt::Debug for ResponseBody { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "hyper::Response") } } impl Userdata for ResponseBody {} impl Traverseable for ResponseBody { fn traverse(&self, _: &mut Gc) {} } impl VmType for ResponseBody { type Type = Self; } fn write_response( response: &ResponseBody, bytes: &[u8], ) -> FutureResult<Box<Future<Item = IO<()>, Error = VmError> + Send + 'static>> { use futures::future::poll_fn; use futures::AsyncSink; // Turn `bytes´ into a `Chunk` which can be sent to the http body let mut unsent_chunk = Some(Ok(bytes.to_owned().into())); let response = response.0.clone(); FutureResult(Box::new(poll_fn(move || { info!("Starting response send"); let mut sender = response.lock().unwrap(); let sender = sender .as_mut() .expect("Sender has been dropped while still in use"); if let Some(chunk) = unsent_chunk.take() { match sender.start_send(chunk) { Ok(AsyncSink::NotReady(chunk)) => { unsent_chunk = Some(chunk); return Ok(Async::NotReady); } Ok(AsyncSink::Ready) => (), Err(_) => { info!("Could not send http response"); return Ok(Async::Ready(IO::Value(()))); } } } match sender.poll_complete() { Ok(async) => Ok(async.map(IO::Value)), Err(_) => { info!("Could not send http response"); Ok(Async::Ready(IO::Value(()))) } } }))) } // Next we define some record types which are marshalled to and from gluon. These have equivalent // definitions in http_types.glu field_decl! 
{ method, uri, status, body, request, response } type Request = record_type!{ method => Wrap<Method>, uri => String, body => Body }; type Response = record_type!{ status => Wrap<StatusCode> }; type HttpState = record_type!{ request => Request, response => ResponseBody }; fn listen(port: i32, value: WithVM<OpaqueValue<RootedThread, Handler<Response>>>) -> IO<()> { let WithVM { value: handler, vm: thread, } = value; use hyper::server::{Http, Request as HyperRequest, Response as HyperResponse}; // Retrieve the `handle` function from the http module which we use to evaluate values of type // `Handler Response` type ListenFn = fn(OpaqueValue<RootedThread, Handler<Response>>, HttpState) -> IO<Response>; let handle: Function<RootedThread, ListenFn> = thread .get_global("examples.http.handle") .unwrap_or_else(|err| panic!("{}", err)); struct Listen { handle: Function<RootedThread, ListenFn>, handler: OpaqueValue<RootedThread, Handler<Response>>, } impl Service for Listen { type Request = HyperRequest; type Response = HyperResponse; type Error = hyper::Error; type Future = Box<Future<Item = HyperResponse, Error = hyper::Error> + Send + 'static>; fn call(&self, request: HyperRequest) -> Self::Future { let gluon_request = record_no_decl! 
{ // Here we use to `Wrap` type to make `hyper::Request` into a type that can be // pushed to gluon method => Wrap(request.method().clone()), uri => request.uri().to_string(), // Since `Body` implements `Userdata` it can be directly pushed to gluon body => Body(Arc::new(Mutex::new(Box::new(request.body() .map_err(|err| VmError::Message(format!("{}", err))) // `PushAsRef` makes the `body` parameter act as a `&[u8]` which means it is // marshalled to `Array Byte` in gluon .map(PushAsRef::<_, [u8]>::new))))) }; let (response_sender, response_body) = hyper::Body::pair(); let response_sender = Arc::new(Mutex::new(Some(response_sender))); let http_state = record_no_decl!{ request => gluon_request, response => ResponseBody(response_sender.clone()) }; Box::new( self.handle .clone() .call_async(self.handler.clone(), http_state) .then(move |result| match result { Ok(value) => { match value { IO::Value(record_p!{ status }) => { // Drop the sender to so that it the receiver stops waiting for // more chunks *response_sender.lock().unwrap() = None; Ok( HyperResponse::new() .with_status(status.0) .with_body(response_body), ) } IO::Exception(err) => { let _ = stderr().write(err.as_bytes()); Ok( HyperResponse::new() .with_status(StatusCode::InternalServerError), ) } } } Err(err) => { let _ = stderr().write(format!("{}", err).as_bytes()); Ok(HyperResponse::new().with_status(StatusCode::InternalServerError)) } }), ) } } let addr = format!("127.0.0.1:{}", port).parse().unwrap(); let result = Http::new() .bind(&addr, move || { Ok(Listen { handle: handle.clone(), handler: handler.clone(), }) }) .and_then(|server| server.run()); match result { Ok(()) => IO::Value(()), Err(err) => IO::Exception(format!("{}", err)), } } // To let the `http_types` module refer to `Body` and `ResponseBody` we register these types in a // separate function which is called before loading `http_types` pub fn load_types(vm: &Thread) -> VmResult<()> { vm.register_type::<Body>("Body", &[])?; 
vm.register_type::<ResponseBody>("ResponseBody", &[])?; Ok(()) } pub fn load(vm: &Thread) -> VmResult<ExternModule> { ExternModule::new( vm, record! { listen => primitive!(2 listen), read_chunk => primitive!(1 read_chunk), write_response => primitive!(2 write_response) }, ) } fn main() { if let Err(err) = main_() { panic!("{}", err) } } fn main_() -> Result<(), Box<StdError>> { let _ = env_logger::try_init(); let port = env::args() .nth(1) .map(|port| port.parse::<i32>().expect("port")) .unwrap_or(80); let thread = new_vm(); // First load all the http types so we can refer to them from gluon load_types(&thread)?; Compiler::new().run_expr::<()>( &thread, "", r#"let _ = import! "examples/http_types.glu" in () "#, )?; // Load the primitive functions we define in this module add_extern_module(&thread, "http.prim", load); // Last we run our `http_server.glu` module which returns a function which starts listening // on the port we passed from the command line let mut expr = String::new(); { let mut file = File::open("examples/http_server.glu")?; file.read_to_string(&mut expr)?; } let (mut listen, _) = Compiler::new().run_expr::<FunctionRef<fn(i32) -> IO<()>>>(&thread, "http_test", &expr)?; listen.call(port)?; Ok(()) }
{ use hyper::Method::*; context.stack.push(Value::tag(match self.0 { Get => 0, Post => 1, Delete => 2, _ => { return Err(VmError::Message(format!( "Method `{:?}` does not exist in gluon", self.0 )).into()) } })); Ok(()) }
identifier_body
http.rs
//! This example uses [hyper][] to create a http server which handles requests asynchronously in //! gluon. To do this we define a few types and functions in Rust with which we register in gluon //! so that we can communicate with `hyper`. The rest of the implementation is done in gluon, //! routing the requests and constructing the responses. //! //! [hyper]:https://hyper.rs extern crate gluon; extern crate gluon_base as base; #[macro_use] extern crate gluon_vm as vm; #[macro_use] extern crate collect_mac; extern crate env_logger; extern crate futures; extern crate hyper; #[macro_use] extern crate log; use std::env; use std::fmt; use std::error::Error as StdError; use std::fs::File; use std::io::{stderr, Read, Write}; use std::marker::PhantomData; use std::sync::{Arc, Mutex}; use hyper::{Chunk, Method, StatusCode}; use hyper::server::Service; use futures::Async; use futures::future::Future; use futures::sink::Sink; use futures::stream::Stream; use futures::sync::mpsc::Sender; use base::types::{ArcType, Type}; use vm::{Error as VmError, ExternModule, Result as VmResult}; use vm::thread::ThreadInternal; use vm::thread::{Context, RootedThread, Thread}; use vm::Variants; use vm::api::{Function, FunctionRef, FutureResult, Getable, OpaqueValue, PushAsRef, Pushable, Userdata, ValueRef, VmType, WithVM, IO}; use vm::gc::{Gc, Traverseable}; use gluon::import::add_extern_module; use vm::internal::Value; use gluon::{new_vm, Compiler}; // `Handler` is a type defined in http.glu but since we need to refer to it in the signature of // listen we define a phantom type which we can use with `OpaqueValue` to store a `Handler` in Rust struct Handler<T>(PhantomData<T>); impl<T: VmType + 'static> VmType for Handler<T> { type Type = Self; fn make_type(vm: &Thread) -> ArcType { let typ = (*vm.global_env() .get_env() .find_type_info("examples.http_types.Handler") .unwrap()) .clone() .into_type(); Type::app(typ, collect![T::make_type(vm)]) } } // Rust does not let us define traits on types 
defined in a different crate such as `hyper`. We can // however work around this by defining a wrapper type which we are then able to define the traits // on. struct Wrap<T>(T); macro_rules! define_vmtype { ($name: ident) => { impl VmType for Wrap<$name> { type Type = $name; fn make_type(vm: &Thread) -> ArcType { let typ = concat!("examples.http_types.", stringify!($name)); (*vm.global_env().get_env().find_type_info(typ).unwrap()) .clone() .into_type() } } } } define_vmtype! { Method } impl<'vm> Pushable<'vm> for Wrap<Method> { fn push(self, _: &'vm Thread, context: &mut Context) -> VmResult<()> { use hyper::Method::*; context.stack.push(Value::tag(match self.0 { Get => 0, Post => 1, Delete => 2, _ => { return Err(VmError::Message(format!( "Method `{:?}` does not exist in gluon", self.0 )).into()) } })); Ok(()) } } define_vmtype! { StatusCode } impl<'vm> Getable<'vm> for Wrap<StatusCode> { fn from_value(_: &'vm Thread, value: Variants) -> Self { use hyper::StatusCode::*; match value.as_ref() { ValueRef::Data(data) => Wrap(match data.tag() { 0 => Ok, 1 => NotFound, 2 => InternalServerError, _ => panic!("Unexpected tag"), }), _ => panic!(), } } } // Representation of a http body that is in the prograss of being read pub struct Body(Arc<Mutex<Box<Stream<Item = PushAsRef<Chunk, [u8]>, Error = VmError> + Send>>>); // By implementing `Userdata` on `Body` it can be automatically pushed and retrieved from gluon // threads impl Userdata for Body {} // Types implementing `Userdata` requires a `std::fmt::Debug` implementation so it can be displayed impl fmt::Debug for Body { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "hyper::Body") } } // `Traverseable` is required by `Userdata` so that the garbage collector knows how to scan the // value for garbage collected references. 
Normally objects do not contain any references so this // can be empty impl Traverseable for Body { fn traverse(&self, _: &mut Gc) {} } // `VmType` is the last trait required for a type to implement `Userdata` and defines the type used // in gluon for this Rust type. For opaque `Userdata` values this minimal implementation is enough // as the default implementation of `make_type` will lookup `VmType::Type` from the virtual machine // which should have been registered earlier with `Thread::register_type` impl VmType for Body { type Type = Self; } // Since `Body` implements `Userdata` gluon will automatically marshal the gluon representation // into `&Body` argument fn read_chunk( body: &Body, ) -> FutureResult< Box<Future<Item = IO<Option<PushAsRef<Chunk, [u8]>>>, Error = VmError> + Send + 'static>, > { use futures::future::poll_fn; let body = body.0.clone(); // `FutureResult` is a wrapper type around `Future` which when returned to the interpreter is // polled until completion. After `poll` returns `Ready` the value is then returned to the // gluon function which called `read_chunk` FutureResult(Box::new(poll_fn(move || { let mut stream = body.lock().unwrap(); stream.poll().map(|async| async.map(IO::Value)) }))) } // A http body that is being written pub struct ResponseBody(Arc<Mutex<Option<Sender<Result<Chunk, hyper::Error>>>>>); impl fmt::Debug for ResponseBody { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "hyper::Response") } } impl Userdata for ResponseBody {} impl Traverseable for ResponseBody { fn traverse(&self, _: &mut Gc) {} } impl VmType for ResponseBody { type Type = Self; } fn write_response( response: &ResponseBody, bytes: &[u8], ) -> FutureResult<Box<Future<Item = IO<()>, Error = VmError> + Send + 'static>> { use futures::future::poll_fn; use futures::AsyncSink; // Turn `bytes´ into a `Chunk` which can be sent to the http body let mut unsent_chunk = Some(Ok(bytes.to_owned().into())); let response = response.0.clone(); 
FutureResult(Box::new(poll_fn(move || { info!("Starting response send"); let mut sender = response.lock().unwrap(); let sender = sender .as_mut() .expect("Sender has been dropped while still in use"); if let Some(chunk) = unsent_chunk.take() { match sender.start_send(chunk) { Ok(AsyncSink::NotReady(chunk)) => { unsent_chunk = Some(chunk); return Ok(Async::NotReady); } Ok(AsyncSink::Ready) => (), Err(_) => { info!("Could not send http response"); return Ok(Async::Ready(IO::Value(()))); } } } match sender.poll_complete() { Ok(async) => Ok(async.map(IO::Value)), Err(_) => { info!("Could not send http response"); Ok(Async::Ready(IO::Value(()))) } } }))) } // Next we define some record types which are marshalled to and from gluon. These have equivalent // definitions in http_types.glu field_decl! { method, uri, status, body, request, response } type Request = record_type!{ method => Wrap<Method>, uri => String, body => Body }; type Response = record_type!{ status => Wrap<StatusCode> }; type HttpState = record_type!{ request => Request, response => ResponseBody }; fn listen(port: i32, value: WithVM<OpaqueValue<RootedThread, Handler<Response>>>) -> IO<()> { let WithVM { value: handler, vm: thread, } = value; use hyper::server::{Http, Request as HyperRequest, Response as HyperResponse}; // Retrieve the `handle` function from the http module which we use to evaluate values of type // `Handler Response` type ListenFn = fn(OpaqueValue<RootedThread, Handler<Response>>, HttpState) -> IO<Response>; let handle: Function<RootedThread, ListenFn> = thread .get_global("examples.http.handle") .unwrap_or_else(|err| panic!("{}", err)); struct Listen { handle: Function<RootedThread, ListenFn>, handler: OpaqueValue<RootedThread, Handler<Response>>, } impl Service for Listen { type Request = HyperRequest; type Response = HyperResponse; type Error = hyper::Error; type Future = Box<Future<Item = HyperResponse, Error = hyper::Error> + Send + 'static>; fn call(&self, request: HyperRequest) -> 
Self::Future { let gluon_request = record_no_decl! { // Here we use to `Wrap` type to make `hyper::Request` into a type that can be // pushed to gluon method => Wrap(request.method().clone()), uri => request.uri().to_string(), // Since `Body` implements `Userdata` it can be directly pushed to gluon body => Body(Arc::new(Mutex::new(Box::new(request.body() .map_err(|err| VmError::Message(format!("{}", err))) // `PushAsRef` makes the `body` parameter act as a `&[u8]` which means it is // marshalled to `Array Byte` in gluon .map(PushAsRef::<_, [u8]>::new))))) }; let (response_sender, response_body) = hyper::Body::pair(); let response_sender = Arc::new(Mutex::new(Some(response_sender))); let http_state = record_no_decl!{ request => gluon_request, response => ResponseBody(response_sender.clone()) }; Box::new( self.handle .clone() .call_async(self.handler.clone(), http_state) .then(move |result| match result { Ok(value) => { match value { IO::Value(record_p!{ status }) => { // Drop the sender to so that it the receiver stops waiting for // more chunks *response_sender.lock().unwrap() = None; Ok( HyperResponse::new() .with_status(status.0) .with_body(response_body), ) } IO::Exception(err) => {
} } Err(err) => { let _ = stderr().write(format!("{}", err).as_bytes()); Ok(HyperResponse::new().with_status(StatusCode::InternalServerError)) } }), ) } } let addr = format!("127.0.0.1:{}", port).parse().unwrap(); let result = Http::new() .bind(&addr, move || { Ok(Listen { handle: handle.clone(), handler: handler.clone(), }) }) .and_then(|server| server.run()); match result { Ok(()) => IO::Value(()), Err(err) => IO::Exception(format!("{}", err)), } } // To let the `http_types` module refer to `Body` and `ResponseBody` we register these types in a // separate function which is called before loading `http_types` pub fn load_types(vm: &Thread) -> VmResult<()> { vm.register_type::<Body>("Body", &[])?; vm.register_type::<ResponseBody>("ResponseBody", &[])?; Ok(()) } pub fn load(vm: &Thread) -> VmResult<ExternModule> { ExternModule::new( vm, record! { listen => primitive!(2 listen), read_chunk => primitive!(1 read_chunk), write_response => primitive!(2 write_response) }, ) } fn main() { if let Err(err) = main_() { panic!("{}", err) } } fn main_() -> Result<(), Box<StdError>> { let _ = env_logger::try_init(); let port = env::args() .nth(1) .map(|port| port.parse::<i32>().expect("port")) .unwrap_or(80); let thread = new_vm(); // First load all the http types so we can refer to them from gluon load_types(&thread)?; Compiler::new().run_expr::<()>( &thread, "", r#"let _ = import! "examples/http_types.glu" in () "#, )?; // Load the primitive functions we define in this module add_extern_module(&thread, "http.prim", load); // Last we run our `http_server.glu` module which returns a function which starts listening // on the port we passed from the command line let mut expr = String::new(); { let mut file = File::open("examples/http_server.glu")?; file.read_to_string(&mut expr)?; } let (mut listen, _) = Compiler::new().run_expr::<FunctionRef<fn(i32) -> IO<()>>>(&thread, "http_test", &expr)?; listen.call(port)?; Ok(()) }
let _ = stderr().write(err.as_bytes()); Ok( HyperResponse::new() .with_status(StatusCode::InternalServerError), ) }
conditional_block
http.rs
//! This example uses [hyper][] to create a http server which handles requests asynchronously in //! gluon. To do this we define a few types and functions in Rust with which we register in gluon //! so that we can communicate with `hyper`. The rest of the implementation is done in gluon, //! routing the requests and constructing the responses. //! //! [hyper]:https://hyper.rs extern crate gluon; extern crate gluon_base as base; #[macro_use] extern crate gluon_vm as vm; #[macro_use] extern crate collect_mac; extern crate env_logger; extern crate futures; extern crate hyper; #[macro_use] extern crate log; use std::env; use std::fmt; use std::error::Error as StdError; use std::fs::File; use std::io::{stderr, Read, Write}; use std::marker::PhantomData; use std::sync::{Arc, Mutex}; use hyper::{Chunk, Method, StatusCode}; use hyper::server::Service; use futures::Async; use futures::future::Future; use futures::sink::Sink; use futures::stream::Stream; use futures::sync::mpsc::Sender; use base::types::{ArcType, Type}; use vm::{Error as VmError, ExternModule, Result as VmResult}; use vm::thread::ThreadInternal; use vm::thread::{Context, RootedThread, Thread}; use vm::Variants; use vm::api::{Function, FunctionRef, FutureResult, Getable, OpaqueValue, PushAsRef, Pushable, Userdata, ValueRef, VmType, WithVM, IO}; use vm::gc::{Gc, Traverseable}; use gluon::import::add_extern_module; use vm::internal::Value; use gluon::{new_vm, Compiler}; // `Handler` is a type defined in http.glu but since we need to refer to it in the signature of // listen we define a phantom type which we can use with `OpaqueValue` to store a `Handler` in Rust struct Handler<T>(PhantomData<T>); impl<T: VmType + 'static> VmType for Handler<T> { type Type = Self; fn make_type(vm: &Thread) -> ArcType { let typ = (*vm.global_env() .get_env() .find_type_info("examples.http_types.Handler") .unwrap()) .clone() .into_type(); Type::app(typ, collect![T::make_type(vm)]) } } // Rust does not let us define traits on types 
defined in a different crate such as `hyper`. We can // however work around this by defining a wrapper type which we are then able to define the traits // on. struct Wrap<T>(T); macro_rules! define_vmtype { ($name: ident) => { impl VmType for Wrap<$name> { type Type = $name; fn make_type(vm: &Thread) -> ArcType { let typ = concat!("examples.http_types.", stringify!($name)); (*vm.global_env().get_env().find_type_info(typ).unwrap()) .clone() .into_type() } } } } define_vmtype! { Method } impl<'vm> Pushable<'vm> for Wrap<Method> { fn push(self, _: &'vm Thread, context: &mut Context) -> VmResult<()> { use hyper::Method::*; context.stack.push(Value::tag(match self.0 { Get => 0, Post => 1, Delete => 2, _ => { return Err(VmError::Message(format!( "Method `{:?}` does not exist in gluon", self.0 )).into()) } })); Ok(()) } } define_vmtype! { StatusCode } impl<'vm> Getable<'vm> for Wrap<StatusCode> { fn from_value(_: &'vm Thread, value: Variants) -> Self { use hyper::StatusCode::*; match value.as_ref() { ValueRef::Data(data) => Wrap(match data.tag() { 0 => Ok, 1 => NotFound, 2 => InternalServerError, _ => panic!("Unexpected tag"), }), _ => panic!(), } } } // Representation of a http body that is in the prograss of being read pub struct Body(Arc<Mutex<Box<Stream<Item = PushAsRef<Chunk, [u8]>, Error = VmError> + Send>>>); // By implementing `Userdata` on `Body` it can be automatically pushed and retrieved from gluon // threads impl Userdata for Body {} // Types implementing `Userdata` requires a `std::fmt::Debug` implementation so it can be displayed impl fmt::Debug for Body { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "hyper::Body") } } // `Traverseable` is required by `Userdata` so that the garbage collector knows how to scan the // value for garbage collected references. 
Normally objects do not contain any references so this // can be empty impl Traverseable for Body { fn traverse(&self, _: &mut Gc) {} } // `VmType` is the last trait required for a type to implement `Userdata` and defines the type used // in gluon for this Rust type. For opaque `Userdata` values this minimal implementation is enough // as the default implementation of `make_type` will lookup `VmType::Type` from the virtual machine // which should have been registered earlier with `Thread::register_type` impl VmType for Body { type Type = Self; } // Since `Body` implements `Userdata` gluon will automatically marshal the gluon representation // into `&Body` argument fn read_chunk( body: &Body, ) -> FutureResult< Box<Future<Item = IO<Option<PushAsRef<Chunk, [u8]>>>, Error = VmError> + Send + 'static>, > { use futures::future::poll_fn; let body = body.0.clone(); // `FutureResult` is a wrapper type around `Future` which when returned to the interpreter is // polled until completion. After `poll` returns `Ready` the value is then returned to the // gluon function which called `read_chunk` FutureResult(Box::new(poll_fn(move || { let mut stream = body.lock().unwrap(); stream.poll().map(|async| async.map(IO::Value)) }))) } // A http body that is being written pub struct ResponseBody(Arc<Mutex<Option<Sender<Result<Chunk, hyper::Error>>>>>); impl fmt::Debug for ResponseBody { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "hyper::Response") } } impl Userdata for ResponseBody {} impl Traverseable for ResponseBody { fn traverse(&self, _: &mut Gc) {} } impl VmType for ResponseBody { type Type = Self; } fn
( response: &ResponseBody, bytes: &[u8], ) -> FutureResult<Box<Future<Item = IO<()>, Error = VmError> + Send + 'static>> { use futures::future::poll_fn; use futures::AsyncSink; // Turn `bytes´ into a `Chunk` which can be sent to the http body let mut unsent_chunk = Some(Ok(bytes.to_owned().into())); let response = response.0.clone(); FutureResult(Box::new(poll_fn(move || { info!("Starting response send"); let mut sender = response.lock().unwrap(); let sender = sender .as_mut() .expect("Sender has been dropped while still in use"); if let Some(chunk) = unsent_chunk.take() { match sender.start_send(chunk) { Ok(AsyncSink::NotReady(chunk)) => { unsent_chunk = Some(chunk); return Ok(Async::NotReady); } Ok(AsyncSink::Ready) => (), Err(_) => { info!("Could not send http response"); return Ok(Async::Ready(IO::Value(()))); } } } match sender.poll_complete() { Ok(async) => Ok(async.map(IO::Value)), Err(_) => { info!("Could not send http response"); Ok(Async::Ready(IO::Value(()))) } } }))) } // Next we define some record types which are marshalled to and from gluon. These have equivalent // definitions in http_types.glu field_decl! 
{ method, uri, status, body, request, response } type Request = record_type!{ method => Wrap<Method>, uri => String, body => Body }; type Response = record_type!{ status => Wrap<StatusCode> }; type HttpState = record_type!{ request => Request, response => ResponseBody }; fn listen(port: i32, value: WithVM<OpaqueValue<RootedThread, Handler<Response>>>) -> IO<()> { let WithVM { value: handler, vm: thread, } = value; use hyper::server::{Http, Request as HyperRequest, Response as HyperResponse}; // Retrieve the `handle` function from the http module which we use to evaluate values of type // `Handler Response` type ListenFn = fn(OpaqueValue<RootedThread, Handler<Response>>, HttpState) -> IO<Response>; let handle: Function<RootedThread, ListenFn> = thread .get_global("examples.http.handle") .unwrap_or_else(|err| panic!("{}", err)); struct Listen { handle: Function<RootedThread, ListenFn>, handler: OpaqueValue<RootedThread, Handler<Response>>, } impl Service for Listen { type Request = HyperRequest; type Response = HyperResponse; type Error = hyper::Error; type Future = Box<Future<Item = HyperResponse, Error = hyper::Error> + Send + 'static>; fn call(&self, request: HyperRequest) -> Self::Future { let gluon_request = record_no_decl! 
{ // Here we use to `Wrap` type to make `hyper::Request` into a type that can be // pushed to gluon method => Wrap(request.method().clone()), uri => request.uri().to_string(), // Since `Body` implements `Userdata` it can be directly pushed to gluon body => Body(Arc::new(Mutex::new(Box::new(request.body() .map_err(|err| VmError::Message(format!("{}", err))) // `PushAsRef` makes the `body` parameter act as a `&[u8]` which means it is // marshalled to `Array Byte` in gluon .map(PushAsRef::<_, [u8]>::new))))) }; let (response_sender, response_body) = hyper::Body::pair(); let response_sender = Arc::new(Mutex::new(Some(response_sender))); let http_state = record_no_decl!{ request => gluon_request, response => ResponseBody(response_sender.clone()) }; Box::new( self.handle .clone() .call_async(self.handler.clone(), http_state) .then(move |result| match result { Ok(value) => { match value { IO::Value(record_p!{ status }) => { // Drop the sender to so that it the receiver stops waiting for // more chunks *response_sender.lock().unwrap() = None; Ok( HyperResponse::new() .with_status(status.0) .with_body(response_body), ) } IO::Exception(err) => { let _ = stderr().write(err.as_bytes()); Ok( HyperResponse::new() .with_status(StatusCode::InternalServerError), ) } } } Err(err) => { let _ = stderr().write(format!("{}", err).as_bytes()); Ok(HyperResponse::new().with_status(StatusCode::InternalServerError)) } }), ) } } let addr = format!("127.0.0.1:{}", port).parse().unwrap(); let result = Http::new() .bind(&addr, move || { Ok(Listen { handle: handle.clone(), handler: handler.clone(), }) }) .and_then(|server| server.run()); match result { Ok(()) => IO::Value(()), Err(err) => IO::Exception(format!("{}", err)), } } // To let the `http_types` module refer to `Body` and `ResponseBody` we register these types in a // separate function which is called before loading `http_types` pub fn load_types(vm: &Thread) -> VmResult<()> { vm.register_type::<Body>("Body", &[])?; 
vm.register_type::<ResponseBody>("ResponseBody", &[])?; Ok(()) } pub fn load(vm: &Thread) -> VmResult<ExternModule> { ExternModule::new( vm, record! { listen => primitive!(2 listen), read_chunk => primitive!(1 read_chunk), write_response => primitive!(2 write_response) }, ) } fn main() { if let Err(err) = main_() { panic!("{}", err) } } fn main_() -> Result<(), Box<StdError>> { let _ = env_logger::try_init(); let port = env::args() .nth(1) .map(|port| port.parse::<i32>().expect("port")) .unwrap_or(80); let thread = new_vm(); // First load all the http types so we can refer to them from gluon load_types(&thread)?; Compiler::new().run_expr::<()>( &thread, "", r#"let _ = import! "examples/http_types.glu" in () "#, )?; // Load the primitive functions we define in this module add_extern_module(&thread, "http.prim", load); // Last we run our `http_server.glu` module which returns a function which starts listening // on the port we passed from the command line let mut expr = String::new(); { let mut file = File::open("examples/http_server.glu")?; file.read_to_string(&mut expr)?; } let (mut listen, _) = Compiler::new().run_expr::<FunctionRef<fn(i32) -> IO<()>>>(&thread, "http_test", &expr)?; listen.call(port)?; Ok(()) }
write_response
identifier_name
functional_dependencies.rs
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. //! FunctionalDependencies keeps track of functional dependencies //! inside DFSchema. use crate::{DFSchema, DFSchemaRef, DataFusionError, JoinType, Result}; use sqlparser::ast::TableConstraint; use std::collections::HashSet; use std::fmt::{Display, Formatter}; /// This object defines a constraint on a table. #[derive(Debug, Clone, PartialEq, Eq, Hash)] enum Constraint { /// Columns with the given indices form a composite primary key (they are /// jointly unique and not nullable): PrimaryKey(Vec<usize>), /// Columns with the given indices form a composite unique key: Unique(Vec<usize>), } /// This object encapsulates a list of functional constraints: #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct Constraints { inner: Vec<Constraint>, } impl Constraints { /// Create empty constraints pub fn empty() -> Self { Constraints::new(vec![]) } // This method is private. // Outside callers can either create empty constraint using `Constraints::empty` API. // or create constraint from table constraints using `Constraints::new_from_table_constraints` API. 
fn new(constraints: Vec<Constraint>) -> Self { Self { inner: constraints } } /// Convert each `TableConstraint` to corresponding `Constraint` pub fn new_from_table_constraints( constraints: &[TableConstraint], df_schema: &DFSchemaRef, ) -> Result<Self> { let constraints = constraints .iter() .map(|c: &TableConstraint| match c { TableConstraint::Unique { columns, is_primary, .. } => { // Get primary key and/or unique indices in the schema: let indices = columns .iter() .map(|pk| { let idx = df_schema .fields() .iter() .position(|item| { item.qualified_name() == pk.value.clone() }) .ok_or_else(|| { DataFusionError::Execution( "Primary key doesn't exist".to_string(), ) })?; Ok(idx) }) .collect::<Result<Vec<_>>>()?; Ok(if *is_primary { Constraint::PrimaryKey(indices) } else { Constraint::Unique(indices) }) } TableConstraint::ForeignKey { .. } => Err(DataFusionError::Plan( "Foreign key constraints are not currently supported".to_string(), )), TableConstraint::Check { .. } => Err(DataFusionError::Plan( "Check constraints are not currently supported".to_string(), )), TableConstraint::Index { .. } => Err(DataFusionError::Plan( "Indexes are not currently supported".to_string(), )), TableConstraint::FulltextOrSpatial { .. } => Err(DataFusionError::Plan( "Indexes are not currently supported".to_string(), )), }) .collect::<Result<Vec<_>>>()?; Ok(Constraints::new(constraints)) } /// Check whether constraints is empty pub fn is_empty(&self) -> bool { self.inner.is_empty() } } impl Display for Constraints { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { let pk: Vec<String> = self.inner.iter().map(|c| format!("{:?}", c)).collect(); let pk = pk.join(", "); if !pk.is_empty() { write!(f, " constraints=[{pk}]") } else { write!(f, "") } } } /// This object defines a functional dependence in the schema. A functional /// dependence defines a relationship between determinant keys and dependent /// columns. 
A determinant key is a column, or a set of columns, whose value /// uniquely determines values of some other (dependent) columns. If two rows /// have the same determinant key, dependent columns in these rows are /// necessarily the same. If the determinant key is unique, the set of /// dependent columns is equal to the entire schema and the determinant key can /// serve as a primary key. Note that a primary key may "downgrade" into a /// determinant key due to an operation such as a join, and this object is /// used to track dependence relationships in such cases. For more information /// on functional dependencies, see: /// <https://www.scaler.com/topics/dbms/functional-dependency-in-dbms/> #[derive(Debug, Clone, PartialEq, Eq)] pub struct FunctionalDependence { // Column indices of the (possibly composite) determinant key: pub source_indices: Vec<usize>, // Column indices of dependent column(s):
/// this flag is `false`. /// Note that as the schema changes between different stages in a plan, /// such as after LEFT JOIN or RIGHT JOIN operations, this property may /// change. pub nullable: bool, // The functional dependency mode: pub mode: Dependency, } /// Describes functional dependency mode. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum Dependency { Single, // A determinant key may occur only once. Multi, // A determinant key may occur multiple times (in multiple rows). } impl FunctionalDependence { // Creates a new functional dependence. pub fn new( source_indices: Vec<usize>, target_indices: Vec<usize>, nullable: bool, ) -> Self { Self { source_indices, target_indices, nullable, // Start with the least restrictive mode by default: mode: Dependency::Multi, } } pub fn with_mode(mut self, mode: Dependency) -> Self { self.mode = mode; self } } /// This object encapsulates all functional dependencies in a given relation. #[derive(Debug, Clone, PartialEq, Eq)] pub struct FunctionalDependencies { deps: Vec<FunctionalDependence>, } impl FunctionalDependencies { /// Creates an empty `FunctionalDependencies` object. pub fn empty() -> Self { Self { deps: vec![] } } /// Creates a new `FunctionalDependencies` object from a vector of /// `FunctionalDependence` objects. pub fn new(dependencies: Vec<FunctionalDependence>) -> Self { Self { deps: dependencies } } /// Creates a new `FunctionalDependencies` object from the given constraints. 
pub fn new_from_constraints( constraints: Option<&Constraints>, n_field: usize, ) -> Self { if let Some(Constraints { inner: constraints }) = constraints { // Construct dependency objects based on each individual constraint: let dependencies = constraints .iter() .map(|constraint| { // All the field indices are associated with the whole table // since we are dealing with table level constraints: let dependency = match constraint { Constraint::PrimaryKey(indices) => FunctionalDependence::new( indices.to_vec(), (0..n_field).collect::<Vec<_>>(), false, ), Constraint::Unique(indices) => FunctionalDependence::new( indices.to_vec(), (0..n_field).collect::<Vec<_>>(), true, ), }; // As primary keys are guaranteed to be unique, set the // functional dependency mode to `Dependency::Single`: dependency.with_mode(Dependency::Single) }) .collect::<Vec<_>>(); Self::new(dependencies) } else { // There is no constraint, return an empty object: Self::empty() } } pub fn with_dependency(mut self, mode: Dependency) -> Self { self.deps.iter_mut().for_each(|item| item.mode = mode); self } /// Merges the given functional dependencies with these. pub fn extend(&mut self, other: FunctionalDependencies) { self.deps.extend(other.deps); } /// Adds the `offset` value to `source_indices` and `target_indices` for /// each functional dependency. pub fn add_offset(&mut self, offset: usize) { self.deps.iter_mut().for_each( |FunctionalDependence { source_indices, target_indices, .. }| { *source_indices = add_offset_to_vec(source_indices, offset); *target_indices = add_offset_to_vec(target_indices, offset); }, ) } /// Updates `source_indices` and `target_indices` of each functional /// dependence using the index mapping given in `proj_indices`. /// /// Assume that `proj_indices` is \[2, 5, 8\] and we have a functional /// dependence \[5\] (`source_indices`) -> \[5, 8\] (`target_indices`). /// In the updated schema, fields at indices \[2, 5, 8\] will transform /// to \[0, 1, 2\]. 
Therefore, the resulting functional dependence will /// be \[1\] -> \[1, 2\]. pub fn project_functional_dependencies( &self, proj_indices: &[usize], // The argument `n_out` denotes the schema field length, which is needed // to correctly associate a `Single`-mode dependence with the whole table. n_out: usize, ) -> FunctionalDependencies { let mut projected_func_dependencies = vec![]; for FunctionalDependence { source_indices, target_indices, nullable, mode, } in &self.deps { let new_source_indices = update_elements_with_matching_indices(source_indices, proj_indices); let new_target_indices = if *mode == Dependency::Single { // Associate with all of the fields in the schema: (0..n_out).collect() } else { // Update associations according to projection: update_elements_with_matching_indices(target_indices, proj_indices) }; // All of the composite indices should still be valid after projection; // otherwise, functional dependency cannot be propagated. if new_source_indices.len() == source_indices.len() { let new_func_dependence = FunctionalDependence::new( new_source_indices, new_target_indices, *nullable, ) .with_mode(*mode); projected_func_dependencies.push(new_func_dependence); } } FunctionalDependencies::new(projected_func_dependencies) } /// This function joins this set of functional dependencies with the `other` /// according to the given `join_type`. 
pub fn join( &self, other: &FunctionalDependencies, join_type: &JoinType, left_cols_len: usize, ) -> FunctionalDependencies { // Get mutable copies of left and right side dependencies: let mut right_func_dependencies = other.clone(); let mut left_func_dependencies = self.clone(); match join_type { JoinType::Inner | JoinType::Left | JoinType::Right => { // Add offset to right schema: right_func_dependencies.add_offset(left_cols_len); // Result may have multiple values, update the dependency mode: left_func_dependencies = left_func_dependencies.with_dependency(Dependency::Multi); right_func_dependencies = right_func_dependencies.with_dependency(Dependency::Multi); if *join_type == JoinType::Left { // Downgrade the right side, since it may have additional NULL values: right_func_dependencies.downgrade_dependencies(); } else if *join_type == JoinType::Right { // Downgrade the left side, since it may have additional NULL values: left_func_dependencies.downgrade_dependencies(); } // Combine left and right functional dependencies: left_func_dependencies.extend(right_func_dependencies); left_func_dependencies } JoinType::LeftSemi | JoinType::LeftAnti => { // These joins preserve functional dependencies of the left side: left_func_dependencies } JoinType::RightSemi | JoinType::RightAnti => { // These joins preserve functional dependencies of the right side: right_func_dependencies } JoinType::Full => { // All of the functional dependencies are lost in a FULL join: FunctionalDependencies::empty() } } } /// This function downgrades a functional dependency when nullability becomes /// a possibility: /// - If the dependency in question is UNIQUE (i.e. nullable), a new null value /// invalidates the dependency. /// - If the dependency in question is PRIMARY KEY (i.e. not nullable), a new /// null value turns it into UNIQUE mode. 
fn downgrade_dependencies(&mut self) { // Delete nullable dependencies, since they are no longer valid: self.deps.retain(|item| !item.nullable); self.deps.iter_mut().for_each(|item| item.nullable = true); } /// This function ensures that functional dependencies involving uniquely /// occuring determinant keys cover their entire table in terms of /// dependent columns. pub fn extend_target_indices(&mut self, n_out: usize) { self.deps.iter_mut().for_each( |FunctionalDependence { mode, target_indices, .. }| { // If unique, cover the whole table: if *mode == Dependency::Single { *target_indices = (0..n_out).collect::<Vec<_>>(); } }, ) } } /// Calculates functional dependencies for aggregate output, when there is a GROUP BY expression. pub fn aggregate_functional_dependencies( aggr_input_schema: &DFSchema, group_by_expr_names: &[String], aggr_schema: &DFSchema, ) -> FunctionalDependencies { let mut aggregate_func_dependencies = vec![]; let aggr_input_fields = aggr_input_schema.fields(); let aggr_fields = aggr_schema.fields(); // Association covers the whole table: let target_indices = (0..aggr_schema.fields().len()).collect::<Vec<_>>(); // Get functional dependencies of the schema: let func_dependencies = aggr_input_schema.functional_dependencies(); for FunctionalDependence { source_indices, nullable, mode, .. 
} in &func_dependencies.deps { // Keep source indices in a `HashSet` to prevent duplicate entries: let mut new_source_indices = HashSet::new(); let source_field_names = source_indices .iter() .map(|&idx| aggr_input_fields[idx].qualified_name()) .collect::<Vec<_>>(); for (idx, group_by_expr_name) in group_by_expr_names.iter().enumerate() { // When one of the input determinant expressions matches with // the GROUP BY expression, add the index of the GROUP BY // expression as a new determinant key: if source_field_names.contains(group_by_expr_name) { new_source_indices.insert(idx); } } // All of the composite indices occur in the GROUP BY expression: if new_source_indices.len() == source_indices.len() { aggregate_func_dependencies.push( FunctionalDependence::new( new_source_indices.into_iter().collect(), target_indices.clone(), *nullable, ) // input uniqueness stays the same when GROUP BY matches with input functional dependence determinants .with_mode(*mode), ); } } // If we have a single GROUP BY key, we can guarantee uniqueness after // aggregation: if group_by_expr_names.len() == 1 { // If `source_indices` contain 0, delete this functional dependency // as it will be added anyway with mode `Dependency::Single`: if let Some(idx) = aggregate_func_dependencies .iter() .position(|item| item.source_indices.contains(&0)) { // Delete the functional dependency that contains zeroth idx: aggregate_func_dependencies.remove(idx); } // Add a new functional dependency associated with the whole table: aggregate_func_dependencies.push( // Use nullable property of the group by expression FunctionalDependence::new( vec![0], target_indices, aggr_fields[0].is_nullable(), ) .with_mode(Dependency::Single), ); } FunctionalDependencies::new(aggregate_func_dependencies) } /// Returns target indices, for the determinant keys that are inside /// group by expressions. 
pub fn get_target_functional_dependencies( schema: &DFSchema, group_by_expr_names: &[String], ) -> Option<Vec<usize>> { let mut combined_target_indices = HashSet::new(); let dependencies = schema.functional_dependencies(); let field_names = schema .fields() .iter() .map(|item| item.qualified_name()) .collect::<Vec<_>>(); for FunctionalDependence { source_indices, target_indices, .. } in &dependencies.deps { let source_key_names = source_indices .iter() .map(|id_key_idx| field_names[*id_key_idx].clone()) .collect::<Vec<_>>(); // If the GROUP BY expression contains a determinant key, we can use // the associated fields after aggregation even if they are not part // of the GROUP BY expression. if source_key_names .iter() .all(|source_key_name| group_by_expr_names.contains(source_key_name)) { combined_target_indices.extend(target_indices.iter()); } } (!combined_target_indices.is_empty()) .then_some(combined_target_indices.iter().cloned().collect::<Vec<_>>()) } /// Updates entries inside the `entries` vector with their corresponding /// indices inside the `proj_indices` vector. fn update_elements_with_matching_indices( entries: &[usize], proj_indices: &[usize], ) -> Vec<usize> { entries .iter() .filter_map(|val| proj_indices.iter().position(|proj_idx| proj_idx == val)) .collect() } /// Adds `offset` value to each entry inside `in_data`. fn add_offset_to_vec<T: Copy + std::ops::Add<Output = T>>( in_data: &[T], offset: T, ) -> Vec<T> { in_data.iter().map(|&item| item + offset).collect() }
pub target_indices: Vec<usize>, /// Flag indicating whether one of the `source_indices` can receive NULL values. /// For a data source, if the constraint in question is `Constraint::Unique`, /// this flag is `true`. If the constraint in question is `Constraint::PrimaryKey`,
random_line_split
functional_dependencies.rs
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. //! FunctionalDependencies keeps track of functional dependencies //! inside DFSchema. use crate::{DFSchema, DFSchemaRef, DataFusionError, JoinType, Result}; use sqlparser::ast::TableConstraint; use std::collections::HashSet; use std::fmt::{Display, Formatter}; /// This object defines a constraint on a table. #[derive(Debug, Clone, PartialEq, Eq, Hash)] enum Constraint { /// Columns with the given indices form a composite primary key (they are /// jointly unique and not nullable): PrimaryKey(Vec<usize>), /// Columns with the given indices form a composite unique key: Unique(Vec<usize>), } /// This object encapsulates a list of functional constraints: #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct Constraints { inner: Vec<Constraint>, } impl Constraints { /// Create empty constraints pub fn empty() -> Self { Constraints::new(vec![]) } // This method is private. // Outside callers can either create empty constraint using `Constraints::empty` API. // or create constraint from table constraints using `Constraints::new_from_table_constraints` API. 
fn new(constraints: Vec<Constraint>) -> Self { Self { inner: constraints } } /// Convert each `TableConstraint` to corresponding `Constraint` pub fn new_from_table_constraints( constraints: &[TableConstraint], df_schema: &DFSchemaRef, ) -> Result<Self>
/// Check whether constraints is empty pub fn is_empty(&self) -> bool { self.inner.is_empty() } } impl Display for Constraints { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { let pk: Vec<String> = self.inner.iter().map(|c| format!("{:?}", c)).collect(); let pk = pk.join(", "); if !pk.is_empty() { write!(f, " constraints=[{pk}]") } else { write!(f, "") } } } /// This object defines a functional dependence in the schema. A functional /// dependence defines a relationship between determinant keys and dependent /// columns. A determinant key is a column, or a set of columns, whose value /// uniquely determines values of some other (dependent) columns. If two rows /// have the same determinant key, dependent columns in these rows are /// necessarily the same. If the determinant key is unique, the set of /// dependent columns is equal to the entire schema and the determinant key can /// serve as a primary key. Note that a primary key may "downgrade" into a /// determinant key due to an operation such as a join, and this object is /// used to track dependence relationships in such cases. For more information /// on functional dependencies, see: /// <https://www.scaler.com/topics/dbms/functional-dependency-in-dbms/> #[derive(Debug, Clone, PartialEq, Eq)] pub struct FunctionalDependence { // Column indices of the (possibly composite) determinant key: pub source_indices: Vec<usize>, // Column indices of dependent column(s): pub target_indices: Vec<usize>, /// Flag indicating whether one of the `source_indices` can receive NULL values. /// For a data source, if the constraint in question is `Constraint::Unique`, /// this flag is `true`. If the constraint in question is `Constraint::PrimaryKey`, /// this flag is `false`. /// Note that as the schema changes between different stages in a plan, /// such as after LEFT JOIN or RIGHT JOIN operations, this property may /// change. 
pub nullable: bool, // The functional dependency mode: pub mode: Dependency, } /// Describes functional dependency mode. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum Dependency { Single, // A determinant key may occur only once. Multi, // A determinant key may occur multiple times (in multiple rows). } impl FunctionalDependence { // Creates a new functional dependence. pub fn new( source_indices: Vec<usize>, target_indices: Vec<usize>, nullable: bool, ) -> Self { Self { source_indices, target_indices, nullable, // Start with the least restrictive mode by default: mode: Dependency::Multi, } } pub fn with_mode(mut self, mode: Dependency) -> Self { self.mode = mode; self } } /// This object encapsulates all functional dependencies in a given relation. #[derive(Debug, Clone, PartialEq, Eq)] pub struct FunctionalDependencies { deps: Vec<FunctionalDependence>, } impl FunctionalDependencies { /// Creates an empty `FunctionalDependencies` object. pub fn empty() -> Self { Self { deps: vec![] } } /// Creates a new `FunctionalDependencies` object from a vector of /// `FunctionalDependence` objects. pub fn new(dependencies: Vec<FunctionalDependence>) -> Self { Self { deps: dependencies } } /// Creates a new `FunctionalDependencies` object from the given constraints. 
pub fn new_from_constraints( constraints: Option<&Constraints>, n_field: usize, ) -> Self { if let Some(Constraints { inner: constraints }) = constraints { // Construct dependency objects based on each individual constraint: let dependencies = constraints .iter() .map(|constraint| { // All the field indices are associated with the whole table // since we are dealing with table level constraints: let dependency = match constraint { Constraint::PrimaryKey(indices) => FunctionalDependence::new( indices.to_vec(), (0..n_field).collect::<Vec<_>>(), false, ), Constraint::Unique(indices) => FunctionalDependence::new( indices.to_vec(), (0..n_field).collect::<Vec<_>>(), true, ), }; // As primary keys are guaranteed to be unique, set the // functional dependency mode to `Dependency::Single`: dependency.with_mode(Dependency::Single) }) .collect::<Vec<_>>(); Self::new(dependencies) } else { // There is no constraint, return an empty object: Self::empty() } } pub fn with_dependency(mut self, mode: Dependency) -> Self { self.deps.iter_mut().for_each(|item| item.mode = mode); self } /// Merges the given functional dependencies with these. pub fn extend(&mut self, other: FunctionalDependencies) { self.deps.extend(other.deps); } /// Adds the `offset` value to `source_indices` and `target_indices` for /// each functional dependency. pub fn add_offset(&mut self, offset: usize) { self.deps.iter_mut().for_each( |FunctionalDependence { source_indices, target_indices, .. }| { *source_indices = add_offset_to_vec(source_indices, offset); *target_indices = add_offset_to_vec(target_indices, offset); }, ) } /// Updates `source_indices` and `target_indices` of each functional /// dependence using the index mapping given in `proj_indices`. /// /// Assume that `proj_indices` is \[2, 5, 8\] and we have a functional /// dependence \[5\] (`source_indices`) -> \[5, 8\] (`target_indices`). /// In the updated schema, fields at indices \[2, 5, 8\] will transform /// to \[0, 1, 2\]. 
Therefore, the resulting functional dependence will /// be \[1\] -> \[1, 2\]. pub fn project_functional_dependencies( &self, proj_indices: &[usize], // The argument `n_out` denotes the schema field length, which is needed // to correctly associate a `Single`-mode dependence with the whole table. n_out: usize, ) -> FunctionalDependencies { let mut projected_func_dependencies = vec![]; for FunctionalDependence { source_indices, target_indices, nullable, mode, } in &self.deps { let new_source_indices = update_elements_with_matching_indices(source_indices, proj_indices); let new_target_indices = if *mode == Dependency::Single { // Associate with all of the fields in the schema: (0..n_out).collect() } else { // Update associations according to projection: update_elements_with_matching_indices(target_indices, proj_indices) }; // All of the composite indices should still be valid after projection; // otherwise, functional dependency cannot be propagated. if new_source_indices.len() == source_indices.len() { let new_func_dependence = FunctionalDependence::new( new_source_indices, new_target_indices, *nullable, ) .with_mode(*mode); projected_func_dependencies.push(new_func_dependence); } } FunctionalDependencies::new(projected_func_dependencies) } /// This function joins this set of functional dependencies with the `other` /// according to the given `join_type`. 
pub fn join( &self, other: &FunctionalDependencies, join_type: &JoinType, left_cols_len: usize, ) -> FunctionalDependencies { // Get mutable copies of left and right side dependencies: let mut right_func_dependencies = other.clone(); let mut left_func_dependencies = self.clone(); match join_type { JoinType::Inner | JoinType::Left | JoinType::Right => { // Add offset to right schema: right_func_dependencies.add_offset(left_cols_len); // Result may have multiple values, update the dependency mode: left_func_dependencies = left_func_dependencies.with_dependency(Dependency::Multi); right_func_dependencies = right_func_dependencies.with_dependency(Dependency::Multi); if *join_type == JoinType::Left { // Downgrade the right side, since it may have additional NULL values: right_func_dependencies.downgrade_dependencies(); } else if *join_type == JoinType::Right { // Downgrade the left side, since it may have additional NULL values: left_func_dependencies.downgrade_dependencies(); } // Combine left and right functional dependencies: left_func_dependencies.extend(right_func_dependencies); left_func_dependencies } JoinType::LeftSemi | JoinType::LeftAnti => { // These joins preserve functional dependencies of the left side: left_func_dependencies } JoinType::RightSemi | JoinType::RightAnti => { // These joins preserve functional dependencies of the right side: right_func_dependencies } JoinType::Full => { // All of the functional dependencies are lost in a FULL join: FunctionalDependencies::empty() } } } /// This function downgrades a functional dependency when nullability becomes /// a possibility: /// - If the dependency in question is UNIQUE (i.e. nullable), a new null value /// invalidates the dependency. /// - If the dependency in question is PRIMARY KEY (i.e. not nullable), a new /// null value turns it into UNIQUE mode. 
fn downgrade_dependencies(&mut self) { // Delete nullable dependencies, since they are no longer valid: self.deps.retain(|item| !item.nullable); self.deps.iter_mut().for_each(|item| item.nullable = true); } /// This function ensures that functional dependencies involving uniquely /// occuring determinant keys cover their entire table in terms of /// dependent columns. pub fn extend_target_indices(&mut self, n_out: usize) { self.deps.iter_mut().for_each( |FunctionalDependence { mode, target_indices, .. }| { // If unique, cover the whole table: if *mode == Dependency::Single { *target_indices = (0..n_out).collect::<Vec<_>>(); } }, ) } } /// Calculates functional dependencies for aggregate output, when there is a GROUP BY expression. pub fn aggregate_functional_dependencies( aggr_input_schema: &DFSchema, group_by_expr_names: &[String], aggr_schema: &DFSchema, ) -> FunctionalDependencies { let mut aggregate_func_dependencies = vec![]; let aggr_input_fields = aggr_input_schema.fields(); let aggr_fields = aggr_schema.fields(); // Association covers the whole table: let target_indices = (0..aggr_schema.fields().len()).collect::<Vec<_>>(); // Get functional dependencies of the schema: let func_dependencies = aggr_input_schema.functional_dependencies(); for FunctionalDependence { source_indices, nullable, mode, .. 
} in &func_dependencies.deps { // Keep source indices in a `HashSet` to prevent duplicate entries: let mut new_source_indices = HashSet::new(); let source_field_names = source_indices .iter() .map(|&idx| aggr_input_fields[idx].qualified_name()) .collect::<Vec<_>>(); for (idx, group_by_expr_name) in group_by_expr_names.iter().enumerate() { // When one of the input determinant expressions matches with // the GROUP BY expression, add the index of the GROUP BY // expression as a new determinant key: if source_field_names.contains(group_by_expr_name) { new_source_indices.insert(idx); } } // All of the composite indices occur in the GROUP BY expression: if new_source_indices.len() == source_indices.len() { aggregate_func_dependencies.push( FunctionalDependence::new( new_source_indices.into_iter().collect(), target_indices.clone(), *nullable, ) // input uniqueness stays the same when GROUP BY matches with input functional dependence determinants .with_mode(*mode), ); } } // If we have a single GROUP BY key, we can guarantee uniqueness after // aggregation: if group_by_expr_names.len() == 1 { // If `source_indices` contain 0, delete this functional dependency // as it will be added anyway with mode `Dependency::Single`: if let Some(idx) = aggregate_func_dependencies .iter() .position(|item| item.source_indices.contains(&0)) { // Delete the functional dependency that contains zeroth idx: aggregate_func_dependencies.remove(idx); } // Add a new functional dependency associated with the whole table: aggregate_func_dependencies.push( // Use nullable property of the group by expression FunctionalDependence::new( vec![0], target_indices, aggr_fields[0].is_nullable(), ) .with_mode(Dependency::Single), ); } FunctionalDependencies::new(aggregate_func_dependencies) } /// Returns target indices, for the determinant keys that are inside /// group by expressions. 
pub fn get_target_functional_dependencies( schema: &DFSchema, group_by_expr_names: &[String], ) -> Option<Vec<usize>> { let mut combined_target_indices = HashSet::new(); let dependencies = schema.functional_dependencies(); let field_names = schema .fields() .iter() .map(|item| item.qualified_name()) .collect::<Vec<_>>(); for FunctionalDependence { source_indices, target_indices, .. } in &dependencies.deps { let source_key_names = source_indices .iter() .map(|id_key_idx| field_names[*id_key_idx].clone()) .collect::<Vec<_>>(); // If the GROUP BY expression contains a determinant key, we can use // the associated fields after aggregation even if they are not part // of the GROUP BY expression. if source_key_names .iter() .all(|source_key_name| group_by_expr_names.contains(source_key_name)) { combined_target_indices.extend(target_indices.iter()); } } (!combined_target_indices.is_empty()) .then_some(combined_target_indices.iter().cloned().collect::<Vec<_>>()) } /// Updates entries inside the `entries` vector with their corresponding /// indices inside the `proj_indices` vector. fn update_elements_with_matching_indices( entries: &[usize], proj_indices: &[usize], ) -> Vec<usize> { entries .iter() .filter_map(|val| proj_indices.iter().position(|proj_idx| proj_idx == val)) .collect() } /// Adds `offset` value to each entry inside `in_data`. fn add_offset_to_vec<T: Copy + std::ops::Add<Output = T>>( in_data: &[T], offset: T, ) -> Vec<T> { in_data.iter().map(|&item| item + offset).collect() }
{ let constraints = constraints .iter() .map(|c: &TableConstraint| match c { TableConstraint::Unique { columns, is_primary, .. } => { // Get primary key and/or unique indices in the schema: let indices = columns .iter() .map(|pk| { let idx = df_schema .fields() .iter() .position(|item| { item.qualified_name() == pk.value.clone() }) .ok_or_else(|| { DataFusionError::Execution( "Primary key doesn't exist".to_string(), ) })?; Ok(idx) }) .collect::<Result<Vec<_>>>()?; Ok(if *is_primary { Constraint::PrimaryKey(indices) } else { Constraint::Unique(indices) }) } TableConstraint::ForeignKey { .. } => Err(DataFusionError::Plan( "Foreign key constraints are not currently supported".to_string(), )), TableConstraint::Check { .. } => Err(DataFusionError::Plan( "Check constraints are not currently supported".to_string(), )), TableConstraint::Index { .. } => Err(DataFusionError::Plan( "Indexes are not currently supported".to_string(), )), TableConstraint::FulltextOrSpatial { .. } => Err(DataFusionError::Plan( "Indexes are not currently supported".to_string(), )), }) .collect::<Result<Vec<_>>>()?; Ok(Constraints::new(constraints)) }
identifier_body
functional_dependencies.rs
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. //! FunctionalDependencies keeps track of functional dependencies //! inside DFSchema. use crate::{DFSchema, DFSchemaRef, DataFusionError, JoinType, Result}; use sqlparser::ast::TableConstraint; use std::collections::HashSet; use std::fmt::{Display, Formatter}; /// This object defines a constraint on a table. #[derive(Debug, Clone, PartialEq, Eq, Hash)] enum Constraint { /// Columns with the given indices form a composite primary key (they are /// jointly unique and not nullable): PrimaryKey(Vec<usize>), /// Columns with the given indices form a composite unique key: Unique(Vec<usize>), } /// This object encapsulates a list of functional constraints: #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct Constraints { inner: Vec<Constraint>, } impl Constraints { /// Create empty constraints pub fn empty() -> Self { Constraints::new(vec![]) } // This method is private. // Outside callers can either create empty constraint using `Constraints::empty` API. // or create constraint from table constraints using `Constraints::new_from_table_constraints` API. 
fn new(constraints: Vec<Constraint>) -> Self { Self { inner: constraints } } /// Convert each `TableConstraint` to corresponding `Constraint` pub fn new_from_table_constraints( constraints: &[TableConstraint], df_schema: &DFSchemaRef, ) -> Result<Self> { let constraints = constraints .iter() .map(|c: &TableConstraint| match c { TableConstraint::Unique { columns, is_primary, .. } => { // Get primary key and/or unique indices in the schema: let indices = columns .iter() .map(|pk| { let idx = df_schema .fields() .iter() .position(|item| { item.qualified_name() == pk.value.clone() }) .ok_or_else(|| { DataFusionError::Execution( "Primary key doesn't exist".to_string(), ) })?; Ok(idx) }) .collect::<Result<Vec<_>>>()?; Ok(if *is_primary { Constraint::PrimaryKey(indices) } else { Constraint::Unique(indices) }) } TableConstraint::ForeignKey { .. } => Err(DataFusionError::Plan( "Foreign key constraints are not currently supported".to_string(), )), TableConstraint::Check { .. } => Err(DataFusionError::Plan( "Check constraints are not currently supported".to_string(), )), TableConstraint::Index { .. } => Err(DataFusionError::Plan( "Indexes are not currently supported".to_string(), )), TableConstraint::FulltextOrSpatial { .. } => Err(DataFusionError::Plan( "Indexes are not currently supported".to_string(), )), }) .collect::<Result<Vec<_>>>()?; Ok(Constraints::new(constraints)) } /// Check whether constraints is empty pub fn is_empty(&self) -> bool { self.inner.is_empty() } } impl Display for Constraints { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { let pk: Vec<String> = self.inner.iter().map(|c| format!("{:?}", c)).collect(); let pk = pk.join(", "); if !pk.is_empty() { write!(f, " constraints=[{pk}]") } else { write!(f, "") } } } /// This object defines a functional dependence in the schema. A functional /// dependence defines a relationship between determinant keys and dependent /// columns. 
A determinant key is a column, or a set of columns, whose value /// uniquely determines values of some other (dependent) columns. If two rows /// have the same determinant key, dependent columns in these rows are /// necessarily the same. If the determinant key is unique, the set of /// dependent columns is equal to the entire schema and the determinant key can /// serve as a primary key. Note that a primary key may "downgrade" into a /// determinant key due to an operation such as a join, and this object is /// used to track dependence relationships in such cases. For more information /// on functional dependencies, see: /// <https://www.scaler.com/topics/dbms/functional-dependency-in-dbms/> #[derive(Debug, Clone, PartialEq, Eq)] pub struct FunctionalDependence { // Column indices of the (possibly composite) determinant key: pub source_indices: Vec<usize>, // Column indices of dependent column(s): pub target_indices: Vec<usize>, /// Flag indicating whether one of the `source_indices` can receive NULL values. /// For a data source, if the constraint in question is `Constraint::Unique`, /// this flag is `true`. If the constraint in question is `Constraint::PrimaryKey`, /// this flag is `false`. /// Note that as the schema changes between different stages in a plan, /// such as after LEFT JOIN or RIGHT JOIN operations, this property may /// change. pub nullable: bool, // The functional dependency mode: pub mode: Dependency, } /// Describes functional dependency mode. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum Dependency { Single, // A determinant key may occur only once. Multi, // A determinant key may occur multiple times (in multiple rows). } impl FunctionalDependence { // Creates a new functional dependence. 
pub fn new( source_indices: Vec<usize>, target_indices: Vec<usize>, nullable: bool, ) -> Self { Self { source_indices, target_indices, nullable, // Start with the least restrictive mode by default: mode: Dependency::Multi, } } pub fn with_mode(mut self, mode: Dependency) -> Self { self.mode = mode; self } } /// This object encapsulates all functional dependencies in a given relation. #[derive(Debug, Clone, PartialEq, Eq)] pub struct FunctionalDependencies { deps: Vec<FunctionalDependence>, } impl FunctionalDependencies { /// Creates an empty `FunctionalDependencies` object. pub fn empty() -> Self { Self { deps: vec![] } } /// Creates a new `FunctionalDependencies` object from a vector of /// `FunctionalDependence` objects. pub fn
(dependencies: Vec<FunctionalDependence>) -> Self { Self { deps: dependencies } } /// Creates a new `FunctionalDependencies` object from the given constraints. pub fn new_from_constraints( constraints: Option<&Constraints>, n_field: usize, ) -> Self { if let Some(Constraints { inner: constraints }) = constraints { // Construct dependency objects based on each individual constraint: let dependencies = constraints .iter() .map(|constraint| { // All the field indices are associated with the whole table // since we are dealing with table level constraints: let dependency = match constraint { Constraint::PrimaryKey(indices) => FunctionalDependence::new( indices.to_vec(), (0..n_field).collect::<Vec<_>>(), false, ), Constraint::Unique(indices) => FunctionalDependence::new( indices.to_vec(), (0..n_field).collect::<Vec<_>>(), true, ), }; // As primary keys are guaranteed to be unique, set the // functional dependency mode to `Dependency::Single`: dependency.with_mode(Dependency::Single) }) .collect::<Vec<_>>(); Self::new(dependencies) } else { // There is no constraint, return an empty object: Self::empty() } } pub fn with_dependency(mut self, mode: Dependency) -> Self { self.deps.iter_mut().for_each(|item| item.mode = mode); self } /// Merges the given functional dependencies with these. pub fn extend(&mut self, other: FunctionalDependencies) { self.deps.extend(other.deps); } /// Adds the `offset` value to `source_indices` and `target_indices` for /// each functional dependency. pub fn add_offset(&mut self, offset: usize) { self.deps.iter_mut().for_each( |FunctionalDependence { source_indices, target_indices, .. }| { *source_indices = add_offset_to_vec(source_indices, offset); *target_indices = add_offset_to_vec(target_indices, offset); }, ) } /// Updates `source_indices` and `target_indices` of each functional /// dependence using the index mapping given in `proj_indices`. 
/// /// Assume that `proj_indices` is \[2, 5, 8\] and we have a functional /// dependence \[5\] (`source_indices`) -> \[5, 8\] (`target_indices`). /// In the updated schema, fields at indices \[2, 5, 8\] will transform /// to \[0, 1, 2\]. Therefore, the resulting functional dependence will /// be \[1\] -> \[1, 2\]. pub fn project_functional_dependencies( &self, proj_indices: &[usize], // The argument `n_out` denotes the schema field length, which is needed // to correctly associate a `Single`-mode dependence with the whole table. n_out: usize, ) -> FunctionalDependencies { let mut projected_func_dependencies = vec![]; for FunctionalDependence { source_indices, target_indices, nullable, mode, } in &self.deps { let new_source_indices = update_elements_with_matching_indices(source_indices, proj_indices); let new_target_indices = if *mode == Dependency::Single { // Associate with all of the fields in the schema: (0..n_out).collect() } else { // Update associations according to projection: update_elements_with_matching_indices(target_indices, proj_indices) }; // All of the composite indices should still be valid after projection; // otherwise, functional dependency cannot be propagated. if new_source_indices.len() == source_indices.len() { let new_func_dependence = FunctionalDependence::new( new_source_indices, new_target_indices, *nullable, ) .with_mode(*mode); projected_func_dependencies.push(new_func_dependence); } } FunctionalDependencies::new(projected_func_dependencies) } /// This function joins this set of functional dependencies with the `other` /// according to the given `join_type`. 
pub fn join( &self, other: &FunctionalDependencies, join_type: &JoinType, left_cols_len: usize, ) -> FunctionalDependencies { // Get mutable copies of left and right side dependencies: let mut right_func_dependencies = other.clone(); let mut left_func_dependencies = self.clone(); match join_type { JoinType::Inner | JoinType::Left | JoinType::Right => { // Add offset to right schema: right_func_dependencies.add_offset(left_cols_len); // Result may have multiple values, update the dependency mode: left_func_dependencies = left_func_dependencies.with_dependency(Dependency::Multi); right_func_dependencies = right_func_dependencies.with_dependency(Dependency::Multi); if *join_type == JoinType::Left { // Downgrade the right side, since it may have additional NULL values: right_func_dependencies.downgrade_dependencies(); } else if *join_type == JoinType::Right { // Downgrade the left side, since it may have additional NULL values: left_func_dependencies.downgrade_dependencies(); } // Combine left and right functional dependencies: left_func_dependencies.extend(right_func_dependencies); left_func_dependencies } JoinType::LeftSemi | JoinType::LeftAnti => { // These joins preserve functional dependencies of the left side: left_func_dependencies } JoinType::RightSemi | JoinType::RightAnti => { // These joins preserve functional dependencies of the right side: right_func_dependencies } JoinType::Full => { // All of the functional dependencies are lost in a FULL join: FunctionalDependencies::empty() } } } /// This function downgrades a functional dependency when nullability becomes /// a possibility: /// - If the dependency in question is UNIQUE (i.e. nullable), a new null value /// invalidates the dependency. /// - If the dependency in question is PRIMARY KEY (i.e. not nullable), a new /// null value turns it into UNIQUE mode. 
fn downgrade_dependencies(&mut self) { // Delete nullable dependencies, since they are no longer valid: self.deps.retain(|item| !item.nullable); self.deps.iter_mut().for_each(|item| item.nullable = true); } /// This function ensures that functional dependencies involving uniquely /// occuring determinant keys cover their entire table in terms of /// dependent columns. pub fn extend_target_indices(&mut self, n_out: usize) { self.deps.iter_mut().for_each( |FunctionalDependence { mode, target_indices, .. }| { // If unique, cover the whole table: if *mode == Dependency::Single { *target_indices = (0..n_out).collect::<Vec<_>>(); } }, ) } } /// Calculates functional dependencies for aggregate output, when there is a GROUP BY expression. pub fn aggregate_functional_dependencies( aggr_input_schema: &DFSchema, group_by_expr_names: &[String], aggr_schema: &DFSchema, ) -> FunctionalDependencies { let mut aggregate_func_dependencies = vec![]; let aggr_input_fields = aggr_input_schema.fields(); let aggr_fields = aggr_schema.fields(); // Association covers the whole table: let target_indices = (0..aggr_schema.fields().len()).collect::<Vec<_>>(); // Get functional dependencies of the schema: let func_dependencies = aggr_input_schema.functional_dependencies(); for FunctionalDependence { source_indices, nullable, mode, .. 
} in &func_dependencies.deps { // Keep source indices in a `HashSet` to prevent duplicate entries: let mut new_source_indices = HashSet::new(); let source_field_names = source_indices .iter() .map(|&idx| aggr_input_fields[idx].qualified_name()) .collect::<Vec<_>>(); for (idx, group_by_expr_name) in group_by_expr_names.iter().enumerate() { // When one of the input determinant expressions matches with // the GROUP BY expression, add the index of the GROUP BY // expression as a new determinant key: if source_field_names.contains(group_by_expr_name) { new_source_indices.insert(idx); } } // All of the composite indices occur in the GROUP BY expression: if new_source_indices.len() == source_indices.len() { aggregate_func_dependencies.push( FunctionalDependence::new( new_source_indices.into_iter().collect(), target_indices.clone(), *nullable, ) // input uniqueness stays the same when GROUP BY matches with input functional dependence determinants .with_mode(*mode), ); } } // If we have a single GROUP BY key, we can guarantee uniqueness after // aggregation: if group_by_expr_names.len() == 1 { // If `source_indices` contain 0, delete this functional dependency // as it will be added anyway with mode `Dependency::Single`: if let Some(idx) = aggregate_func_dependencies .iter() .position(|item| item.source_indices.contains(&0)) { // Delete the functional dependency that contains zeroth idx: aggregate_func_dependencies.remove(idx); } // Add a new functional dependency associated with the whole table: aggregate_func_dependencies.push( // Use nullable property of the group by expression FunctionalDependence::new( vec![0], target_indices, aggr_fields[0].is_nullable(), ) .with_mode(Dependency::Single), ); } FunctionalDependencies::new(aggregate_func_dependencies) } /// Returns target indices, for the determinant keys that are inside /// group by expressions. 
pub fn get_target_functional_dependencies( schema: &DFSchema, group_by_expr_names: &[String], ) -> Option<Vec<usize>> { let mut combined_target_indices = HashSet::new(); let dependencies = schema.functional_dependencies(); let field_names = schema .fields() .iter() .map(|item| item.qualified_name()) .collect::<Vec<_>>(); for FunctionalDependence { source_indices, target_indices, .. } in &dependencies.deps { let source_key_names = source_indices .iter() .map(|id_key_idx| field_names[*id_key_idx].clone()) .collect::<Vec<_>>(); // If the GROUP BY expression contains a determinant key, we can use // the associated fields after aggregation even if they are not part // of the GROUP BY expression. if source_key_names .iter() .all(|source_key_name| group_by_expr_names.contains(source_key_name)) { combined_target_indices.extend(target_indices.iter()); } } (!combined_target_indices.is_empty()) .then_some(combined_target_indices.iter().cloned().collect::<Vec<_>>()) } /// Updates entries inside the `entries` vector with their corresponding /// indices inside the `proj_indices` vector. fn update_elements_with_matching_indices( entries: &[usize], proj_indices: &[usize], ) -> Vec<usize> { entries .iter() .filter_map(|val| proj_indices.iter().position(|proj_idx| proj_idx == val)) .collect() } /// Adds `offset` value to each entry inside `in_data`. fn add_offset_to_vec<T: Copy + std::ops::Add<Output = T>>( in_data: &[T], offset: T, ) -> Vec<T> { in_data.iter().map(|&item| item + offset).collect() }
new
identifier_name
functional_dependencies.rs
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. //! FunctionalDependencies keeps track of functional dependencies //! inside DFSchema. use crate::{DFSchema, DFSchemaRef, DataFusionError, JoinType, Result}; use sqlparser::ast::TableConstraint; use std::collections::HashSet; use std::fmt::{Display, Formatter}; /// This object defines a constraint on a table. #[derive(Debug, Clone, PartialEq, Eq, Hash)] enum Constraint { /// Columns with the given indices form a composite primary key (they are /// jointly unique and not nullable): PrimaryKey(Vec<usize>), /// Columns with the given indices form a composite unique key: Unique(Vec<usize>), } /// This object encapsulates a list of functional constraints: #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct Constraints { inner: Vec<Constraint>, } impl Constraints { /// Create empty constraints pub fn empty() -> Self { Constraints::new(vec![]) } // This method is private. // Outside callers can either create empty constraint using `Constraints::empty` API. // or create constraint from table constraints using `Constraints::new_from_table_constraints` API. 
fn new(constraints: Vec<Constraint>) -> Self { Self { inner: constraints } } /// Convert each `TableConstraint` to corresponding `Constraint` pub fn new_from_table_constraints( constraints: &[TableConstraint], df_schema: &DFSchemaRef, ) -> Result<Self> { let constraints = constraints .iter() .map(|c: &TableConstraint| match c { TableConstraint::Unique { columns, is_primary, .. } => { // Get primary key and/or unique indices in the schema: let indices = columns .iter() .map(|pk| { let idx = df_schema .fields() .iter() .position(|item| { item.qualified_name() == pk.value.clone() }) .ok_or_else(|| { DataFusionError::Execution( "Primary key doesn't exist".to_string(), ) })?; Ok(idx) }) .collect::<Result<Vec<_>>>()?; Ok(if *is_primary
else { Constraint::Unique(indices) }) } TableConstraint::ForeignKey { .. } => Err(DataFusionError::Plan( "Foreign key constraints are not currently supported".to_string(), )), TableConstraint::Check { .. } => Err(DataFusionError::Plan( "Check constraints are not currently supported".to_string(), )), TableConstraint::Index { .. } => Err(DataFusionError::Plan( "Indexes are not currently supported".to_string(), )), TableConstraint::FulltextOrSpatial { .. } => Err(DataFusionError::Plan( "Indexes are not currently supported".to_string(), )), }) .collect::<Result<Vec<_>>>()?; Ok(Constraints::new(constraints)) } /// Check whether constraints is empty pub fn is_empty(&self) -> bool { self.inner.is_empty() } } impl Display for Constraints { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { let pk: Vec<String> = self.inner.iter().map(|c| format!("{:?}", c)).collect(); let pk = pk.join(", "); if !pk.is_empty() { write!(f, " constraints=[{pk}]") } else { write!(f, "") } } } /// This object defines a functional dependence in the schema. A functional /// dependence defines a relationship between determinant keys and dependent /// columns. A determinant key is a column, or a set of columns, whose value /// uniquely determines values of some other (dependent) columns. If two rows /// have the same determinant key, dependent columns in these rows are /// necessarily the same. If the determinant key is unique, the set of /// dependent columns is equal to the entire schema and the determinant key can /// serve as a primary key. Note that a primary key may "downgrade" into a /// determinant key due to an operation such as a join, and this object is /// used to track dependence relationships in such cases. 
For more information /// on functional dependencies, see: /// <https://www.scaler.com/topics/dbms/functional-dependency-in-dbms/> #[derive(Debug, Clone, PartialEq, Eq)] pub struct FunctionalDependence { // Column indices of the (possibly composite) determinant key: pub source_indices: Vec<usize>, // Column indices of dependent column(s): pub target_indices: Vec<usize>, /// Flag indicating whether one of the `source_indices` can receive NULL values. /// For a data source, if the constraint in question is `Constraint::Unique`, /// this flag is `true`. If the constraint in question is `Constraint::PrimaryKey`, /// this flag is `false`. /// Note that as the schema changes between different stages in a plan, /// such as after LEFT JOIN or RIGHT JOIN operations, this property may /// change. pub nullable: bool, // The functional dependency mode: pub mode: Dependency, } /// Describes functional dependency mode. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum Dependency { Single, // A determinant key may occur only once. Multi, // A determinant key may occur multiple times (in multiple rows). } impl FunctionalDependence { // Creates a new functional dependence. pub fn new( source_indices: Vec<usize>, target_indices: Vec<usize>, nullable: bool, ) -> Self { Self { source_indices, target_indices, nullable, // Start with the least restrictive mode by default: mode: Dependency::Multi, } } pub fn with_mode(mut self, mode: Dependency) -> Self { self.mode = mode; self } } /// This object encapsulates all functional dependencies in a given relation. #[derive(Debug, Clone, PartialEq, Eq)] pub struct FunctionalDependencies { deps: Vec<FunctionalDependence>, } impl FunctionalDependencies { /// Creates an empty `FunctionalDependencies` object. pub fn empty() -> Self { Self { deps: vec![] } } /// Creates a new `FunctionalDependencies` object from a vector of /// `FunctionalDependence` objects. 
pub fn new(dependencies: Vec<FunctionalDependence>) -> Self { Self { deps: dependencies } } /// Creates a new `FunctionalDependencies` object from the given constraints. pub fn new_from_constraints( constraints: Option<&Constraints>, n_field: usize, ) -> Self { if let Some(Constraints { inner: constraints }) = constraints { // Construct dependency objects based on each individual constraint: let dependencies = constraints .iter() .map(|constraint| { // All the field indices are associated with the whole table // since we are dealing with table level constraints: let dependency = match constraint { Constraint::PrimaryKey(indices) => FunctionalDependence::new( indices.to_vec(), (0..n_field).collect::<Vec<_>>(), false, ), Constraint::Unique(indices) => FunctionalDependence::new( indices.to_vec(), (0..n_field).collect::<Vec<_>>(), true, ), }; // As primary keys are guaranteed to be unique, set the // functional dependency mode to `Dependency::Single`: dependency.with_mode(Dependency::Single) }) .collect::<Vec<_>>(); Self::new(dependencies) } else { // There is no constraint, return an empty object: Self::empty() } } pub fn with_dependency(mut self, mode: Dependency) -> Self { self.deps.iter_mut().for_each(|item| item.mode = mode); self } /// Merges the given functional dependencies with these. pub fn extend(&mut self, other: FunctionalDependencies) { self.deps.extend(other.deps); } /// Adds the `offset` value to `source_indices` and `target_indices` for /// each functional dependency. pub fn add_offset(&mut self, offset: usize) { self.deps.iter_mut().for_each( |FunctionalDependence { source_indices, target_indices, .. }| { *source_indices = add_offset_to_vec(source_indices, offset); *target_indices = add_offset_to_vec(target_indices, offset); }, ) } /// Updates `source_indices` and `target_indices` of each functional /// dependence using the index mapping given in `proj_indices`. 
/// /// Assume that `proj_indices` is \[2, 5, 8\] and we have a functional /// dependence \[5\] (`source_indices`) -> \[5, 8\] (`target_indices`). /// In the updated schema, fields at indices \[2, 5, 8\] will transform /// to \[0, 1, 2\]. Therefore, the resulting functional dependence will /// be \[1\] -> \[1, 2\]. pub fn project_functional_dependencies( &self, proj_indices: &[usize], // The argument `n_out` denotes the schema field length, which is needed // to correctly associate a `Single`-mode dependence with the whole table. n_out: usize, ) -> FunctionalDependencies { let mut projected_func_dependencies = vec![]; for FunctionalDependence { source_indices, target_indices, nullable, mode, } in &self.deps { let new_source_indices = update_elements_with_matching_indices(source_indices, proj_indices); let new_target_indices = if *mode == Dependency::Single { // Associate with all of the fields in the schema: (0..n_out).collect() } else { // Update associations according to projection: update_elements_with_matching_indices(target_indices, proj_indices) }; // All of the composite indices should still be valid after projection; // otherwise, functional dependency cannot be propagated. if new_source_indices.len() == source_indices.len() { let new_func_dependence = FunctionalDependence::new( new_source_indices, new_target_indices, *nullable, ) .with_mode(*mode); projected_func_dependencies.push(new_func_dependence); } } FunctionalDependencies::new(projected_func_dependencies) } /// This function joins this set of functional dependencies with the `other` /// according to the given `join_type`. 
pub fn join( &self, other: &FunctionalDependencies, join_type: &JoinType, left_cols_len: usize, ) -> FunctionalDependencies { // Get mutable copies of left and right side dependencies: let mut right_func_dependencies = other.clone(); let mut left_func_dependencies = self.clone(); match join_type { JoinType::Inner | JoinType::Left | JoinType::Right => { // Add offset to right schema: right_func_dependencies.add_offset(left_cols_len); // Result may have multiple values, update the dependency mode: left_func_dependencies = left_func_dependencies.with_dependency(Dependency::Multi); right_func_dependencies = right_func_dependencies.with_dependency(Dependency::Multi); if *join_type == JoinType::Left { // Downgrade the right side, since it may have additional NULL values: right_func_dependencies.downgrade_dependencies(); } else if *join_type == JoinType::Right { // Downgrade the left side, since it may have additional NULL values: left_func_dependencies.downgrade_dependencies(); } // Combine left and right functional dependencies: left_func_dependencies.extend(right_func_dependencies); left_func_dependencies } JoinType::LeftSemi | JoinType::LeftAnti => { // These joins preserve functional dependencies of the left side: left_func_dependencies } JoinType::RightSemi | JoinType::RightAnti => { // These joins preserve functional dependencies of the right side: right_func_dependencies } JoinType::Full => { // All of the functional dependencies are lost in a FULL join: FunctionalDependencies::empty() } } } /// This function downgrades a functional dependency when nullability becomes /// a possibility: /// - If the dependency in question is UNIQUE (i.e. nullable), a new null value /// invalidates the dependency. /// - If the dependency in question is PRIMARY KEY (i.e. not nullable), a new /// null value turns it into UNIQUE mode. 
fn downgrade_dependencies(&mut self) { // Delete nullable dependencies, since they are no longer valid: self.deps.retain(|item| !item.nullable); self.deps.iter_mut().for_each(|item| item.nullable = true); } /// This function ensures that functional dependencies involving uniquely /// occuring determinant keys cover their entire table in terms of /// dependent columns. pub fn extend_target_indices(&mut self, n_out: usize) { self.deps.iter_mut().for_each( |FunctionalDependence { mode, target_indices, .. }| { // If unique, cover the whole table: if *mode == Dependency::Single { *target_indices = (0..n_out).collect::<Vec<_>>(); } }, ) } } /// Calculates functional dependencies for aggregate output, when there is a GROUP BY expression. pub fn aggregate_functional_dependencies( aggr_input_schema: &DFSchema, group_by_expr_names: &[String], aggr_schema: &DFSchema, ) -> FunctionalDependencies { let mut aggregate_func_dependencies = vec![]; let aggr_input_fields = aggr_input_schema.fields(); let aggr_fields = aggr_schema.fields(); // Association covers the whole table: let target_indices = (0..aggr_schema.fields().len()).collect::<Vec<_>>(); // Get functional dependencies of the schema: let func_dependencies = aggr_input_schema.functional_dependencies(); for FunctionalDependence { source_indices, nullable, mode, .. 
} in &func_dependencies.deps { // Keep source indices in a `HashSet` to prevent duplicate entries: let mut new_source_indices = HashSet::new(); let source_field_names = source_indices .iter() .map(|&idx| aggr_input_fields[idx].qualified_name()) .collect::<Vec<_>>(); for (idx, group_by_expr_name) in group_by_expr_names.iter().enumerate() { // When one of the input determinant expressions matches with // the GROUP BY expression, add the index of the GROUP BY // expression as a new determinant key: if source_field_names.contains(group_by_expr_name) { new_source_indices.insert(idx); } } // All of the composite indices occur in the GROUP BY expression: if new_source_indices.len() == source_indices.len() { aggregate_func_dependencies.push( FunctionalDependence::new( new_source_indices.into_iter().collect(), target_indices.clone(), *nullable, ) // input uniqueness stays the same when GROUP BY matches with input functional dependence determinants .with_mode(*mode), ); } } // If we have a single GROUP BY key, we can guarantee uniqueness after // aggregation: if group_by_expr_names.len() == 1 { // If `source_indices` contain 0, delete this functional dependency // as it will be added anyway with mode `Dependency::Single`: if let Some(idx) = aggregate_func_dependencies .iter() .position(|item| item.source_indices.contains(&0)) { // Delete the functional dependency that contains zeroth idx: aggregate_func_dependencies.remove(idx); } // Add a new functional dependency associated with the whole table: aggregate_func_dependencies.push( // Use nullable property of the group by expression FunctionalDependence::new( vec![0], target_indices, aggr_fields[0].is_nullable(), ) .with_mode(Dependency::Single), ); } FunctionalDependencies::new(aggregate_func_dependencies) } /// Returns target indices, for the determinant keys that are inside /// group by expressions. 
pub fn get_target_functional_dependencies( schema: &DFSchema, group_by_expr_names: &[String], ) -> Option<Vec<usize>> { let mut combined_target_indices = HashSet::new(); let dependencies = schema.functional_dependencies(); let field_names = schema .fields() .iter() .map(|item| item.qualified_name()) .collect::<Vec<_>>(); for FunctionalDependence { source_indices, target_indices, .. } in &dependencies.deps { let source_key_names = source_indices .iter() .map(|id_key_idx| field_names[*id_key_idx].clone()) .collect::<Vec<_>>(); // If the GROUP BY expression contains a determinant key, we can use // the associated fields after aggregation even if they are not part // of the GROUP BY expression. if source_key_names .iter() .all(|source_key_name| group_by_expr_names.contains(source_key_name)) { combined_target_indices.extend(target_indices.iter()); } } (!combined_target_indices.is_empty()) .then_some(combined_target_indices.iter().cloned().collect::<Vec<_>>()) } /// Updates entries inside the `entries` vector with their corresponding /// indices inside the `proj_indices` vector. fn update_elements_with_matching_indices( entries: &[usize], proj_indices: &[usize], ) -> Vec<usize> { entries .iter() .filter_map(|val| proj_indices.iter().position(|proj_idx| proj_idx == val)) .collect() } /// Adds `offset` value to each entry inside `in_data`. fn add_offset_to_vec<T: Copy + std::ops::Add<Output = T>>( in_data: &[T], offset: T, ) -> Vec<T> { in_data.iter().map(|&item| item + offset).collect() }
{ Constraint::PrimaryKey(indices) }
conditional_block
views.py
import os import math import random import datetime import functools from app.models import * # from app import csrf from hashlib import md5 from .forms import TaskForm from app import api from . import main from flask_restful import Resource from flask import jsonify # flask 封装后的json方法 from flask_sqlalchemy import Pagination from flask import render_template, request, redirect, session def set_pwd(pwd): # 密码加密 hl = md5(pwd.encode(encoding='utf-8')) new_pwd = hl.hexdigest() return new_pwd # def back_page(pages, current_page): # 返回页数 # if pages <= 5: # return range(1, pages + 1) # if current_page <= 3: # return range(1, 6) # elif current_page + 3 >= pages: # return range(pages - 4, pages + 1) # else: # return range(current_page - 2, current_page + 2) def loginValid(fun): @functools.wraps(fun) def inner(*args, **kwargs): id = request.cookies.get('id', 0) username = request.cookies.get('username') session_username = session.get('username') user = User.query.get(int(id)) if user: if user.username == username and username == session_username: return fun(*args, **kwargs) return redirect('/login/') return inner class Calendar: # 日历类 def __init__(self, year=datetime.datetime.now().year, month=datetime.datetime.now().month): assert int(month) <= 12 date = datetime.datetime(year, month, 1, 0, 0) # 当前月1日 self.start_day = date.weekday() # 当前月1号是周几 self.days = list(self.back_days(year, month)) # 当月天数 self.work = ['语文', '数学', '英语', '物理', '化学', '地理', '生物'] def back_days(self, year, month): # 返回当月天数 big_month = [1, 3, 5, 7, 8, 10, 12] small_month = [4, 6, 9, 11] two_month = 28 if year % 4 == 0 and year % 100 != 0 or year % 400 == 0: two_month = 29 assert int(month) <= 12 if month in big_month: return range(1, 32) elif month in small_month: return range(1, 31) else: return range(1, two_month + 1) def first_list(self, start_day, days): # 日历第一行 ca_list = [{self.days.pop(0): random.choice(self.work)} for i in range(1, 8 - start_day)] [ca_list.insert(0, 'empty') for j in range(7 - 
len(ca_list))] return ca_list def return_calendar(self): # 返回日历的列表 first_line = self.first_list(self.start_day, self.days) # 日历第一行 lines = [first_line] # 存日历的列表 while self.days: # 得到每一行 line = [{self.days.pop(0): random.choice(self.work)} for i in range(7) if self.days] [line.append('empty') for j in range(7 - len(line))] # 长度不足补空 lines.append(line) return lines class Paginator: def __init__(self, datas, page_size): self.datas = datas self.page_size = page_size self.all_pages = math.ceil(self.datas.count() / self.page_size) def back_page(self, current_page): if self.all_pages <= 5: return range(1, self.all_pages + 1) if current_page <= 3: return range(1, 6) elif current_page + 3 >= self.all_pages: return range(self.all_pages - 4, self.all_pages + 1) else: return range(current_page - 2, current_page + 2) def back_data(self, current_page): datas = self.datas.offset((current_page - 1) * self.page_size).limit(self.page_size) return datas @main.route('/') # 路由 def base(): # 视图 # c = Curriculum(c_id='0001', c_name='python', c_time=datetime.datetime.now()) # c.save() return render_template('base.html') @main.route('/index/') # 路由 @loginValid def index(): # 视图 return render_template('index.html') @main.route('/register/', methods=['GET', 'POST']) # 路由 def register(): # 视图 err_msg = '' if request.method == 'POST': username = request.form.get('username') password = request.form.get('password') email = request.form.get('email') if username: if email: if password: user = User() user.username = username user.email = email user.password = set_pwd(password) user.save() return redirect('/login/') else: err_msg = '密码不可为空' else: err_msg = '邮箱不可为空' else: err_msg = '用户名不可为空' return render_template('register.html', **locals()) @main.route('/login/', methods=['GET', 'POST']) # 路由 def login(): # 视图 err_msg = '' if request.method == 'POST': email = request.form.get('email') password = request.form.get('password') user = User.query.filter_by(email=email).first() if user: if 
set_pwd(password) == user.password: response = redirect('/index/') response.set_cookie('email', user.email) response.set_cookie('id', str(user.id)) response.set_cookie('username', user.username) print(user.username) session['username'] = user.username return response else: err_msg = '密码错误' else: err_msg = '该账号未注册' return render_template('login.html', **locals()) @main.route('/logout/') def logout(): # 退出 response = redirect('/login/') response.delete_cookie('email') response.delete_cookie('id') response.delete_cookie('username') session.pop('username') return response @main.route('/user_info/') # 路由 @loginValid def user_info(): # 个人中心 c = Calendar() datas = c.return_calendar() day = datetime.datetime.now().day return render_template('user_info.html', **locals()) @main.route('/leave/', methods=['get', 'post']) # @csrf.exempt @loginValid def leave(): err_msg = '' if request.method == 'POST': leave_name = request.form.get('leave_name') leave_type = request.form.get('leave_type') leave_start = request.form.get('leave_start') leave_end = request.form.get('leave_end') leave_desc = request.form.get('leave_desc') leave_phone = request.form.get('leave_phone') if leave_name and leave_type and leave_start and leave_end and leave_desc and leave_phone: id = int(request.cookies.get('id')) lea = Leave() lea.leave_id = id lea.leave_name = leave_name lea.leave_type = leave_type lea.leave_start = leave_start lea.leave_end = leave_end lea.leave_desc = leave_desc lea.leave_phone = leave_phone lea.leave_status = '0' lea.save() else: err_msg = '请填写全部内容' return render_template('leave.html', **locals()) @main.route('/leave_list/<p>/', methods=['get', 'post']) @loginValid def leave_list(p): page_size = 5 p = int(p) id = int(request.cookies.get('id')) leaves = Leave.query.filter_by(leave_id=id) pagin = Paginator(leaves, page_size) pages = pagin.back_page(p) leaves = pagin.back_data(p) return render_template('leave_list.html', **locals()) @main.route('/cancel/') def cancel(): id = 
request.args.get('id') # 通过args接受get请求数据 leave = Leave.query.get(int(id)) leave.delete() return jsonify({'data': '删除成功'}) @main.route('/add_task/', methods=['get', 'post'])
task.validate_on_submit() # 判断是否是一个有效的post请求 task.validate() # 判断是否是一个有效的post请求 task.data # 提交的数据 :return: ''' errors = {} task = TaskForm() if request.method == 'POST': if task.validate_on_submit(): formData = task.data else: errors_list = list(task.errors.keys()) errors = task.errors print(errors) return render_template('add_task.html', **locals()) @api.resource('/Api/Leave/') class LeaveApi(Resource): def __init__(self): # 定义返回的格式 super(LeaveApi, self).__init__() self.result = { 'version': '1.0', 'data': '' } def set_data(self, leave): # 定义返回的数据 result_data = { 'leave_name': leave.leave_name, 'leave_type': leave.leave_type, 'leave_start': leave.leave_start, 'leave_end': leave.leave_end, 'leave_desc': leave.leave_desc, 'leave_phone': leave.leave_phone, } return result_data def get(self): data = request.args # 获取请求的数据 id = data.get('id') if id: leave = Leave.query.get(int(id)) result_data = self.set_data(leave) else: leaves = Leave.query.all() result_data = [] for leave in leaves: result_data.append(self.set_data(leave)) self.result['data'] = result_data return self.result def post(self): data = request.form leave_id = data.get("leave_id") leave_name = data.get("leave_name") leave_type = data.get("leave_type") leave_start = data.get("leave_start") leave_end = data.get("leave_end") leave_desc = data.get("leave_desc") leave_phone = data.get("leave_phone") leave = Leave() leave.leave_id = leave_id leave.leave_name = leave_name leave.leave_type = leave_type # 假期类型 leave.leave_start = leave_start # 起始时间 leave.leave_end = leave_end # 结束时间 leave.leave_desc = leave_desc # 请假事由 leave.leave_phone = leave_phone # 联系方式 leave.leave_status = "0" # 假条状态 leave.save() self.result["data"] = self.set_data(leave) return self.result def put(self): data = request.form id = data.get('id') leave = Leave.query.get(int(id)) for key, value in data.items(): if key != 'id': setattr(leave, key, value) leave.save() self.result['data'] = self.set_data(leave) return self.result def delete(self): 
data = request.form id = data.get('id') leave = Leave.query.get(int(id)) leave.delete() self.result['data'] = 'ID为%s的数据,删除成功' % id return self.result
def add_task(): ''' task.errors # 表单校验错误
random_line_split
views.py
import os import math import random import datetime import functools from app.models import * # from app import csrf from hashlib import md5 from .forms import TaskForm from app import api from . import main from flask_restful import Resource from flask import jsonify # flask 封装后的json方法 from flask_sqlalchemy import Pagination from flask import render_template, request, redirect, session def set_pwd(pwd): # 密码加密 hl = md5(pwd.encode(encoding='utf-8')) new_pwd = hl.hexdigest() return new_pwd # def back_page(pages, current_page): # 返回页数 # if pages <= 5: # return range(1, pages + 1) # if current_page <= 3: # return range(1, 6) # elif current_page + 3 >= pages: # return range(pages - 4, pages + 1) # else: # return range(current_page - 2, current_page + 2) def loginValid(fun): @functools.wraps(fun) def inner(*args, **kwargs): id = request.cookies.get('id', 0) username = request.cookies.get('username') session_username = session.get('username') user = User.query.get(int(id)) if user: if user.username == username and username == session_username: return fun(*args, **kwargs) return redirect('/login/') return inner class Calendar: # 日历类 def __init__(self, year=datetime.datetime.now().year, month=datetime.datetime.now().month): assert int(month) <= 12 date = datetime.datetime(year, month, 1, 0, 0) # 当前月1日 self.start_day = date.weekday() # 当前月1号是周几 self.days = list(self.back_days(year, month)) # 当月天数 self.work = ['语文', '数学', '英语', '物理', '化学', '地理', '生物'] def back_days(self, year, month): # 返回当月天数 big_month = [1, 3, 5, 7, 8, 10, 12] small_month = [4, 6, 9, 11] two_month = 28 if year % 4 == 0 and year % 100 != 0 or year % 400 == 0: two_month = 29 assert int(month) <= 12 if month in big_month: return range(1, 32) elif month in small_month: return range(1, 31) else: return range(1, two_month + 1) def first_list(self, start_day, days): # 日历第一行 ca_list = [{self.days.pop(0): random.choice(self.work)} for i in range(1, 8 - start_day)] [ca_list.insert(0, 'empty') for j in range(7 - 
len(ca_list))] return ca_list def return_calendar(self): # 返回日历的列表 first_line = self.first_list(self.start_day, self.days) # 日历第一行 lines = [first_line] # 存日历的列表 while self.days: # 得到每一行 line = [{self.days.pop(0): random.choice(self.work)} for i in range(7) if self.days] [line.append('empty') for j in range(7 - len(line))] # 长度不足补空 lines.append(line) return lines class Paginator: def __init__(self, datas, page_size): self.datas = datas self.page_size = page_size self.all_pages = math.ceil(self.datas.count() / self.page_size) def back_page(self, current_page): if self.all_pages <= 5: return range(1, self.all_pages + 1) if current_page <= 3: return range(1, 6) elif current_page + 3 >= self.all_pages: return range(self.all_pages - 4, self.all_pages + 1) else: return range(current_page - 2, current_page + 2) def back_data(self, current_page): datas = self.datas.offset((current_page - 1) * self.page_size).limit(self.page_size) return datas @main.route('/') # 路由 def base(): # 视图 # c = Curriculum(c_id='0001', c_name='python', c_time=datetime.datetime.now()) # c.save() return render_template('base.html') @main.route('/index/') # 路由 @loginValid def index(): # 视图 return render_template('index.html') @main.route('/register/', meth
ister(): # 视图 err_msg = '' if request.method == 'POST': username = request.form.get('username') password = request.form.get('password') email = request.form.get('email') if username: if email: if password: user = User() user.username = username user.email = email user.password = set_pwd(password) user.save() return redirect('/login/') else: err_msg = '密码不可为空' else: err_msg = '邮箱不可为空' else: err_msg = '用户名不可为空' return render_template('register.html', **locals()) @main.route('/login/', methods=['GET', 'POST']) # 路由 def login(): # 视图 err_msg = '' if request.method == 'POST': email = request.form.get('email') password = request.form.get('password') user = User.query.filter_by(email=email).first() if user: if set_pwd(password) == user.password: response = redirect('/index/') response.set_cookie('email', user.email) response.set_cookie('id', str(user.id)) response.set_cookie('username', user.username) print(user.username) session['username'] = user.username return response else: err_msg = '密码错误' else: err_msg = '该账号未注册' return render_template('login.html', **locals()) @main.route('/logout/') def logout(): # 退出 response = redirect('/login/') response.delete_cookie('email') response.delete_cookie('id') response.delete_cookie('username') session.pop('username') return response @main.route('/user_info/') # 路由 @loginValid def user_info(): # 个人中心 c = Calendar() datas = c.return_calendar() day = datetime.datetime.now().day return render_template('user_info.html', **locals()) @main.route('/leave/', methods=['get', 'post']) # @csrf.exempt @loginValid def leave(): err_msg = '' if request.method == 'POST': leave_name = request.form.get('leave_name') leave_type = request.form.get('leave_type') leave_start = request.form.get('leave_start') leave_end = request.form.get('leave_end') leave_desc = request.form.get('leave_desc') leave_phone = request.form.get('leave_phone') if leave_name and leave_type and leave_start and leave_end and leave_desc and leave_phone: id = 
int(request.cookies.get('id')) lea = Leave() lea.leave_id = id lea.leave_name = leave_name lea.leave_type = leave_type lea.leave_start = leave_start lea.leave_end = leave_end lea.leave_desc = leave_desc lea.leave_phone = leave_phone lea.leave_status = '0' lea.save() else: err_msg = '请填写全部内容' return render_template('leave.html', **locals()) @main.route('/leave_list/<p>/', methods=['get', 'post']) @loginValid def leave_list(p): page_size = 5 p = int(p) id = int(request.cookies.get('id')) leaves = Leave.query.filter_by(leave_id=id) pagin = Paginator(leaves, page_size) pages = pagin.back_page(p) leaves = pagin.back_data(p) return render_template('leave_list.html', **locals()) @main.route('/cancel/') def cancel(): id = request.args.get('id') # 通过args接受get请求数据 leave = Leave.query.get(int(id)) leave.delete() return jsonify({'data': '删除成功'}) @main.route('/add_task/', methods=['get', 'post']) def add_task(): ''' task.errors # 表单校验错误 task.validate_on_submit() # 判断是否是一个有效的post请求 task.validate() # 判断是否是一个有效的post请求 task.data # 提交的数据 :return: ''' errors = {} task = TaskForm() if request.method == 'POST': if task.validate_on_submit(): formData = task.data else: errors_list = list(task.errors.keys()) errors = task.errors print(errors) return render_template('add_task.html', **locals()) @api.resource('/Api/Leave/') class LeaveApi(Resource): def __init__(self): # 定义返回的格式 super(LeaveApi, self).__init__() self.result = { 'version': '1.0', 'data': '' } def set_data(self, leave): # 定义返回的数据 result_data = { 'leave_name': leave.leave_name, 'leave_type': leave.leave_type, 'leave_start': leave.leave_start, 'leave_end': leave.leave_end, 'leave_desc': leave.leave_desc, 'leave_phone': leave.leave_phone, } return result_data def get(self): data = request.args # 获取请求的数据 id = data.get('id') if id: leave = Leave.query.get(int(id)) result_data = self.set_data(leave) else: leaves = Leave.query.all() result_data = [] for leave in leaves: result_data.append(self.set_data(leave)) self.result['data'] = 
result_data return self.result def post(self): data = request.form leave_id = data.get("leave_id") leave_name = data.get("leave_name") leave_type = data.get("leave_type") leave_start = data.get("leave_start") leave_end = data.get("leave_end") leave_desc = data.get("leave_desc") leave_phone = data.get("leave_phone") leave = Leave() leave.leave_id = leave_id leave.leave_name = leave_name leave.leave_type = leave_type # 假期类型 leave.leave_start = leave_start # 起始时间 leave.leave_end = leave_end # 结束时间 leave.leave_desc = leave_desc # 请假事由 leave.leave_phone = leave_phone # 联系方式 leave.leave_status = "0" # 假条状态 leave.save() self.result["data"] = self.set_data(leave) return self.result def put(self): data = request.form id = data.get('id') leave = Leave.query.get(int(id)) for key, value in data.items(): if key != 'id': setattr(leave, key, value) leave.save() self.result['data'] = self.set_data(leave) return self.result def delete(self): data = request.form id = data.get('id') leave = Leave.query.get(int(id)) leave.delete() self.result['data'] = 'ID为%s的数据,删除成功' % id return self.result
ods=['GET', 'POST']) # 路由 def reg
identifier_body
views.py
import os import math import random import datetime import functools from app.models import * # from app import csrf from hashlib import md5 from .forms import TaskForm from app import api from . import main from flask_restful import Resource from flask import jsonify # flask 封装后的json方法 from flask_sqlalchemy import Pagination from flask import render_template, request, redirect, session def set_pwd(pwd)
加密 hl = md5(pwd.encode(encoding='utf-8')) new_pwd = hl.hexdigest() return new_pwd # def back_page(pages, current_page): # 返回页数 # if pages <= 5: # return range(1, pages + 1) # if current_page <= 3: # return range(1, 6) # elif current_page + 3 >= pages: # return range(pages - 4, pages + 1) # else: # return range(current_page - 2, current_page + 2) def loginValid(fun): @functools.wraps(fun) def inner(*args, **kwargs): id = request.cookies.get('id', 0) username = request.cookies.get('username') session_username = session.get('username') user = User.query.get(int(id)) if user: if user.username == username and username == session_username: return fun(*args, **kwargs) return redirect('/login/') return inner class Calendar: # 日历类 def __init__(self, year=datetime.datetime.now().year, month=datetime.datetime.now().month): assert int(month) <= 12 date = datetime.datetime(year, month, 1, 0, 0) # 当前月1日 self.start_day = date.weekday() # 当前月1号是周几 self.days = list(self.back_days(year, month)) # 当月天数 self.work = ['语文', '数学', '英语', '物理', '化学', '地理', '生物'] def back_days(self, year, month): # 返回当月天数 big_month = [1, 3, 5, 7, 8, 10, 12] small_month = [4, 6, 9, 11] two_month = 28 if year % 4 == 0 and year % 100 != 0 or year % 400 == 0: two_month = 29 assert int(month) <= 12 if month in big_month: return range(1, 32) elif month in small_month: return range(1, 31) else: return range(1, two_month + 1) def first_list(self, start_day, days): # 日历第一行 ca_list = [{self.days.pop(0): random.choice(self.work)} for i in range(1, 8 - start_day)] [ca_list.insert(0, 'empty') for j in range(7 - len(ca_list))] return ca_list def return_calendar(self): # 返回日历的列表 first_line = self.first_list(self.start_day, self.days) # 日历第一行 lines = [first_line] # 存日历的列表 while self.days: # 得到每一行 line = [{self.days.pop(0): random.choice(self.work)} for i in range(7) if self.days] [line.append('empty') for j in range(7 - len(line))] # 长度不足补空 lines.append(line) return lines class Paginator: def __init__(self, datas, 
page_size): self.datas = datas self.page_size = page_size self.all_pages = math.ceil(self.datas.count() / self.page_size) def back_page(self, current_page): if self.all_pages <= 5: return range(1, self.all_pages + 1) if current_page <= 3: return range(1, 6) elif current_page + 3 >= self.all_pages: return range(self.all_pages - 4, self.all_pages + 1) else: return range(current_page - 2, current_page + 2) def back_data(self, current_page): datas = self.datas.offset((current_page - 1) * self.page_size).limit(self.page_size) return datas @main.route('/') # 路由 def base(): # 视图 # c = Curriculum(c_id='0001', c_name='python', c_time=datetime.datetime.now()) # c.save() return render_template('base.html') @main.route('/index/') # 路由 @loginValid def index(): # 视图 return render_template('index.html') @main.route('/register/', methods=['GET', 'POST']) # 路由 def register(): # 视图 err_msg = '' if request.method == 'POST': username = request.form.get('username') password = request.form.get('password') email = request.form.get('email') if username: if email: if password: user = User() user.username = username user.email = email user.password = set_pwd(password) user.save() return redirect('/login/') else: err_msg = '密码不可为空' else: err_msg = '邮箱不可为空' else: err_msg = '用户名不可为空' return render_template('register.html', **locals()) @main.route('/login/', methods=['GET', 'POST']) # 路由 def login(): # 视图 err_msg = '' if request.method == 'POST': email = request.form.get('email') password = request.form.get('password') user = User.query.filter_by(email=email).first() if user: if set_pwd(password) == user.password: response = redirect('/index/') response.set_cookie('email', user.email) response.set_cookie('id', str(user.id)) response.set_cookie('username', user.username) print(user.username) session['username'] = user.username return response else: err_msg = '密码错误' else: err_msg = '该账号未注册' return render_template('login.html', **locals()) @main.route('/logout/') def logout(): # 退出 response = 
redirect('/login/') response.delete_cookie('email') response.delete_cookie('id') response.delete_cookie('username') session.pop('username') return response @main.route('/user_info/') # 路由 @loginValid def user_info(): # 个人中心 c = Calendar() datas = c.return_calendar() day = datetime.datetime.now().day return render_template('user_info.html', **locals()) @main.route('/leave/', methods=['get', 'post']) # @csrf.exempt @loginValid def leave(): err_msg = '' if request.method == 'POST': leave_name = request.form.get('leave_name') leave_type = request.form.get('leave_type') leave_start = request.form.get('leave_start') leave_end = request.form.get('leave_end') leave_desc = request.form.get('leave_desc') leave_phone = request.form.get('leave_phone') if leave_name and leave_type and leave_start and leave_end and leave_desc and leave_phone: id = int(request.cookies.get('id')) lea = Leave() lea.leave_id = id lea.leave_name = leave_name lea.leave_type = leave_type lea.leave_start = leave_start lea.leave_end = leave_end lea.leave_desc = leave_desc lea.leave_phone = leave_phone lea.leave_status = '0' lea.save() else: err_msg = '请填写全部内容' return render_template('leave.html', **locals()) @main.route('/leave_list/<p>/', methods=['get', 'post']) @loginValid def leave_list(p): page_size = 5 p = int(p) id = int(request.cookies.get('id')) leaves = Leave.query.filter_by(leave_id=id) pagin = Paginator(leaves, page_size) pages = pagin.back_page(p) leaves = pagin.back_data(p) return render_template('leave_list.html', **locals()) @main.route('/cancel/') def cancel(): id = request.args.get('id') # 通过args接受get请求数据 leave = Leave.query.get(int(id)) leave.delete() return jsonify({'data': '删除成功'}) @main.route('/add_task/', methods=['get', 'post']) def add_task(): ''' task.errors # 表单校验错误 task.validate_on_submit() # 判断是否是一个有效的post请求 task.validate() # 判断是否是一个有效的post请求 task.data # 提交的数据 :return: ''' errors = {} task = TaskForm() if request.method == 'POST': if task.validate_on_submit(): formData = 
task.data else: errors_list = list(task.errors.keys()) errors = task.errors print(errors) return render_template('add_task.html', **locals()) @api.resource('/Api/Leave/') class LeaveApi(Resource): def __init__(self): # 定义返回的格式 super(LeaveApi, self).__init__() self.result = { 'version': '1.0', 'data': '' } def set_data(self, leave): # 定义返回的数据 result_data = { 'leave_name': leave.leave_name, 'leave_type': leave.leave_type, 'leave_start': leave.leave_start, 'leave_end': leave.leave_end, 'leave_desc': leave.leave_desc, 'leave_phone': leave.leave_phone, } return result_data def get(self): data = request.args # 获取请求的数据 id = data.get('id') if id: leave = Leave.query.get(int(id)) result_data = self.set_data(leave) else: leaves = Leave.query.all() result_data = [] for leave in leaves: result_data.append(self.set_data(leave)) self.result['data'] = result_data return self.result def post(self): data = request.form leave_id = data.get("leave_id") leave_name = data.get("leave_name") leave_type = data.get("leave_type") leave_start = data.get("leave_start") leave_end = data.get("leave_end") leave_desc = data.get("leave_desc") leave_phone = data.get("leave_phone") leave = Leave() leave.leave_id = leave_id leave.leave_name = leave_name leave.leave_type = leave_type # 假期类型 leave.leave_start = leave_start # 起始时间 leave.leave_end = leave_end # 结束时间 leave.leave_desc = leave_desc # 请假事由 leave.leave_phone = leave_phone # 联系方式 leave.leave_status = "0" # 假条状态 leave.save() self.result["data"] = self.set_data(leave) return self.result def put(self): data = request.form id = data.get('id') leave = Leave.query.get(int(id)) for key, value in data.items(): if key != 'id': setattr(leave, key, value) leave.save() self.result['data'] = self.set_data(leave) return self.result def delete(self): data = request.form id = data.get('id') leave = Leave.query.get(int(id)) leave.delete() self.result['data'] = 'ID为%s的数据,删除成功' % id return self.result
: # 密码
identifier_name
views.py
import os import math import random import datetime import functools from app.models import * # from app import csrf from hashlib import md5 from .forms import TaskForm from app import api from . import main from flask_restful import Resource from flask import jsonify # flask 封装后的json方法 from flask_sqlalchemy import Pagination from flask import render_template, request, redirect, session def set_pwd(pwd): # 密码加密 hl = md5(pwd.encode(encoding='utf-8')) new_pwd = hl.hexdigest() return new_pwd # def back_page(pages, current_page): # 返回页数 # if pages <= 5: # return range(1, pages + 1) # if current_page <= 3: # return range(1, 6) # elif current_page + 3 >= pages: # return range(pages - 4, pages + 1) # else: # return range(current_page - 2, current_page + 2) def loginValid(fun): @functools.wraps(fun) def inner(*args, **kwargs): id = request.cookies.get('id', 0) username = request.cookies.get('username') session_username = session.get('username') user = User.query.get(int(id)) if user: if user.username == username and username == session_username: return fun(*args, **kwargs) return redirect('/login/') return inner class Calendar: # 日历类 def __init__(self, year=datetime.datetime.now().year, month=datetime.datetime.now().month): assert int(month) <= 12 date = datetime.datetime(year, month, 1, 0, 0) # 当前月1日 self.start_day = date.weekday() # 当前月1号是周几 self.days = list(self.back_days(year, month)) # 当月天数 self.work = ['语文', '数学', '英语', '物理', '化学', '地理', '生物'] def back_days(self, year, month): # 返回当月天数 big_month = [1, 3, 5, 7, 8, 10, 12] small_month = [4, 6, 9, 11] two_month = 28 if year % 4 == 0 and year % 100 != 0 or year % 400 == 0: two_month = 29 assert int(month) <= 12 if month in big_month: return range(1, 32) elif month in small_month: return range(1, 31) else: return range(1, two_month + 1) def first_list(self, start_day, days): # 日历第一行 ca_list = [{self.days.pop(0): random.choice(self.work)} for i in range(1, 8 - start_day)] [ca_list.insert(0, 'empty') for j in range(7 - 
len(ca_list))] return ca_list def return_calendar(self): # 返回日历的列表 first_line = self.first_list(self.start_day, self.days) # 日历第一行 lines = [first_line] # 存日历的列表 while self.days: # 得到每一行 line = [{self.days.pop(0): random.choice(self.work)} for i in range(7) if self.days] [line.append('empty') for j in range(7 - len(line))] # 长度不足补空 lines.append(line) return lines class Paginator: def __init__(self, datas, page_size): self.datas = datas self.page_size = page_size self.all_pages = math.ceil(self.datas.count() / self.page_size) def back_page(self, current_page): if self.all_pages <= 5: return range(1, self.all_pages + 1) if current_page <= 3: return range(1, 6) elif current_page + 3 >= self.all_pages: return range(self.all_pages - 4, self.all_pages + 1) else: return range(current_page - 2, current_page + 2) def back_data(self, current_page): datas = self.datas.offset((current_page - 1) * self.page_size).limit(self.page_size) return datas @main.route('/') # 路由 def base(): # 视图 # c = Curriculum(c_id='0001', c_name='python', c_time=datetime.datetime.now()) # c.save() return render_template('base.html') @main.route('/index/') # 路由 @loginValid def index(): # 视图 return render_template('index.html') @main.route('/register/', methods=['GET', 'POST']) # 路由 def register(): # 视图 err_msg = '' if request.method == 'POST': username = request.form.get('username') password = request.form.get('password') email = request.form.get('email') if username: if email: if password: user = User() user.username = username user.email = email user.password = set_pwd(password) user.save() return redirect('/login/') else: err_msg = '密码不可为空' else: err_msg = '邮箱不可为空' else: err_msg = '用户名不可为空' return render_template('register.html', **locals()) @main.route('/login/', methods=['GET', 'POST']) # 路由 def login(): # 视图
ethod == 'POST': email = request.form.get('email') password = request.form.get('password') user = User.query.filter_by(email=email).first() if user: if set_pwd(password) == user.password: response = redirect('/index/') response.set_cookie('email', user.email) response.set_cookie('id', str(user.id)) response.set_cookie('username', user.username) print(user.username) session['username'] = user.username return response else: err_msg = '密码错误' else: err_msg = '该账号未注册' return render_template('login.html', **locals()) @main.route('/logout/') def logout(): # 退出 response = redirect('/login/') response.delete_cookie('email') response.delete_cookie('id') response.delete_cookie('username') session.pop('username') return response @main.route('/user_info/') # 路由 @loginValid def user_info(): # 个人中心 c = Calendar() datas = c.return_calendar() day = datetime.datetime.now().day return render_template('user_info.html', **locals()) @main.route('/leave/', methods=['get', 'post']) # @csrf.exempt @loginValid def leave(): err_msg = '' if request.method == 'POST': leave_name = request.form.get('leave_name') leave_type = request.form.get('leave_type') leave_start = request.form.get('leave_start') leave_end = request.form.get('leave_end') leave_desc = request.form.get('leave_desc') leave_phone = request.form.get('leave_phone') if leave_name and leave_type and leave_start and leave_end and leave_desc and leave_phone: id = int(request.cookies.get('id')) lea = Leave() lea.leave_id = id lea.leave_name = leave_name lea.leave_type = leave_type lea.leave_start = leave_start lea.leave_end = leave_end lea.leave_desc = leave_desc lea.leave_phone = leave_phone lea.leave_status = '0' lea.save() else: err_msg = '请填写全部内容' return render_template('leave.html', **locals()) @main.route('/leave_list/<p>/', methods=['get', 'post']) @loginValid def leave_list(p): page_size = 5 p = int(p) id = int(request.cookies.get('id')) leaves = Leave.query.filter_by(leave_id=id) pagin = Paginator(leaves, page_size) pages = 
pagin.back_page(p) leaves = pagin.back_data(p) return render_template('leave_list.html', **locals()) @main.route('/cancel/') def cancel(): id = request.args.get('id') # 通过args接受get请求数据 leave = Leave.query.get(int(id)) leave.delete() return jsonify({'data': '删除成功'}) @main.route('/add_task/', methods=['get', 'post']) def add_task(): ''' task.errors # 表单校验错误 task.validate_on_submit() # 判断是否是一个有效的post请求 task.validate() # 判断是否是一个有效的post请求 task.data # 提交的数据 :return: ''' errors = {} task = TaskForm() if request.method == 'POST': if task.validate_on_submit(): formData = task.data else: errors_list = list(task.errors.keys()) errors = task.errors print(errors) return render_template('add_task.html', **locals()) @api.resource('/Api/Leave/') class LeaveApi(Resource): def __init__(self): # 定义返回的格式 super(LeaveApi, self).__init__() self.result = { 'version': '1.0', 'data': '' } def set_data(self, leave): # 定义返回的数据 result_data = { 'leave_name': leave.leave_name, 'leave_type': leave.leave_type, 'leave_start': leave.leave_start, 'leave_end': leave.leave_end, 'leave_desc': leave.leave_desc, 'leave_phone': leave.leave_phone, } return result_data def get(self): data = request.args # 获取请求的数据 id = data.get('id') if id: leave = Leave.query.get(int(id)) result_data = self.set_data(leave) else: leaves = Leave.query.all() result_data = [] for leave in leaves: result_data.append(self.set_data(leave)) self.result['data'] = result_data return self.result def post(self): data = request.form leave_id = data.get("leave_id") leave_name = data.get("leave_name") leave_type = data.get("leave_type") leave_start = data.get("leave_start") leave_end = data.get("leave_end") leave_desc = data.get("leave_desc") leave_phone = data.get("leave_phone") leave = Leave() leave.leave_id = leave_id leave.leave_name = leave_name leave.leave_type = leave_type # 假期类型 leave.leave_start = leave_start # 起始时间 leave.leave_end = leave_end # 结束时间 leave.leave_desc = leave_desc # 请假事由 leave.leave_phone = leave_phone # 联系方式 
leave.leave_status = "0" # 假条状态 leave.save() self.result["data"] = self.set_data(leave) return self.result def put(self): data = request.form id = data.get('id') leave = Leave.query.get(int(id)) for key, value in data.items(): if key != 'id': setattr(leave, key, value) leave.save() self.result['data'] = self.set_data(leave) return self.result def delete(self): data = request.form id = data.get('id') leave = Leave.query.get(int(id)) leave.delete() self.result['data'] = 'ID为%s的数据,删除成功' % id return self.result
err_msg = '' if request.m
conditional_block
apigroup.rs
use super::{ parse::{self, GroupVersionData}, version::Version, }; use crate::{error::DiscoveryError, Client, Result}; use k8s_openapi::apimachinery::pkg::apis::meta::v1::{APIGroup, APIVersions}; pub use kube_core::discovery::{verbs, ApiCapabilities, ApiResource, Scope}; use kube_core::gvk::{GroupVersion, GroupVersionKind}; /// Describes one API groups collected resources and capabilities. /// /// Each `ApiGroup` contains all data pinned to a each version. /// In particular, one data set within the `ApiGroup` for `"apiregistration.k8s.io"` /// is the subset pinned to `"v1"`; commonly referred to as `"apiregistration.k8s.io/v1"`. /// /// If you know the version of the discovered group, you can fetch it directly: /// ```no_run /// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt}; /// #[tokio::main] /// async fn main() -> Result<(), kube::Error> { /// let client = Client::try_default().await?; /// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?; /// for (apiresource, caps) in apigroup.versioned_resources("v1") { /// println!("Found ApiResource {}", apiresource.kind); /// } /// Ok(()) /// } /// ``` /// /// But if you do not know this information, you can use [`ApiGroup::preferred_version_or_latest`]. /// /// Whichever way you choose the end result is something describing a resource and its abilities: /// - `Vec<(ApiResource, `ApiCapabilities)>` :: for all resources in a versioned ApiGroup /// - `(ApiResource, ApiCapabilities)` :: for a single kind under a versioned ApiGroud /// /// These two types: [`ApiResource`], and [`ApiCapabilities`] /// should contain the information needed to construct an [`Api`](crate::Api) and start querying the kubernetes API. /// You will likely need to use [`DynamicObject`] as the generic type for Api to do this, /// as well as the [`ApiResource`] for the `DynamicType` for the [`Resource`] trait. 
/// /// ```no_run /// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt}; /// #[tokio::main] /// async fn main() -> Result<(), kube::Error> { /// let client = Client::try_default().await?; /// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?; /// let (ar, caps) = apigroup.recommended_kind("APIService").unwrap(); /// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar); /// for service in api.list(&Default::default()).await? { /// println!("Found APIService: {}", service.name()); /// } /// Ok(()) /// } /// ``` /// [`ApiResource`]: crate::discovery::ApiResource /// [`ApiCapabilities`]: crate::discovery::ApiCapabilities /// [`DynamicObject`]: crate::api::DynamicObject /// [`Resource`]: crate::Resource /// [`ApiGroup::preferred_version_or_latest`]: crate::discovery::ApiGroup::preferred_version_or_latest /// [`ApiGroup::versioned_resources`]: crate::discovery::ApiGroup::versioned_resources /// [`ApiGroup::recommended_resources`]: crate::discovery::ApiGroup::recommended_resources /// [`ApiGroup::recommended_kind`]: crate::discovery::ApiGroup::recommended_kind pub struct ApiGroup { /// Name of the group e.g. apiregistration.k8s.io name: String, /// List of resource information, capabilities at particular versions data: Vec<GroupVersionData>, /// Preferred version if exported by the `APIGroup` preferred: Option<String>, } /// Internal queriers to convert from an APIGroup (or APIVersions for core) to our ApiGroup /// /// These queriers ignore groups with empty versions. /// This ensures that `ApiGroup::preferred_version_or_latest` always have an answer. /// On construction, they also sort the internal vec of GroupVersionData according to `Version`. 
impl ApiGroup { pub(crate) async fn query_apis(client: &Client, g: APIGroup) -> Result<Self> { tracing::debug!(name = g.name.as_str(), "Listing group versions"); let key = g.name; if g.versions.is_empty() { return Err(DiscoveryError::EmptyApiGroup(key).into()); } let mut data = vec![]; for vers in &g.versions { let resources = client.list_api_group_resources(&vers.group_version).await?; data.push(GroupVersionData::new(vers.version.clone(), resources)?); } let mut group = ApiGroup { name: key, data, preferred: g.preferred_version.map(|v| v.version), }; group.sort_versions(); Ok(group) } pub(crate) async fn query_core(client: &Client, coreapis: APIVersions) -> Result<Self> { let mut data = vec![]; let key = ApiGroup::CORE_GROUP.to_string(); if coreapis.versions.is_empty() { return Err(DiscoveryError::EmptyApiGroup(key).into()); } for v in coreapis.versions { let resources = client.list_core_api_resources(&v).await?; data.push(GroupVersionData::new(v, resources)?); } let mut group = ApiGroup { name: ApiGroup::CORE_GROUP.to_string(), data, preferred: Some("v1".to_string()), }; group.sort_versions(); Ok(group) } fn sort_versions(&mut self) { self.data .sort_by_cached_key(|gvd| Version::parse(gvd.version.as_str())) } // shortcut method to give cheapest return for a single GVK pub(crate) async fn query_gvk( client: &Client, gvk: &GroupVersionKind, ) -> Result<(ApiResource, ApiCapabilities)> { let apiver = gvk.api_version(); let list = if gvk.group.is_empty() { client.list_core_api_resources(&apiver).await? } else { client.list_api_group_resources(&apiver).await? }; for res in &list.resources { if res.kind == gvk.kind && !res.name.contains('/')
} Err(DiscoveryError::MissingKind(format!("{:?}", gvk)).into()) } // shortcut method to give cheapest return for a pinned group pub(crate) async fn query_gv(client: &Client, gv: &GroupVersion) -> Result<Self> { let apiver = gv.api_version(); let list = if gv.group.is_empty() { client.list_core_api_resources(&apiver).await? } else { client.list_api_group_resources(&apiver).await? }; let data = GroupVersionData::new(gv.version.clone(), list)?; let group = ApiGroup { name: gv.group.clone(), data: vec![data], preferred: Some(gv.version.clone()), // you preferred what you asked for }; Ok(group) } } /// Public ApiGroup interface impl ApiGroup { /// Core group name pub const CORE_GROUP: &'static str = ""; /// Returns the name of this group. pub fn name(&self) -> &str { &self.name } /// Returns served versions (e.g. `["v1", "v2beta1"]`) of this group. /// /// This list is always non-empty, and sorted in the following order: /// - Stable versions (with the last being the first) /// - Beta versions (with the last being the first) /// - Alpha versions (with the last being the first) /// - Other versions, alphabetically /// /// in accordance with [kubernetes version priority](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#version-priority). pub fn versions(&self) -> impl Iterator<Item = &str> { self.data.as_slice().iter().map(|gvd| gvd.version.as_str()) } /// Returns preferred version for working with given group. pub fn preferred_version(&self) -> Option<&str> { self.preferred.as_deref() } /// Returns the preferred version or latest version for working with given group. /// /// If server does not recommend one, we pick the "most stable and most recent" version /// in accordance with [kubernetes version priority](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#version-priority). 
pub fn preferred_version_or_latest(&self) -> &str { // NB: self.versions is non-empty by construction in ApiGroup self.preferred .as_deref() .unwrap_or_else(|| self.versions().next().unwrap()) } /// Returns the resources in the group at an arbitrary version string. /// /// If the group does not support this version, the returned vector is empty. /// /// If you are looking for the api recommended list of resources, or just on particular kind /// consider [`ApiGroup::recommended_resources`] or [`ApiGroup::recommended_kind`] instead. pub fn versioned_resources(&self, ver: &str) -> Vec<(ApiResource, ApiCapabilities)> { self.data .iter() .find(|gvd| gvd.version == ver) .map(|gvd| gvd.resources.clone()) .unwrap_or_default() } /// Returns the recommended (preferred or latest) versioned resources in the group /// /// ```no_run /// use kube::{Client, api::{Api, DynamicObject}, discovery::{self, verbs}, ResourceExt}; /// #[tokio::main] /// async fn main() -> Result<(), kube::Error> { /// let client = Client::try_default().await?; /// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?; /// for (ar, caps) in apigroup.recommended_resources() { /// if !caps.supports_operation(verbs::LIST) { /// continue; /// } /// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar); /// for inst in api.list(&Default::default()).await? { /// println!("Found {}: {}", ar.kind, inst.name()); /// } /// } /// Ok(()) /// } /// ``` /// /// This is equivalent to taking the [`ApiGroup::versioned_resources`] at the [`ApiGroup::preferred_version_or_latest`]. 
pub fn recommended_resources(&self) -> Vec<(ApiResource, ApiCapabilities)> { let ver = self.preferred_version_or_latest(); self.versioned_resources(ver) } /// Returns the recommended version of the `kind` in the recommended resources (if found) /// /// ```no_run /// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt}; /// #[tokio::main] /// async fn main() -> Result<(), kube::Error> { /// let client = Client::try_default().await?; /// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?; /// let (ar, caps) = apigroup.recommended_kind("APIService").unwrap(); /// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar); /// for service in api.list(&Default::default()).await? { /// println!("Found APIService: {}", service.name()); /// } /// Ok(()) /// } /// ``` /// /// This is equivalent to filtering the [`ApiGroup::versioned_resources`] at [`ApiGroup::preferred_version_or_latest`] against a chosen `kind`. pub fn recommended_kind(&self, kind: &str) -> Option<(ApiResource, ApiCapabilities)> { let ver = self.preferred_version_or_latest(); for (ar, caps) in self.versioned_resources(ver) { if ar.kind == kind { return Some((ar, caps)); } } None } }
{ let ar = parse::parse_apiresource(res, &list.group_version)?; let caps = parse::parse_apicapabilities(&list, &res.name)?; return Ok((ar, caps)); }
conditional_block
apigroup.rs
use super::{ parse::{self, GroupVersionData}, version::Version, }; use crate::{error::DiscoveryError, Client, Result}; use k8s_openapi::apimachinery::pkg::apis::meta::v1::{APIGroup, APIVersions}; pub use kube_core::discovery::{verbs, ApiCapabilities, ApiResource, Scope}; use kube_core::gvk::{GroupVersion, GroupVersionKind}; /// Describes one API groups collected resources and capabilities. /// /// Each `ApiGroup` contains all data pinned to a each version. /// In particular, one data set within the `ApiGroup` for `"apiregistration.k8s.io"` /// is the subset pinned to `"v1"`; commonly referred to as `"apiregistration.k8s.io/v1"`. /// /// If you know the version of the discovered group, you can fetch it directly: /// ```no_run /// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt}; /// #[tokio::main] /// async fn main() -> Result<(), kube::Error> { /// let client = Client::try_default().await?; /// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?; /// for (apiresource, caps) in apigroup.versioned_resources("v1") { /// println!("Found ApiResource {}", apiresource.kind); /// } /// Ok(()) /// } /// ``` /// /// But if you do not know this information, you can use [`ApiGroup::preferred_version_or_latest`]. /// /// Whichever way you choose the end result is something describing a resource and its abilities: /// - `Vec<(ApiResource, `ApiCapabilities)>` :: for all resources in a versioned ApiGroup /// - `(ApiResource, ApiCapabilities)` :: for a single kind under a versioned ApiGroud /// /// These two types: [`ApiResource`], and [`ApiCapabilities`] /// should contain the information needed to construct an [`Api`](crate::Api) and start querying the kubernetes API. /// You will likely need to use [`DynamicObject`] as the generic type for Api to do this, /// as well as the [`ApiResource`] for the `DynamicType` for the [`Resource`] trait. 
/// /// ```no_run /// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt}; /// #[tokio::main] /// async fn main() -> Result<(), kube::Error> { /// let client = Client::try_default().await?; /// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?; /// let (ar, caps) = apigroup.recommended_kind("APIService").unwrap(); /// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar); /// for service in api.list(&Default::default()).await? { /// println!("Found APIService: {}", service.name()); /// } /// Ok(()) /// } /// ``` /// [`ApiResource`]: crate::discovery::ApiResource /// [`ApiCapabilities`]: crate::discovery::ApiCapabilities /// [`DynamicObject`]: crate::api::DynamicObject /// [`Resource`]: crate::Resource /// [`ApiGroup::preferred_version_or_latest`]: crate::discovery::ApiGroup::preferred_version_or_latest /// [`ApiGroup::versioned_resources`]: crate::discovery::ApiGroup::versioned_resources /// [`ApiGroup::recommended_resources`]: crate::discovery::ApiGroup::recommended_resources /// [`ApiGroup::recommended_kind`]: crate::discovery::ApiGroup::recommended_kind pub struct ApiGroup { /// Name of the group e.g. apiregistration.k8s.io name: String, /// List of resource information, capabilities at particular versions data: Vec<GroupVersionData>, /// Preferred version if exported by the `APIGroup` preferred: Option<String>, } /// Internal queriers to convert from an APIGroup (or APIVersions for core) to our ApiGroup /// /// These queriers ignore groups with empty versions. /// This ensures that `ApiGroup::preferred_version_or_latest` always have an answer. /// On construction, they also sort the internal vec of GroupVersionData according to `Version`. 
impl ApiGroup { pub(crate) async fn query_apis(client: &Client, g: APIGroup) -> Result<Self> { tracing::debug!(name = g.name.as_str(), "Listing group versions"); let key = g.name; if g.versions.is_empty() { return Err(DiscoveryError::EmptyApiGroup(key).into()); } let mut data = vec![]; for vers in &g.versions { let resources = client.list_api_group_resources(&vers.group_version).await?; data.push(GroupVersionData::new(vers.version.clone(), resources)?); } let mut group = ApiGroup { name: key, data, preferred: g.preferred_version.map(|v| v.version), }; group.sort_versions(); Ok(group) } pub(crate) async fn query_core(client: &Client, coreapis: APIVersions) -> Result<Self> { let mut data = vec![]; let key = ApiGroup::CORE_GROUP.to_string(); if coreapis.versions.is_empty() { return Err(DiscoveryError::EmptyApiGroup(key).into()); } for v in coreapis.versions { let resources = client.list_core_api_resources(&v).await?; data.push(GroupVersionData::new(v, resources)?); } let mut group = ApiGroup { name: ApiGroup::CORE_GROUP.to_string(), data, preferred: Some("v1".to_string()), }; group.sort_versions(); Ok(group) } fn
(&mut self) { self.data .sort_by_cached_key(|gvd| Version::parse(gvd.version.as_str())) } // shortcut method to give cheapest return for a single GVK pub(crate) async fn query_gvk( client: &Client, gvk: &GroupVersionKind, ) -> Result<(ApiResource, ApiCapabilities)> { let apiver = gvk.api_version(); let list = if gvk.group.is_empty() { client.list_core_api_resources(&apiver).await? } else { client.list_api_group_resources(&apiver).await? }; for res in &list.resources { if res.kind == gvk.kind && !res.name.contains('/') { let ar = parse::parse_apiresource(res, &list.group_version)?; let caps = parse::parse_apicapabilities(&list, &res.name)?; return Ok((ar, caps)); } } Err(DiscoveryError::MissingKind(format!("{:?}", gvk)).into()) } // shortcut method to give cheapest return for a pinned group pub(crate) async fn query_gv(client: &Client, gv: &GroupVersion) -> Result<Self> { let apiver = gv.api_version(); let list = if gv.group.is_empty() { client.list_core_api_resources(&apiver).await? } else { client.list_api_group_resources(&apiver).await? }; let data = GroupVersionData::new(gv.version.clone(), list)?; let group = ApiGroup { name: gv.group.clone(), data: vec![data], preferred: Some(gv.version.clone()), // you preferred what you asked for }; Ok(group) } } /// Public ApiGroup interface impl ApiGroup { /// Core group name pub const CORE_GROUP: &'static str = ""; /// Returns the name of this group. pub fn name(&self) -> &str { &self.name } /// Returns served versions (e.g. `["v1", "v2beta1"]`) of this group. /// /// This list is always non-empty, and sorted in the following order: /// - Stable versions (with the last being the first) /// - Beta versions (with the last being the first) /// - Alpha versions (with the last being the first) /// - Other versions, alphabetically /// /// in accordance with [kubernetes version priority](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#version-priority). 
pub fn versions(&self) -> impl Iterator<Item = &str> { self.data.as_slice().iter().map(|gvd| gvd.version.as_str()) } /// Returns preferred version for working with given group. pub fn preferred_version(&self) -> Option<&str> { self.preferred.as_deref() } /// Returns the preferred version or latest version for working with given group. /// /// If server does not recommend one, we pick the "most stable and most recent" version /// in accordance with [kubernetes version priority](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#version-priority). pub fn preferred_version_or_latest(&self) -> &str { // NB: self.versions is non-empty by construction in ApiGroup self.preferred .as_deref() .unwrap_or_else(|| self.versions().next().unwrap()) } /// Returns the resources in the group at an arbitrary version string. /// /// If the group does not support this version, the returned vector is empty. /// /// If you are looking for the api recommended list of resources, or just on particular kind /// consider [`ApiGroup::recommended_resources`] or [`ApiGroup::recommended_kind`] instead. pub fn versioned_resources(&self, ver: &str) -> Vec<(ApiResource, ApiCapabilities)> { self.data .iter() .find(|gvd| gvd.version == ver) .map(|gvd| gvd.resources.clone()) .unwrap_or_default() } /// Returns the recommended (preferred or latest) versioned resources in the group /// /// ```no_run /// use kube::{Client, api::{Api, DynamicObject}, discovery::{self, verbs}, ResourceExt}; /// #[tokio::main] /// async fn main() -> Result<(), kube::Error> { /// let client = Client::try_default().await?; /// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?; /// for (ar, caps) in apigroup.recommended_resources() { /// if !caps.supports_operation(verbs::LIST) { /// continue; /// } /// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar); /// for inst in api.list(&Default::default()).await? 
{ /// println!("Found {}: {}", ar.kind, inst.name()); /// } /// } /// Ok(()) /// } /// ``` /// /// This is equivalent to taking the [`ApiGroup::versioned_resources`] at the [`ApiGroup::preferred_version_or_latest`]. pub fn recommended_resources(&self) -> Vec<(ApiResource, ApiCapabilities)> { let ver = self.preferred_version_or_latest(); self.versioned_resources(ver) } /// Returns the recommended version of the `kind` in the recommended resources (if found) /// /// ```no_run /// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt}; /// #[tokio::main] /// async fn main() -> Result<(), kube::Error> { /// let client = Client::try_default().await?; /// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?; /// let (ar, caps) = apigroup.recommended_kind("APIService").unwrap(); /// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar); /// for service in api.list(&Default::default()).await? { /// println!("Found APIService: {}", service.name()); /// } /// Ok(()) /// } /// ``` /// /// This is equivalent to filtering the [`ApiGroup::versioned_resources`] at [`ApiGroup::preferred_version_or_latest`] against a chosen `kind`. pub fn recommended_kind(&self, kind: &str) -> Option<(ApiResource, ApiCapabilities)> { let ver = self.preferred_version_or_latest(); for (ar, caps) in self.versioned_resources(ver) { if ar.kind == kind { return Some((ar, caps)); } } None } }
sort_versions
identifier_name
apigroup.rs
use super::{ parse::{self, GroupVersionData}, version::Version, }; use crate::{error::DiscoveryError, Client, Result}; use k8s_openapi::apimachinery::pkg::apis::meta::v1::{APIGroup, APIVersions}; pub use kube_core::discovery::{verbs, ApiCapabilities, ApiResource, Scope}; use kube_core::gvk::{GroupVersion, GroupVersionKind}; /// Describes one API groups collected resources and capabilities. /// /// Each `ApiGroup` contains all data pinned to a each version. /// In particular, one data set within the `ApiGroup` for `"apiregistration.k8s.io"` /// is the subset pinned to `"v1"`; commonly referred to as `"apiregistration.k8s.io/v1"`. /// /// If you know the version of the discovered group, you can fetch it directly: /// ```no_run /// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt}; /// #[tokio::main] /// async fn main() -> Result<(), kube::Error> { /// let client = Client::try_default().await?; /// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?; /// for (apiresource, caps) in apigroup.versioned_resources("v1") { /// println!("Found ApiResource {}", apiresource.kind); /// } /// Ok(()) /// } /// ``` /// /// But if you do not know this information, you can use [`ApiGroup::preferred_version_or_latest`]. /// /// Whichever way you choose the end result is something describing a resource and its abilities: /// - `Vec<(ApiResource, `ApiCapabilities)>` :: for all resources in a versioned ApiGroup /// - `(ApiResource, ApiCapabilities)` :: for a single kind under a versioned ApiGroud /// /// These two types: [`ApiResource`], and [`ApiCapabilities`] /// should contain the information needed to construct an [`Api`](crate::Api) and start querying the kubernetes API. /// You will likely need to use [`DynamicObject`] as the generic type for Api to do this, /// as well as the [`ApiResource`] for the `DynamicType` for the [`Resource`] trait. 
/// /// ```no_run /// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt}; /// #[tokio::main] /// async fn main() -> Result<(), kube::Error> { /// let client = Client::try_default().await?; /// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?; /// let (ar, caps) = apigroup.recommended_kind("APIService").unwrap(); /// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar); /// for service in api.list(&Default::default()).await? { /// println!("Found APIService: {}", service.name()); /// } /// Ok(()) /// } /// ``` /// [`ApiResource`]: crate::discovery::ApiResource /// [`ApiCapabilities`]: crate::discovery::ApiCapabilities /// [`DynamicObject`]: crate::api::DynamicObject /// [`Resource`]: crate::Resource /// [`ApiGroup::preferred_version_or_latest`]: crate::discovery::ApiGroup::preferred_version_or_latest /// [`ApiGroup::versioned_resources`]: crate::discovery::ApiGroup::versioned_resources /// [`ApiGroup::recommended_resources`]: crate::discovery::ApiGroup::recommended_resources /// [`ApiGroup::recommended_kind`]: crate::discovery::ApiGroup::recommended_kind pub struct ApiGroup { /// Name of the group e.g. apiregistration.k8s.io name: String, /// List of resource information, capabilities at particular versions data: Vec<GroupVersionData>, /// Preferred version if exported by the `APIGroup` preferred: Option<String>, } /// Internal queriers to convert from an APIGroup (or APIVersions for core) to our ApiGroup /// /// These queriers ignore groups with empty versions. /// This ensures that `ApiGroup::preferred_version_or_latest` always have an answer. /// On construction, they also sort the internal vec of GroupVersionData according to `Version`. 
impl ApiGroup { pub(crate) async fn query_apis(client: &Client, g: APIGroup) -> Result<Self> { tracing::debug!(name = g.name.as_str(), "Listing group versions"); let key = g.name; if g.versions.is_empty() { return Err(DiscoveryError::EmptyApiGroup(key).into()); } let mut data = vec![]; for vers in &g.versions { let resources = client.list_api_group_resources(&vers.group_version).await?; data.push(GroupVersionData::new(vers.version.clone(), resources)?); } let mut group = ApiGroup { name: key, data, preferred: g.preferred_version.map(|v| v.version), }; group.sort_versions(); Ok(group) } pub(crate) async fn query_core(client: &Client, coreapis: APIVersions) -> Result<Self> { let mut data = vec![]; let key = ApiGroup::CORE_GROUP.to_string(); if coreapis.versions.is_empty() { return Err(DiscoveryError::EmptyApiGroup(key).into()); } for v in coreapis.versions { let resources = client.list_core_api_resources(&v).await?; data.push(GroupVersionData::new(v, resources)?); } let mut group = ApiGroup { name: ApiGroup::CORE_GROUP.to_string(), data, preferred: Some("v1".to_string()), }; group.sort_versions(); Ok(group) } fn sort_versions(&mut self) { self.data .sort_by_cached_key(|gvd| Version::parse(gvd.version.as_str())) } // shortcut method to give cheapest return for a single GVK pub(crate) async fn query_gvk( client: &Client, gvk: &GroupVersionKind, ) -> Result<(ApiResource, ApiCapabilities)> { let apiver = gvk.api_version(); let list = if gvk.group.is_empty() { client.list_core_api_resources(&apiver).await? } else { client.list_api_group_resources(&apiver).await? 
}; for res in &list.resources { if res.kind == gvk.kind && !res.name.contains('/') { let ar = parse::parse_apiresource(res, &list.group_version)?; let caps = parse::parse_apicapabilities(&list, &res.name)?; return Ok((ar, caps)); } } Err(DiscoveryError::MissingKind(format!("{:?}", gvk)).into()) } // shortcut method to give cheapest return for a pinned group pub(crate) async fn query_gv(client: &Client, gv: &GroupVersion) -> Result<Self> { let apiver = gv.api_version(); let list = if gv.group.is_empty() { client.list_core_api_resources(&apiver).await? } else { client.list_api_group_resources(&apiver).await? }; let data = GroupVersionData::new(gv.version.clone(), list)?; let group = ApiGroup { name: gv.group.clone(), data: vec![data], preferred: Some(gv.version.clone()), // you preferred what you asked for }; Ok(group) } } /// Public ApiGroup interface impl ApiGroup { /// Core group name pub const CORE_GROUP: &'static str = ""; /// Returns the name of this group. pub fn name(&self) -> &str { &self.name } /// Returns served versions (e.g. `["v1", "v2beta1"]`) of this group. /// /// This list is always non-empty, and sorted in the following order: /// - Stable versions (with the last being the first) /// - Beta versions (with the last being the first) /// - Alpha versions (with the last being the first) /// - Other versions, alphabetically /// /// in accordance with [kubernetes version priority](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#version-priority). pub fn versions(&self) -> impl Iterator<Item = &str> { self.data.as_slice().iter().map(|gvd| gvd.version.as_str()) } /// Returns preferred version for working with given group. pub fn preferred_version(&self) -> Option<&str> { self.preferred.as_deref() } /// Returns the preferred version or latest version for working with given group. 
/// /// If server does not recommend one, we pick the "most stable and most recent" version /// in accordance with [kubernetes version priority](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#version-priority). pub fn preferred_version_or_latest(&self) -> &str { // NB: self.versions is non-empty by construction in ApiGroup self.preferred .as_deref() .unwrap_or_else(|| self.versions().next().unwrap()) } /// Returns the resources in the group at an arbitrary version string. /// /// If the group does not support this version, the returned vector is empty. /// /// If you are looking for the api recommended list of resources, or just on particular kind /// consider [`ApiGroup::recommended_resources`] or [`ApiGroup::recommended_kind`] instead. pub fn versioned_resources(&self, ver: &str) -> Vec<(ApiResource, ApiCapabilities)> { self.data .iter() .find(|gvd| gvd.version == ver) .map(|gvd| gvd.resources.clone()) .unwrap_or_default() } /// Returns the recommended (preferred or latest) versioned resources in the group /// /// ```no_run /// use kube::{Client, api::{Api, DynamicObject}, discovery::{self, verbs}, ResourceExt}; /// #[tokio::main] /// async fn main() -> Result<(), kube::Error> { /// let client = Client::try_default().await?; /// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
/// for inst in api.list(&Default::default()).await? { /// println!("Found {}: {}", ar.kind, inst.name()); /// } /// } /// Ok(()) /// } /// ``` /// /// This is equivalent to taking the [`ApiGroup::versioned_resources`] at the [`ApiGroup::preferred_version_or_latest`]. pub fn recommended_resources(&self) -> Vec<(ApiResource, ApiCapabilities)> { let ver = self.preferred_version_or_latest(); self.versioned_resources(ver) } /// Returns the recommended version of the `kind` in the recommended resources (if found) /// /// ```no_run /// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt}; /// #[tokio::main] /// async fn main() -> Result<(), kube::Error> { /// let client = Client::try_default().await?; /// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?; /// let (ar, caps) = apigroup.recommended_kind("APIService").unwrap(); /// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar); /// for service in api.list(&Default::default()).await? { /// println!("Found APIService: {}", service.name()); /// } /// Ok(()) /// } /// ``` /// /// This is equivalent to filtering the [`ApiGroup::versioned_resources`] at [`ApiGroup::preferred_version_or_latest`] against a chosen `kind`. pub fn recommended_kind(&self, kind: &str) -> Option<(ApiResource, ApiCapabilities)> { let ver = self.preferred_version_or_latest(); for (ar, caps) in self.versioned_resources(ver) { if ar.kind == kind { return Some((ar, caps)); } } None } }
/// for (ar, caps) in apigroup.recommended_resources() { /// if !caps.supports_operation(verbs::LIST) { /// continue; /// } /// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar);
random_line_split
apigroup.rs
use super::{ parse::{self, GroupVersionData}, version::Version, }; use crate::{error::DiscoveryError, Client, Result}; use k8s_openapi::apimachinery::pkg::apis::meta::v1::{APIGroup, APIVersions}; pub use kube_core::discovery::{verbs, ApiCapabilities, ApiResource, Scope}; use kube_core::gvk::{GroupVersion, GroupVersionKind}; /// Describes one API groups collected resources and capabilities. /// /// Each `ApiGroup` contains all data pinned to a each version. /// In particular, one data set within the `ApiGroup` for `"apiregistration.k8s.io"` /// is the subset pinned to `"v1"`; commonly referred to as `"apiregistration.k8s.io/v1"`. /// /// If you know the version of the discovered group, you can fetch it directly: /// ```no_run /// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt}; /// #[tokio::main] /// async fn main() -> Result<(), kube::Error> { /// let client = Client::try_default().await?; /// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?; /// for (apiresource, caps) in apigroup.versioned_resources("v1") { /// println!("Found ApiResource {}", apiresource.kind); /// } /// Ok(()) /// } /// ``` /// /// But if you do not know this information, you can use [`ApiGroup::preferred_version_or_latest`]. /// /// Whichever way you choose the end result is something describing a resource and its abilities: /// - `Vec<(ApiResource, `ApiCapabilities)>` :: for all resources in a versioned ApiGroup /// - `(ApiResource, ApiCapabilities)` :: for a single kind under a versioned ApiGroud /// /// These two types: [`ApiResource`], and [`ApiCapabilities`] /// should contain the information needed to construct an [`Api`](crate::Api) and start querying the kubernetes API. /// You will likely need to use [`DynamicObject`] as the generic type for Api to do this, /// as well as the [`ApiResource`] for the `DynamicType` for the [`Resource`] trait. 
/// /// ```no_run /// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt}; /// #[tokio::main] /// async fn main() -> Result<(), kube::Error> { /// let client = Client::try_default().await?; /// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?; /// let (ar, caps) = apigroup.recommended_kind("APIService").unwrap(); /// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar); /// for service in api.list(&Default::default()).await? { /// println!("Found APIService: {}", service.name()); /// } /// Ok(()) /// } /// ``` /// [`ApiResource`]: crate::discovery::ApiResource /// [`ApiCapabilities`]: crate::discovery::ApiCapabilities /// [`DynamicObject`]: crate::api::DynamicObject /// [`Resource`]: crate::Resource /// [`ApiGroup::preferred_version_or_latest`]: crate::discovery::ApiGroup::preferred_version_or_latest /// [`ApiGroup::versioned_resources`]: crate::discovery::ApiGroup::versioned_resources /// [`ApiGroup::recommended_resources`]: crate::discovery::ApiGroup::recommended_resources /// [`ApiGroup::recommended_kind`]: crate::discovery::ApiGroup::recommended_kind pub struct ApiGroup { /// Name of the group e.g. apiregistration.k8s.io name: String, /// List of resource information, capabilities at particular versions data: Vec<GroupVersionData>, /// Preferred version if exported by the `APIGroup` preferred: Option<String>, } /// Internal queriers to convert from an APIGroup (or APIVersions for core) to our ApiGroup /// /// These queriers ignore groups with empty versions. /// This ensures that `ApiGroup::preferred_version_or_latest` always have an answer. /// On construction, they also sort the internal vec of GroupVersionData according to `Version`. 
impl ApiGroup { pub(crate) async fn query_apis(client: &Client, g: APIGroup) -> Result<Self> { tracing::debug!(name = g.name.as_str(), "Listing group versions"); let key = g.name; if g.versions.is_empty() { return Err(DiscoveryError::EmptyApiGroup(key).into()); } let mut data = vec![]; for vers in &g.versions { let resources = client.list_api_group_resources(&vers.group_version).await?; data.push(GroupVersionData::new(vers.version.clone(), resources)?); } let mut group = ApiGroup { name: key, data, preferred: g.preferred_version.map(|v| v.version), }; group.sort_versions(); Ok(group) } pub(crate) async fn query_core(client: &Client, coreapis: APIVersions) -> Result<Self> { let mut data = vec![]; let key = ApiGroup::CORE_GROUP.to_string(); if coreapis.versions.is_empty() { return Err(DiscoveryError::EmptyApiGroup(key).into()); } for v in coreapis.versions { let resources = client.list_core_api_resources(&v).await?; data.push(GroupVersionData::new(v, resources)?); } let mut group = ApiGroup { name: ApiGroup::CORE_GROUP.to_string(), data, preferred: Some("v1".to_string()), }; group.sort_versions(); Ok(group) } fn sort_versions(&mut self) { self.data .sort_by_cached_key(|gvd| Version::parse(gvd.version.as_str())) } // shortcut method to give cheapest return for a single GVK pub(crate) async fn query_gvk( client: &Client, gvk: &GroupVersionKind, ) -> Result<(ApiResource, ApiCapabilities)>
// shortcut method to give cheapest return for a pinned group pub(crate) async fn query_gv(client: &Client, gv: &GroupVersion) -> Result<Self> { let apiver = gv.api_version(); let list = if gv.group.is_empty() { client.list_core_api_resources(&apiver).await? } else { client.list_api_group_resources(&apiver).await? }; let data = GroupVersionData::new(gv.version.clone(), list)?; let group = ApiGroup { name: gv.group.clone(), data: vec![data], preferred: Some(gv.version.clone()), // you preferred what you asked for }; Ok(group) } } /// Public ApiGroup interface impl ApiGroup { /// Core group name pub const CORE_GROUP: &'static str = ""; /// Returns the name of this group. pub fn name(&self) -> &str { &self.name } /// Returns served versions (e.g. `["v1", "v2beta1"]`) of this group. /// /// This list is always non-empty, and sorted in the following order: /// - Stable versions (with the last being the first) /// - Beta versions (with the last being the first) /// - Alpha versions (with the last being the first) /// - Other versions, alphabetically /// /// in accordance with [kubernetes version priority](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#version-priority). pub fn versions(&self) -> impl Iterator<Item = &str> { self.data.as_slice().iter().map(|gvd| gvd.version.as_str()) } /// Returns preferred version for working with given group. pub fn preferred_version(&self) -> Option<&str> { self.preferred.as_deref() } /// Returns the preferred version or latest version for working with given group. /// /// If server does not recommend one, we pick the "most stable and most recent" version /// in accordance with [kubernetes version priority](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#version-priority). 
pub fn preferred_version_or_latest(&self) -> &str { // NB: self.versions is non-empty by construction in ApiGroup self.preferred .as_deref() .unwrap_or_else(|| self.versions().next().unwrap()) } /// Returns the resources in the group at an arbitrary version string. /// /// If the group does not support this version, the returned vector is empty. /// /// If you are looking for the api recommended list of resources, or just on particular kind /// consider [`ApiGroup::recommended_resources`] or [`ApiGroup::recommended_kind`] instead. pub fn versioned_resources(&self, ver: &str) -> Vec<(ApiResource, ApiCapabilities)> { self.data .iter() .find(|gvd| gvd.version == ver) .map(|gvd| gvd.resources.clone()) .unwrap_or_default() } /// Returns the recommended (preferred or latest) versioned resources in the group /// /// ```no_run /// use kube::{Client, api::{Api, DynamicObject}, discovery::{self, verbs}, ResourceExt}; /// #[tokio::main] /// async fn main() -> Result<(), kube::Error> { /// let client = Client::try_default().await?; /// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?; /// for (ar, caps) in apigroup.recommended_resources() { /// if !caps.supports_operation(verbs::LIST) { /// continue; /// } /// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar); /// for inst in api.list(&Default::default()).await? { /// println!("Found {}: {}", ar.kind, inst.name()); /// } /// } /// Ok(()) /// } /// ``` /// /// This is equivalent to taking the [`ApiGroup::versioned_resources`] at the [`ApiGroup::preferred_version_or_latest`]. 
pub fn recommended_resources(&self) -> Vec<(ApiResource, ApiCapabilities)> { let ver = self.preferred_version_or_latest(); self.versioned_resources(ver) } /// Returns the recommended version of the `kind` in the recommended resources (if found) /// /// ```no_run /// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt}; /// #[tokio::main] /// async fn main() -> Result<(), kube::Error> { /// let client = Client::try_default().await?; /// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?; /// let (ar, caps) = apigroup.recommended_kind("APIService").unwrap(); /// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar); /// for service in api.list(&Default::default()).await? { /// println!("Found APIService: {}", service.name()); /// } /// Ok(()) /// } /// ``` /// /// This is equivalent to filtering the [`ApiGroup::versioned_resources`] at [`ApiGroup::preferred_version_or_latest`] against a chosen `kind`. pub fn recommended_kind(&self, kind: &str) -> Option<(ApiResource, ApiCapabilities)> { let ver = self.preferred_version_or_latest(); for (ar, caps) in self.versioned_resources(ver) { if ar.kind == kind { return Some((ar, caps)); } } None } }
{ let apiver = gvk.api_version(); let list = if gvk.group.is_empty() { client.list_core_api_resources(&apiver).await? } else { client.list_api_group_resources(&apiver).await? }; for res in &list.resources { if res.kind == gvk.kind && !res.name.contains('/') { let ar = parse::parse_apiresource(res, &list.group_version)?; let caps = parse::parse_apicapabilities(&list, &res.name)?; return Ok((ar, caps)); } } Err(DiscoveryError::MissingKind(format!("{:?}", gvk)).into()) }
identifier_body
tlcell.rs
use std::any::TypeId; use std::cell::UnsafeCell; use std::collections::HashSet; use std::marker::PhantomData; use super::Invariant; std::thread_local! { static SINGLETON_CHECK: std::cell::RefCell<HashSet<TypeId>> = std::cell::RefCell::new(HashSet::new()); } struct NotSendOrSync(*const ()); /// Borrowing-owner of zero or more [`TLCell`](struct.TLCell.html) /// instances. /// /// See [crate documentation](index.html). #[cfg_attr(docsrs, doc(cfg(feature = "std")))] pub struct TLCellOwner<Q: 'static> { // Use NotSendOrSync to disable Send and Sync, not_send_or_sync: PhantomData<NotSendOrSync>, // Use Invariant<Q> for invariant parameter typ: PhantomData<Invariant<Q>>, } impl<Q: 'static> Drop for TLCellOwner<Q> { fn drop(&mut self) { SINGLETON_CHECK.with(|set| set.borrow_mut().remove(&TypeId::of::<Q>())); } } impl<Q: 'static> Default for TLCellOwner<Q> { fn default() -> Self { TLCellOwner::new() } } impl<Q: 'static> TLCellOwner<Q> { /// Create the singleton owner instance. Each owner may be used /// to create many `TLCell` instances. There may be only one /// instance of this type per thread at any given time for each /// different marker type `Q`. This call panics if a second /// simultaneous instance is created. Since the owner is only /// valid to use in the thread it is created in, it does not /// support `Send` or `Sync`. pub fn new() -> Self { SINGLETON_CHECK.with(|set| { assert!(set.borrow_mut().insert(TypeId::of::<Q>()), "Illegal to create two TLCellOwner instances within the same thread with the same marker type parameter"); }); Self { not_send_or_sync: PhantomData, typ: PhantomData, } } /// Create a new cell owned by this owner instance. See also /// [`TLCell::new`]. /// /// [`TLCell::new`]: struct.TLCell.html pub fn cell<T>(&self, value: T) -> TLCell<Q, T> { TLCell::<Q, T>::new(value) } /// Borrow contents of a `TLCell` immutably (read-only). Many /// `TLCell` instances can be borrowed immutably at the same time /// from the same owner. 
#[inline] pub fn ro<'a, T: ?Sized>(&'a self, tc: &'a TLCell<Q, T>) -> &'a T { unsafe { &*tc.value.get() } } /// Borrow contents of a `TLCell` mutably (read-write). Only one /// `TLCell` at a time can be borrowed from the owner using this /// call. The returned reference must go out of scope before /// another can be borrowed. #[inline] pub fn rw<'a, T: ?Sized>(&'a mut self, tc: &'a TLCell<Q, T>) -> &'a mut T { unsafe { &mut *tc.value.get() } } /// Borrow contents of two `TLCell` instances mutably. Panics if /// the two `TLCell` instances point to the same memory. #[inline] pub fn rw2<'a, T: ?Sized, U: ?Sized>( &'a mut self, tc1: &'a TLCell<Q, T>, tc2: &'a TLCell<Q, U>, ) -> (&'a mut T, &'a mut U) { assert!( tc1 as *const _ as *const () as usize != tc2 as *const _ as *const () as usize, "Illegal to borrow same TLCell twice with rw2()" ); unsafe { (&mut *tc1.value.get(), &mut *tc2.value.get()) } } /// Borrow contents of three `TLCell` instances mutably. Panics if /// any pair of `TLCell` instances point to the same memory. #[inline] pub fn rw3<'a, T: ?Sized, U: ?Sized, V: ?Sized>( &'a mut self, tc1: &'a TLCell<Q, T>, tc2: &'a TLCell<Q, U>, tc3: &'a TLCell<Q, V>, ) -> (&'a mut T, &'a mut U, &'a mut V) { assert!( (tc1 as *const _ as *const () as usize != tc2 as *const _ as *const () as usize) && (tc2 as *const _ as *const () as usize != tc3 as *const _ as *const () as usize) && (tc3 as *const _ as *const () as usize != tc1 as *const _ as *const () as usize), "Illegal to borrow same TLCell twice with rw3()" ); unsafe { ( &mut *tc1.value.get(), &mut *tc2.value.get(), &mut *tc3.value.get(), ) } } } /// Cell whose contents is owned (for borrowing purposes) by a /// [`TLCellOwner`]. /// /// To borrow from this cell, use the borrowing calls on the /// [`TLCellOwner`] instance that shares the same marker type. Since /// there may be another indistinguishable [`TLCellOwner`] in another /// thread, `Sync` is not supported for this type. 
However it *is* /// possible to send the cell to another thread, which then allows its /// contents to be borrowed using the owner in that thread. /// /// See also [crate documentation](index.html). /// /// [`TLCellOwner`]: struct.TLCellOwner.html #[repr(transparent)] #[cfg_attr(docsrs, doc(cfg(feature = "std")))] pub struct TLCell<Q, T: ?Sized> { // Use Invariant<Q> for invariant parameter owner: PhantomData<Invariant<Q>>, // TLCell absolutely cannot be Sync, since otherwise you could send // two &TLCell's to two different threads, that each have their own // TLCellOwner<Q> instance and that could therefore both give out // a &mut T to the same T. // // However, it's fine to Send a TLCell to a different thread, because // you can only send something if nothing borrows it, so nothing can // be accessing its contents. After sending the TLCell, the original // TLCellOwner can no longer give access to the TLCell's contents since // TLCellOwner is !Send + !Sync. Only the TLCellOwner of the new thread // can give access to this TLCell's contents now. // // `UnsafeCell` already disables `Sync` and gives the right `Send` implementation. value: UnsafeCell<T>, } impl<Q, T> TLCell<Q, T> { /// Create a new `TLCell` owned for borrowing purposes by the /// `TLCellOwner` derived from the same marker type `Q`. #[inline] pub const fn new(value: T) -> TLCell<Q, T> { TLCell { owner: PhantomData, value: UnsafeCell::new(value), } } /// Destroy the cell and return the contained value /// /// Safety: Since this consumes the cell, there can be no other /// references to the cell or the data at this point. #[inline] pub fn into_inner(self) -> T { self.value.into_inner() } } impl<Q, T: ?Sized> TLCell<Q, T> { /// Borrow contents of this cell immutably (read-only). Many /// `TLCell` instances can be borrowed immutably at the same time /// from the same owner. 
#[inline] pub fn ro<'a>(&'a self, owner: &'a TLCellOwner<Q>) -> &'a T { owner.ro(self) } /// Borrow contents of this cell mutably (read-write). Only one /// `TLCell` at a time can be borrowed from the owner using this /// call. The returned reference must go out of scope before /// another can be borrowed. To mutably borrow from two or three /// cells at the same time, see [`TLCellOwner::rw2`] or /// [`TLCellOwner::rw3`]. #[inline] pub fn rw<'a>(&'a self, owner: &'a mut TLCellOwner<Q>) -> &'a mut T { owner.rw(self) } /// Returns a mutable reference to the underlying data /// /// Note that this is only useful at the beginning-of-life or /// end-of-life of the cell when you have exclusive access to it. /// Normally you'd use [`TLCell::rw`] or [`TLCellOwner::rw`] to /// get a mutable reference to the contents of the cell. /// /// Safety: This call borrows `TLCell` mutably which guarantees /// that we possess the only reference. This means that there can /// be no active borrows of other forms, even ones obtained using /// an immutable reference. 
#[inline] pub fn get_mut(&mut self) -> &mut T { self.value.get_mut() } } impl<Q: 'static, T: Default + ?Sized> Default for TLCell<Q, T> { fn default() -> Self { TLCell::new(T::default()) } } #[cfg(test)] mod tests { use super::{TLCell, TLCellOwner}; #[test] #[should_panic] fn tlcell_singleton_1() { struct Marker; let _owner1 = TLCellOwner::<Marker>::new(); let _owner2 = TLCellOwner::<Marker>::new(); // Panic here } #[test] fn tlcell_singleton_2() { struct Marker; let owner1 = TLCellOwner::<Marker>::new(); drop(owner1); let _owner2 = TLCellOwner::<Marker>::new(); } #[test] fn tlcell_singleton_3() { struct Marker1; struct Marker2; let _owner1 = TLCellOwner::<Marker1>::new(); let _owner2 = TLCellOwner::<Marker2>::new(); } #[test] fn tlcell() { struct Marker; type ACellOwner = TLCellOwner<Marker>; type ACell<T> = TLCell<Marker, T>; let mut owner = ACellOwner::new(); let c1 = ACell::new(100u32); let c2 = owner.cell(200u32); (*owner.rw(&c1)) += 1; (*owner.rw(&c2)) += 2; let c1ref = owner.ro(&c1); let c2ref = owner.ro(&c2); let total = *c1ref + *c2ref; assert_eq!(total, 303); } #[test] fn tlcell_threads() { struct Marker; type ACellOwner = TLCellOwner<Marker>; let mut _owner1 = ACellOwner::new(); std::thread::spawn(|| { let mut _owner2 = ACellOwner::new(); }) .join() .unwrap(); } #[test] fn tlcell_get_mut() { struct Marker; type ACellOwner = TLCellOwner<Marker>; type ACell<T> = TLCell<Marker, T>; let owner = ACellOwner::new(); let mut cell = ACell::new(100u32); let mut_ref = cell.get_mut(); *mut_ref = 50; let cell_ref = owner.ro(&cell); assert_eq!(*cell_ref, 50); } #[test] fn tlcell_into_inner() { struct Marker; type ACell<T> = TLCell<Marker, T>; let cell = ACell::new(100u32); assert_eq!(cell.into_inner(), 100); } #[test] fn tlcell_unsized() { struct Marker; type ACellOwner = TLCellOwner<Marker>; type ACell<T> = TLCell<Marker, T>; let mut owner = ACellOwner::new(); struct Squares(u32); struct Integers(u64); trait Series { fn step(&mut self); fn value(&self) -> u64; } impl 
Series for Squares { fn step(&mut self) { self.0 += 1; } fn value(&self) -> u64 { (self.0 as u64) * (self.0 as u64) } } impl Series for Integers { fn step(&mut self) { self.0 += 1; } fn value(&self) -> u64 { self.0 } } fn series(init: u32, is_squares: bool) -> Box<ACell<dyn Series>> { if is_squares
else { Box::new(ACell::new(Integers(init as u64))) } } let own = &mut owner; let cell1 = series(4, false); let cell2 = series(7, true); let cell3 = series(3, true); assert_eq!(cell1.ro(own).value(), 4); cell1.rw(own).step(); assert_eq!(cell1.ro(own).value(), 5); assert_eq!(own.ro(&cell2).value(), 49); own.rw(&cell2).step(); assert_eq!(own.ro(&cell2).value(), 64); let (r1, r2, r3) = own.rw3(&cell1, &cell2, &cell3); r1.step(); r2.step(); r3.step(); assert_eq!(cell1.ro(own).value(), 6); assert_eq!(cell2.ro(own).value(), 81); assert_eq!(cell3.ro(own).value(), 16); let (r1, r2) = own.rw2(&cell1, &cell2); r1.step(); r2.step(); assert_eq!(cell1.ro(own).value(), 7); assert_eq!(cell2.ro(own).value(), 100); } }
{ Box::new(ACell::new(Squares(init))) }
conditional_block
tlcell.rs
use std::any::TypeId; use std::cell::UnsafeCell; use std::collections::HashSet; use std::marker::PhantomData; use super::Invariant; std::thread_local! { static SINGLETON_CHECK: std::cell::RefCell<HashSet<TypeId>> = std::cell::RefCell::new(HashSet::new()); } struct NotSendOrSync(*const ()); /// Borrowing-owner of zero or more [`TLCell`](struct.TLCell.html) /// instances. /// /// See [crate documentation](index.html). #[cfg_attr(docsrs, doc(cfg(feature = "std")))] pub struct TLCellOwner<Q: 'static> { // Use NotSendOrSync to disable Send and Sync, not_send_or_sync: PhantomData<NotSendOrSync>, // Use Invariant<Q> for invariant parameter typ: PhantomData<Invariant<Q>>, } impl<Q: 'static> Drop for TLCellOwner<Q> { fn drop(&mut self) { SINGLETON_CHECK.with(|set| set.borrow_mut().remove(&TypeId::of::<Q>())); } } impl<Q: 'static> Default for TLCellOwner<Q> { fn default() -> Self { TLCellOwner::new() } } impl<Q: 'static> TLCellOwner<Q> { /// Create the singleton owner instance. Each owner may be used /// to create many `TLCell` instances. There may be only one /// instance of this type per thread at any given time for each /// different marker type `Q`. This call panics if a second /// simultaneous instance is created. Since the owner is only /// valid to use in the thread it is created in, it does not /// support `Send` or `Sync`. pub fn new() -> Self { SINGLETON_CHECK.with(|set| { assert!(set.borrow_mut().insert(TypeId::of::<Q>()), "Illegal to create two TLCellOwner instances within the same thread with the same marker type parameter"); }); Self { not_send_or_sync: PhantomData, typ: PhantomData, } } /// Create a new cell owned by this owner instance. See also /// [`TLCell::new`]. /// /// [`TLCell::new`]: struct.TLCell.html pub fn cell<T>(&self, value: T) -> TLCell<Q, T> { TLCell::<Q, T>::new(value) } /// Borrow contents of a `TLCell` immutably (read-only). Many /// `TLCell` instances can be borrowed immutably at the same time /// from the same owner. 
#[inline] pub fn ro<'a, T: ?Sized>(&'a self, tc: &'a TLCell<Q, T>) -> &'a T { unsafe { &*tc.value.get() } } /// Borrow contents of a `TLCell` mutably (read-write). Only one /// `TLCell` at a time can be borrowed from the owner using this /// call. The returned reference must go out of scope before /// another can be borrowed. #[inline] pub fn rw<'a, T: ?Sized>(&'a mut self, tc: &'a TLCell<Q, T>) -> &'a mut T { unsafe { &mut *tc.value.get() } } /// Borrow contents of two `TLCell` instances mutably. Panics if /// the two `TLCell` instances point to the same memory. #[inline] pub fn rw2<'a, T: ?Sized, U: ?Sized>( &'a mut self, tc1: &'a TLCell<Q, T>, tc2: &'a TLCell<Q, U>, ) -> (&'a mut T, &'a mut U) { assert!( tc1 as *const _ as *const () as usize != tc2 as *const _ as *const () as usize, "Illegal to borrow same TLCell twice with rw2()" ); unsafe { (&mut *tc1.value.get(), &mut *tc2.value.get()) } } /// Borrow contents of three `TLCell` instances mutably. Panics if /// any pair of `TLCell` instances point to the same memory. #[inline] pub fn rw3<'a, T: ?Sized, U: ?Sized, V: ?Sized>( &'a mut self, tc1: &'a TLCell<Q, T>, tc2: &'a TLCell<Q, U>, tc3: &'a TLCell<Q, V>, ) -> (&'a mut T, &'a mut U, &'a mut V) { assert!( (tc1 as *const _ as *const () as usize != tc2 as *const _ as *const () as usize) && (tc2 as *const _ as *const () as usize != tc3 as *const _ as *const () as usize) && (tc3 as *const _ as *const () as usize != tc1 as *const _ as *const () as usize), "Illegal to borrow same TLCell twice with rw3()" ); unsafe { ( &mut *tc1.value.get(), &mut *tc2.value.get(), &mut *tc3.value.get(), ) } } } /// Cell whose contents is owned (for borrowing purposes) by a
/// thread, `Sync` is not supported for this type. However it *is* /// possible to send the cell to another thread, which then allows its /// contents to be borrowed using the owner in that thread. /// /// See also [crate documentation](index.html). /// /// [`TLCellOwner`]: struct.TLCellOwner.html #[repr(transparent)] #[cfg_attr(docsrs, doc(cfg(feature = "std")))] pub struct TLCell<Q, T: ?Sized> { // Use Invariant<Q> for invariant parameter owner: PhantomData<Invariant<Q>>, // TLCell absolutely cannot be Sync, since otherwise you could send // two &TLCell's to two different threads, that each have their own // TLCellOwner<Q> instance and that could therefore both give out // a &mut T to the same T. // // However, it's fine to Send a TLCell to a different thread, because // you can only send something if nothing borrows it, so nothing can // be accessing its contents. After sending the TLCell, the original // TLCellOwner can no longer give access to the TLCell's contents since // TLCellOwner is !Send + !Sync. Only the TLCellOwner of the new thread // can give access to this TLCell's contents now. // // `UnsafeCell` already disables `Sync` and gives the right `Send` implementation. value: UnsafeCell<T>, } impl<Q, T> TLCell<Q, T> { /// Create a new `TLCell` owned for borrowing purposes by the /// `TLCellOwner` derived from the same marker type `Q`. #[inline] pub const fn new(value: T) -> TLCell<Q, T> { TLCell { owner: PhantomData, value: UnsafeCell::new(value), } } /// Destroy the cell and return the contained value /// /// Safety: Since this consumes the cell, there can be no other /// references to the cell or the data at this point. #[inline] pub fn into_inner(self) -> T { self.value.into_inner() } } impl<Q, T: ?Sized> TLCell<Q, T> { /// Borrow contents of this cell immutably (read-only). Many /// `TLCell` instances can be borrowed immutably at the same time /// from the same owner. 
#[inline] pub fn ro<'a>(&'a self, owner: &'a TLCellOwner<Q>) -> &'a T { owner.ro(self) } /// Borrow contents of this cell mutably (read-write). Only one /// `TLCell` at a time can be borrowed from the owner using this /// call. The returned reference must go out of scope before /// another can be borrowed. To mutably borrow from two or three /// cells at the same time, see [`TLCellOwner::rw2`] or /// [`TLCellOwner::rw3`]. #[inline] pub fn rw<'a>(&'a self, owner: &'a mut TLCellOwner<Q>) -> &'a mut T { owner.rw(self) } /// Returns a mutable reference to the underlying data /// /// Note that this is only useful at the beginning-of-life or /// end-of-life of the cell when you have exclusive access to it. /// Normally you'd use [`TLCell::rw`] or [`TLCellOwner::rw`] to /// get a mutable reference to the contents of the cell. /// /// Safety: This call borrows `TLCell` mutably which guarantees /// that we possess the only reference. This means that there can /// be no active borrows of other forms, even ones obtained using /// an immutable reference. 
#[inline] pub fn get_mut(&mut self) -> &mut T { self.value.get_mut() } } impl<Q: 'static, T: Default + ?Sized> Default for TLCell<Q, T> { fn default() -> Self { TLCell::new(T::default()) } } #[cfg(test)] mod tests { use super::{TLCell, TLCellOwner}; #[test] #[should_panic] fn tlcell_singleton_1() { struct Marker; let _owner1 = TLCellOwner::<Marker>::new(); let _owner2 = TLCellOwner::<Marker>::new(); // Panic here } #[test] fn tlcell_singleton_2() { struct Marker; let owner1 = TLCellOwner::<Marker>::new(); drop(owner1); let _owner2 = TLCellOwner::<Marker>::new(); } #[test] fn tlcell_singleton_3() { struct Marker1; struct Marker2; let _owner1 = TLCellOwner::<Marker1>::new(); let _owner2 = TLCellOwner::<Marker2>::new(); } #[test] fn tlcell() { struct Marker; type ACellOwner = TLCellOwner<Marker>; type ACell<T> = TLCell<Marker, T>; let mut owner = ACellOwner::new(); let c1 = ACell::new(100u32); let c2 = owner.cell(200u32); (*owner.rw(&c1)) += 1; (*owner.rw(&c2)) += 2; let c1ref = owner.ro(&c1); let c2ref = owner.ro(&c2); let total = *c1ref + *c2ref; assert_eq!(total, 303); } #[test] fn tlcell_threads() { struct Marker; type ACellOwner = TLCellOwner<Marker>; let mut _owner1 = ACellOwner::new(); std::thread::spawn(|| { let mut _owner2 = ACellOwner::new(); }) .join() .unwrap(); } #[test] fn tlcell_get_mut() { struct Marker; type ACellOwner = TLCellOwner<Marker>; type ACell<T> = TLCell<Marker, T>; let owner = ACellOwner::new(); let mut cell = ACell::new(100u32); let mut_ref = cell.get_mut(); *mut_ref = 50; let cell_ref = owner.ro(&cell); assert_eq!(*cell_ref, 50); } #[test] fn tlcell_into_inner() { struct Marker; type ACell<T> = TLCell<Marker, T>; let cell = ACell::new(100u32); assert_eq!(cell.into_inner(), 100); } #[test] fn tlcell_unsized() { struct Marker; type ACellOwner = TLCellOwner<Marker>; type ACell<T> = TLCell<Marker, T>; let mut owner = ACellOwner::new(); struct Squares(u32); struct Integers(u64); trait Series { fn step(&mut self); fn value(&self) -> u64; } impl 
Series for Squares { fn step(&mut self) { self.0 += 1; } fn value(&self) -> u64 { (self.0 as u64) * (self.0 as u64) } } impl Series for Integers { fn step(&mut self) { self.0 += 1; } fn value(&self) -> u64 { self.0 } } fn series(init: u32, is_squares: bool) -> Box<ACell<dyn Series>> { if is_squares { Box::new(ACell::new(Squares(init))) } else { Box::new(ACell::new(Integers(init as u64))) } } let own = &mut owner; let cell1 = series(4, false); let cell2 = series(7, true); let cell3 = series(3, true); assert_eq!(cell1.ro(own).value(), 4); cell1.rw(own).step(); assert_eq!(cell1.ro(own).value(), 5); assert_eq!(own.ro(&cell2).value(), 49); own.rw(&cell2).step(); assert_eq!(own.ro(&cell2).value(), 64); let (r1, r2, r3) = own.rw3(&cell1, &cell2, &cell3); r1.step(); r2.step(); r3.step(); assert_eq!(cell1.ro(own).value(), 6); assert_eq!(cell2.ro(own).value(), 81); assert_eq!(cell3.ro(own).value(), 16); let (r1, r2) = own.rw2(&cell1, &cell2); r1.step(); r2.step(); assert_eq!(cell1.ro(own).value(), 7); assert_eq!(cell2.ro(own).value(), 100); } }
/// [`TLCellOwner`]. /// /// To borrow from this cell, use the borrowing calls on the /// [`TLCellOwner`] instance that shares the same marker type. Since /// there may be another indistinguishable [`TLCellOwner`] in another
random_line_split
tlcell.rs
use std::any::TypeId; use std::cell::UnsafeCell; use std::collections::HashSet; use std::marker::PhantomData; use super::Invariant; std::thread_local! { static SINGLETON_CHECK: std::cell::RefCell<HashSet<TypeId>> = std::cell::RefCell::new(HashSet::new()); } struct
(*const ()); /// Borrowing-owner of zero or more [`TLCell`](struct.TLCell.html) /// instances. /// /// See [crate documentation](index.html). #[cfg_attr(docsrs, doc(cfg(feature = "std")))] pub struct TLCellOwner<Q: 'static> { // Use NotSendOrSync to disable Send and Sync, not_send_or_sync: PhantomData<NotSendOrSync>, // Use Invariant<Q> for invariant parameter typ: PhantomData<Invariant<Q>>, } impl<Q: 'static> Drop for TLCellOwner<Q> { fn drop(&mut self) { SINGLETON_CHECK.with(|set| set.borrow_mut().remove(&TypeId::of::<Q>())); } } impl<Q: 'static> Default for TLCellOwner<Q> { fn default() -> Self { TLCellOwner::new() } } impl<Q: 'static> TLCellOwner<Q> { /// Create the singleton owner instance. Each owner may be used /// to create many `TLCell` instances. There may be only one /// instance of this type per thread at any given time for each /// different marker type `Q`. This call panics if a second /// simultaneous instance is created. Since the owner is only /// valid to use in the thread it is created in, it does not /// support `Send` or `Sync`. pub fn new() -> Self { SINGLETON_CHECK.with(|set| { assert!(set.borrow_mut().insert(TypeId::of::<Q>()), "Illegal to create two TLCellOwner instances within the same thread with the same marker type parameter"); }); Self { not_send_or_sync: PhantomData, typ: PhantomData, } } /// Create a new cell owned by this owner instance. See also /// [`TLCell::new`]. /// /// [`TLCell::new`]: struct.TLCell.html pub fn cell<T>(&self, value: T) -> TLCell<Q, T> { TLCell::<Q, T>::new(value) } /// Borrow contents of a `TLCell` immutably (read-only). Many /// `TLCell` instances can be borrowed immutably at the same time /// from the same owner. #[inline] pub fn ro<'a, T: ?Sized>(&'a self, tc: &'a TLCell<Q, T>) -> &'a T { unsafe { &*tc.value.get() } } /// Borrow contents of a `TLCell` mutably (read-write). Only one /// `TLCell` at a time can be borrowed from the owner using this /// call. 
The returned reference must go out of scope before /// another can be borrowed. #[inline] pub fn rw<'a, T: ?Sized>(&'a mut self, tc: &'a TLCell<Q, T>) -> &'a mut T { unsafe { &mut *tc.value.get() } } /// Borrow contents of two `TLCell` instances mutably. Panics if /// the two `TLCell` instances point to the same memory. #[inline] pub fn rw2<'a, T: ?Sized, U: ?Sized>( &'a mut self, tc1: &'a TLCell<Q, T>, tc2: &'a TLCell<Q, U>, ) -> (&'a mut T, &'a mut U) { assert!( tc1 as *const _ as *const () as usize != tc2 as *const _ as *const () as usize, "Illegal to borrow same TLCell twice with rw2()" ); unsafe { (&mut *tc1.value.get(), &mut *tc2.value.get()) } } /// Borrow contents of three `TLCell` instances mutably. Panics if /// any pair of `TLCell` instances point to the same memory. #[inline] pub fn rw3<'a, T: ?Sized, U: ?Sized, V: ?Sized>( &'a mut self, tc1: &'a TLCell<Q, T>, tc2: &'a TLCell<Q, U>, tc3: &'a TLCell<Q, V>, ) -> (&'a mut T, &'a mut U, &'a mut V) { assert!( (tc1 as *const _ as *const () as usize != tc2 as *const _ as *const () as usize) && (tc2 as *const _ as *const () as usize != tc3 as *const _ as *const () as usize) && (tc3 as *const _ as *const () as usize != tc1 as *const _ as *const () as usize), "Illegal to borrow same TLCell twice with rw3()" ); unsafe { ( &mut *tc1.value.get(), &mut *tc2.value.get(), &mut *tc3.value.get(), ) } } } /// Cell whose contents is owned (for borrowing purposes) by a /// [`TLCellOwner`]. /// /// To borrow from this cell, use the borrowing calls on the /// [`TLCellOwner`] instance that shares the same marker type. Since /// there may be another indistinguishable [`TLCellOwner`] in another /// thread, `Sync` is not supported for this type. However it *is* /// possible to send the cell to another thread, which then allows its /// contents to be borrowed using the owner in that thread. /// /// See also [crate documentation](index.html). 
/// /// [`TLCellOwner`]: struct.TLCellOwner.html #[repr(transparent)] #[cfg_attr(docsrs, doc(cfg(feature = "std")))] pub struct TLCell<Q, T: ?Sized> { // Use Invariant<Q> for invariant parameter owner: PhantomData<Invariant<Q>>, // TLCell absolutely cannot be Sync, since otherwise you could send // two &TLCell's to two different threads, that each have their own // TLCellOwner<Q> instance and that could therefore both give out // a &mut T to the same T. // // However, it's fine to Send a TLCell to a different thread, because // you can only send something if nothing borrows it, so nothing can // be accessing its contents. After sending the TLCell, the original // TLCellOwner can no longer give access to the TLCell's contents since // TLCellOwner is !Send + !Sync. Only the TLCellOwner of the new thread // can give access to this TLCell's contents now. // // `UnsafeCell` already disables `Sync` and gives the right `Send` implementation. value: UnsafeCell<T>, } impl<Q, T> TLCell<Q, T> { /// Create a new `TLCell` owned for borrowing purposes by the /// `TLCellOwner` derived from the same marker type `Q`. #[inline] pub const fn new(value: T) -> TLCell<Q, T> { TLCell { owner: PhantomData, value: UnsafeCell::new(value), } } /// Destroy the cell and return the contained value /// /// Safety: Since this consumes the cell, there can be no other /// references to the cell or the data at this point. #[inline] pub fn into_inner(self) -> T { self.value.into_inner() } } impl<Q, T: ?Sized> TLCell<Q, T> { /// Borrow contents of this cell immutably (read-only). Many /// `TLCell` instances can be borrowed immutably at the same time /// from the same owner. #[inline] pub fn ro<'a>(&'a self, owner: &'a TLCellOwner<Q>) -> &'a T { owner.ro(self) } /// Borrow contents of this cell mutably (read-write). Only one /// `TLCell` at a time can be borrowed from the owner using this /// call. The returned reference must go out of scope before /// another can be borrowed. 
To mutably borrow from two or three /// cells at the same time, see [`TLCellOwner::rw2`] or /// [`TLCellOwner::rw3`]. #[inline] pub fn rw<'a>(&'a self, owner: &'a mut TLCellOwner<Q>) -> &'a mut T { owner.rw(self) } /// Returns a mutable reference to the underlying data /// /// Note that this is only useful at the beginning-of-life or /// end-of-life of the cell when you have exclusive access to it. /// Normally you'd use [`TLCell::rw`] or [`TLCellOwner::rw`] to /// get a mutable reference to the contents of the cell. /// /// Safety: This call borrows `TLCell` mutably which guarantees /// that we possess the only reference. This means that there can /// be no active borrows of other forms, even ones obtained using /// an immutable reference. #[inline] pub fn get_mut(&mut self) -> &mut T { self.value.get_mut() } } impl<Q: 'static, T: Default + ?Sized> Default for TLCell<Q, T> { fn default() -> Self { TLCell::new(T::default()) } } #[cfg(test)] mod tests { use super::{TLCell, TLCellOwner}; #[test] #[should_panic] fn tlcell_singleton_1() { struct Marker; let _owner1 = TLCellOwner::<Marker>::new(); let _owner2 = TLCellOwner::<Marker>::new(); // Panic here } #[test] fn tlcell_singleton_2() { struct Marker; let owner1 = TLCellOwner::<Marker>::new(); drop(owner1); let _owner2 = TLCellOwner::<Marker>::new(); } #[test] fn tlcell_singleton_3() { struct Marker1; struct Marker2; let _owner1 = TLCellOwner::<Marker1>::new(); let _owner2 = TLCellOwner::<Marker2>::new(); } #[test] fn tlcell() { struct Marker; type ACellOwner = TLCellOwner<Marker>; type ACell<T> = TLCell<Marker, T>; let mut owner = ACellOwner::new(); let c1 = ACell::new(100u32); let c2 = owner.cell(200u32); (*owner.rw(&c1)) += 1; (*owner.rw(&c2)) += 2; let c1ref = owner.ro(&c1); let c2ref = owner.ro(&c2); let total = *c1ref + *c2ref; assert_eq!(total, 303); } #[test] fn tlcell_threads() { struct Marker; type ACellOwner = TLCellOwner<Marker>; let mut _owner1 = ACellOwner::new(); std::thread::spawn(|| { let mut _owner2 
= ACellOwner::new(); }) .join() .unwrap(); } #[test] fn tlcell_get_mut() { struct Marker; type ACellOwner = TLCellOwner<Marker>; type ACell<T> = TLCell<Marker, T>; let owner = ACellOwner::new(); let mut cell = ACell::new(100u32); let mut_ref = cell.get_mut(); *mut_ref = 50; let cell_ref = owner.ro(&cell); assert_eq!(*cell_ref, 50); } #[test] fn tlcell_into_inner() { struct Marker; type ACell<T> = TLCell<Marker, T>; let cell = ACell::new(100u32); assert_eq!(cell.into_inner(), 100); } #[test] fn tlcell_unsized() { struct Marker; type ACellOwner = TLCellOwner<Marker>; type ACell<T> = TLCell<Marker, T>; let mut owner = ACellOwner::new(); struct Squares(u32); struct Integers(u64); trait Series { fn step(&mut self); fn value(&self) -> u64; } impl Series for Squares { fn step(&mut self) { self.0 += 1; } fn value(&self) -> u64 { (self.0 as u64) * (self.0 as u64) } } impl Series for Integers { fn step(&mut self) { self.0 += 1; } fn value(&self) -> u64 { self.0 } } fn series(init: u32, is_squares: bool) -> Box<ACell<dyn Series>> { if is_squares { Box::new(ACell::new(Squares(init))) } else { Box::new(ACell::new(Integers(init as u64))) } } let own = &mut owner; let cell1 = series(4, false); let cell2 = series(7, true); let cell3 = series(3, true); assert_eq!(cell1.ro(own).value(), 4); cell1.rw(own).step(); assert_eq!(cell1.ro(own).value(), 5); assert_eq!(own.ro(&cell2).value(), 49); own.rw(&cell2).step(); assert_eq!(own.ro(&cell2).value(), 64); let (r1, r2, r3) = own.rw3(&cell1, &cell2, &cell3); r1.step(); r2.step(); r3.step(); assert_eq!(cell1.ro(own).value(), 6); assert_eq!(cell2.ro(own).value(), 81); assert_eq!(cell3.ro(own).value(), 16); let (r1, r2) = own.rw2(&cell1, &cell2); r1.step(); r2.step(); assert_eq!(cell1.ro(own).value(), 7); assert_eq!(cell2.ro(own).value(), 100); } }
NotSendOrSync
identifier_name
transaction.go
// SPDX-License-Identifier: ISC // Copyright (c) 2014-2021 Bitmark Inc. // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package transactionrecord import ( "encoding/hex" "github.com/bitmark-inc/bitmarkd/account" "github.com/bitmark-inc/bitmarkd/currency" "github.com/bitmark-inc/bitmarkd/merkle" "github.com/bitmark-inc/bitmarkd/util" ) // TagType - type code for transactions type TagType uint64 // enumerate the possible transaction record types // this is encoded a Varint64 at start of "Packed" const ( // null marks beginning of list - not used as a record type NullTag = TagType(iota) // valid record types // OBSOLETE items must still be supported to process older blocks BaseDataTag = TagType(iota) // OBSOLETE: block owner AssetDataTag = TagType(iota) // create asset BitmarkIssueTag = TagType(iota) // issue asset BitmarkTransferUnratifiedTag = TagType(iota) // single signed transfer BitmarkTransferCountersignedTag = TagType(iota) // two signature transfer BlockFoundationTag = TagType(iota) // block owner BlockOwnerTransferTag = TagType(iota) // block owner transfer BitmarkShareTag = TagType(iota) // convert bitmark to a quantity of shares ShareGrantTag = TagType(iota) // grant some value to another account ShareSwapTag = TagType(iota) // atomically swap shares between accounts // this item must be last InvalidTag = TagType(iota) ) // Packed - packed records are just a byte slice type Packed []byte // Transaction - generic transaction interface type Transaction interface { Pack(account *account.Account) (Packed, error) } // byte sizes for various fields const ( maxNameLength = 64 maxMetadataLength = 2048 minFingerprintLength = 1 maxFingerprintLength = 1024 maxSignatureLength = 1024 ) // OldBaseData - the unpacked Proofer Data structure (OBSOLETE) // this is first tx in every block and can only be used there type OldBaseData struct { Currency currency.Currency `json:"currency"` // utf-8 → Enum PaymentAddress string 
`json:"paymentAddress"` // utf-8 Owner *account.Account `json:"owner"` // base58 Nonce uint64 `json:"nonce,string"` // unsigned 0..N Signature account.Signature `json:"signature,"` // hex } // AssetData - the unpacked Asset Data structure type AssetData struct { Name string `json:"name"` // utf-8 Fingerprint string `json:"fingerprint"` // utf-8 Metadata string `json:"metadata"` // utf-8 Registrant *account.Account `json:"registrant"` // base58 Signature account.Signature `json:"signature"` // hex } // BitmarkIssue - the unpacked BitmarkIssue structure type BitmarkIssue struct { AssetId AssetIdentifier `json:"assetId"` // link to asset record Owner *account.Account `json:"owner"` // base58: the "destination" owner Nonce uint64 `json:"nonce,string"` // to allow for multiple issues at the same time Signature account.Signature `json:"signature"` // hex: corresponds to owner in linked record } // Payment - optional payment record type Payment struct { Currency currency.Currency `json:"currency"` // utf-8 → Enum Address string `json:"address"` // utf-8 Amount uint64 `json:"amount,string"` // number as string, in terms of smallest currency unit } // PaymentAlternative - a single payment possibility - for use in RPC layers // up to entries: // 1. issue block owner payment // 2. last transfer block owner payment (can merge with 1 if same address) // 3. 
optional transfer payment type PaymentAlternative []*Payment // BitmarkTransfer - to access field of various transfer types type BitmarkTransfer interface { Transaction GetLink() merkle.Digest GetPayment() *Payment GetOwner() *account.Account GetCurrencies() currency.Map GetSignature() account.Signature GetCountersignature() account.Signature } // BitmarkTransferUnratified - the unpacked BitmarkTransfer structure type BitmarkTransferUnratified struct { Link merkle.Digest `json:"link"` // previous record Escrow *Payment `json:"escrow"` // optional escrow payment address Owner *account.Account `json:"owner"` // base58: the "destination" owner Signature account.Signature `json:"signature"` // hex: corresponds to owner in linked record } // BitmarkTransferCountersigned - the unpacked Countersigned BitmarkTransfer structure type BitmarkTransferCountersigned struct { Link merkle.Digest `json:"link"` // previous record Escrow *Payment `json:"escrow"` // optional escrow payment address Owner *account.Account `json:"owner"` // base58: the "destination" owner Signature account.Signature `json:"signature"` // hex: corresponds to owner in linked record Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record } // BlockFoundation - the unpacked Proofer Data structure // this is first tx in every block and can only be used there type BlockFoundation struct { Version uint64 `json:"version,string"` // reflects combination of supported currencies Payments currency.Map `json:"payments"` // contents depend on version Owner *account.Account `json:"owner"` // base58 Nonce uint64 `json:"nonce,string"` // unsigned 0..N Signature account.Signature `json:"signature"` // hex } // BlockOwnerTransfer - the unpacked Block Owner Transfer Data structure // forms a chain that links back to a foundation record which has a TxId of: // SHA3-256 . 
concat blockDigest leBlockNumberUint64 type BlockOwnerTransfer struct { Link merkle.Digest `json:"link"` // previous record Escrow *Payment `json:"escrow"` // optional escrow payment address Version uint64 `json:"version,string"` // reflects combination of supported currencies Payments currency.Map `json:"payments"` // require length and contents depend on version Owner *account.Account `json:"owner"` // base58 Signature account.Signature `json:"signature"` // hex Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record } // BitmarkShare - turn a bitmark provenance chain into a fungible share type BitmarkShare struct { Link merkle.Digest `json:"link"` // previous record Quantity uint64 `json:"quantity,string"` // initial balance quantity Signature account.Signature `json:"signature"` // hex } // ShareGrant - grant some shares to another (one way transfer) type ShareGrant struct { ShareId merkle.Digest `json:"shareId"` // share = issue id Quantity uint64 `json:"quantity,string"` // shares to transfer > 0 Owner *account.Account `json:"owner"` // base58 Recipient *account.Account `json:"recipient"` // base58 BeforeBlock uint64 `json:"beforeBlock,string"` // expires when chain height > before block Signature account.Signature `json:"signature"` // hex Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record } // ShareSwap - swap some shares to another (two way transfer) type ShareSwap struct { ShareIdOne merkle.Digest `json:"shareIdOne"` // share = issue id QuantityOne uint64 `json:"quantityOne,string"` // shares to transfer > 0 OwnerOne *account.Account `json:"ownerOne"` // base58 ShareIdTwo merkle.Digest `json:"shareIdTwo"` // share = issue id QuantityTwo uint64 `json:"quantityTwo,string"` // shares to transfer > 0 OwnerTwo *account.Account `json:"ownerTwo"` // base58 BeforeBlock uint64 `json:"beforeBlock,string"` // expires when chain height > before block Signature 
account.Signature `json:"signature"` // hex Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record } // Type - returns the record type code func (record Packed) Type() TagType { recordType, n := util.FromVarint64(record) if 0 == n {
turn TagType(recordType) } // RecordName - returns the name of a transaction record as a string func RecordName(record interface{}) (string, bool) { switch record.(type) { case *OldBaseData, OldBaseData: return "BaseData", true case *AssetData, AssetData: return "AssetData", true case *BitmarkIssue, BitmarkIssue: return "BitmarkIssue", true case *BitmarkTransferUnratified, BitmarkTransferUnratified: return "BitmarkTransferUnratified", true case *BitmarkTransferCountersigned, BitmarkTransferCountersigned: return "BitmarkTransferCountersigned", true case *BlockFoundation, BlockFoundation: return "BlockFoundation", true case *BlockOwnerTransfer, BlockOwnerTransfer: return "BlockOwnerTransfer", true case *BitmarkShare, BitmarkShare: return "ShareBalance", true case *ShareGrant, ShareGrant: return "ShareGrant", true case *ShareSwap, ShareSwap: return "ShareSwap", true default: return "*unknown*", false } } // AssetId - compute an asset id func (assetData *AssetData) AssetId() AssetIdentifier { return NewAssetIdentifier([]byte(assetData.Fingerprint)) } // MakeLink - Create an link for a packed record func (record Packed) MakeLink() merkle.Digest { return merkle.NewDigest(record) } // MarshalText - convert a packed to its hex JSON form func (record Packed) MarshalText() ([]byte, error) { size := hex.EncodedLen(len(record)) b := make([]byte, size) hex.Encode(b, record) return b, nil } // UnmarshalText - convert a packed to its hex JSON form func (record *Packed) UnmarshalText(s []byte) error { size := hex.DecodedLen(len(s)) *record = make([]byte, size) _, err := hex.Decode(*record, s) return err }
return NullTag } re
conditional_block
transaction.go
// SPDX-License-Identifier: ISC // Copyright (c) 2014-2021 Bitmark Inc. // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package transactionrecord import ( "encoding/hex" "github.com/bitmark-inc/bitmarkd/account" "github.com/bitmark-inc/bitmarkd/currency" "github.com/bitmark-inc/bitmarkd/merkle" "github.com/bitmark-inc/bitmarkd/util" ) // TagType - type code for transactions type TagType uint64 // enumerate the possible transaction record types // this is encoded a Varint64 at start of "Packed" const ( // null marks beginning of list - not used as a record type NullTag = TagType(iota) // valid record types // OBSOLETE items must still be supported to process older blocks BaseDataTag = TagType(iota) // OBSOLETE: block owner AssetDataTag = TagType(iota) // create asset BitmarkIssueTag = TagType(iota) // issue asset BitmarkTransferUnratifiedTag = TagType(iota) // single signed transfer BitmarkTransferCountersignedTag = TagType(iota) // two signature transfer BlockFoundationTag = TagType(iota) // block owner BlockOwnerTransferTag = TagType(iota) // block owner transfer BitmarkShareTag = TagType(iota) // convert bitmark to a quantity of shares ShareGrantTag = TagType(iota) // grant some value to another account ShareSwapTag = TagType(iota) // atomically swap shares between accounts // this item must be last InvalidTag = TagType(iota) ) // Packed - packed records are just a byte slice type Packed []byte // Transaction - generic transaction interface type Transaction interface { Pack(account *account.Account) (Packed, error) } // byte sizes for various fields const ( maxNameLength = 64 maxMetadataLength = 2048 minFingerprintLength = 1 maxFingerprintLength = 1024 maxSignatureLength = 1024 ) // OldBaseData - the unpacked Proofer Data structure (OBSOLETE) // this is first tx in every block and can only be used there type OldBaseData struct { Currency currency.Currency `json:"currency"` // utf-8 → Enum PaymentAddress string 
`json:"paymentAddress"` // utf-8 Owner *account.Account `json:"owner"` // base58 Nonce uint64 `json:"nonce,string"` // unsigned 0..N Signature account.Signature `json:"signature,"` // hex } // AssetData - the unpacked Asset Data structure type AssetData struct { Name string `json:"name"` // utf-8 Fingerprint string `json:"fingerprint"` // utf-8 Metadata string `json:"metadata"` // utf-8 Registrant *account.Account `json:"registrant"` // base58 Signature account.Signature `json:"signature"` // hex } // BitmarkIssue - the unpacked BitmarkIssue structure type BitmarkIssue struct { AssetId AssetIdentifier `json:"assetId"` // link to asset record Owner *account.Account `json:"owner"` // base58: the "destination" owner Nonce uint64 `json:"nonce,string"` // to allow for multiple issues at the same time Signature account.Signature `json:"signature"` // hex: corresponds to owner in linked record } // Payment - optional payment record type Payment struct { Currency currency.Currency `json:"currency"` // utf-8 → Enum Address string `json:"address"` // utf-8 Amount uint64 `json:"amount,string"` // number as string, in terms of smallest currency unit } // PaymentAlternative - a single payment possibility - for use in RPC layers // up to entries: // 1. issue block owner payment // 2. last transfer block owner payment (can merge with 1 if same address) // 3. 
optional transfer payment type PaymentAlternative []*Payment // BitmarkTransfer - to access field of various transfer types type BitmarkTransfer interface { Transaction GetLink() merkle.Digest GetPayment() *Payment GetOwner() *account.Account GetCurrencies() currency.Map GetSignature() account.Signature GetCountersignature() account.Signature } // BitmarkTransferUnratified - the unpacked BitmarkTransfer structure type BitmarkTransferUnratified struct { Link merkle.Digest `json:"link"` // previous record Escrow *Payment `json:"escrow"` // optional escrow payment address Owner *account.Account `json:"owner"` // base58: the "destination" owner Signature account.Signature `json:"signature"` // hex: corresponds to owner in linked record } // BitmarkTransferCountersigned - the unpacked Countersigned BitmarkTransfer structure type BitmarkTransferCountersigned struct { Link merkle.Digest `json:"link"` // previous record Escrow *Payment `json:"escrow"` // optional escrow payment address Owner *account.Account `json:"owner"` // base58: the "destination" owner Signature account.Signature `json:"signature"` // hex: corresponds to owner in linked record Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record } // BlockFoundation - the unpacked Proofer Data structure // this is first tx in every block and can only be used there type BlockFoundation struct { Version uint64 `json:"version,string"` // reflects combination of supported currencies Payments currency.Map `json:"payments"` // contents depend on version Owner *account.Account `json:"owner"` // base58 Nonce uint64 `json:"nonce,string"` // unsigned 0..N Signature account.Signature `json:"signature"` // hex } // BlockOwnerTransfer - the unpacked Block Owner Transfer Data structure // forms a chain that links back to a foundation record which has a TxId of: // SHA3-256 . 
concat blockDigest leBlockNumberUint64 type BlockOwnerTransfer struct { Link merkle.Digest `json:"link"` // previous record Escrow *Payment `json:"escrow"` // optional escrow payment address Version uint64 `json:"version,string"` // reflects combination of supported currencies Payments currency.Map `json:"payments"` // require length and contents depend on version Owner *account.Account `json:"owner"` // base58 Signature account.Signature `json:"signature"` // hex Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record } // BitmarkShare - turn a bitmark provenance chain into a fungible share type BitmarkShare struct { Link merkle.Digest `json:"link"` // previous record Quantity uint64 `json:"quantity,string"` // initial balance quantity Signature account.Signature `json:"signature"` // hex } // ShareGrant - grant some shares to another (one way transfer) type ShareGrant struct { ShareId merkle.Digest `json:"shareId"` // share = issue id Quantity uint64 `json:"quantity,string"` // shares to transfer > 0 Owner *account.Account `json:"owner"` // base58 Recipient *account.Account `json:"recipient"` // base58 BeforeBlock uint64 `json:"beforeBlock,string"` // expires when chain height > before block Signature account.Signature `json:"signature"` // hex Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record } // ShareSwap - swap some shares to another (two way transfer) type ShareSwap struct { ShareIdOne merkle.Digest `json:"shareIdOne"` // share = issue id QuantityOne uint64 `json:"quantityOne,string"` // shares to transfer > 0 OwnerOne *account.Account `json:"ownerOne"` // base58 ShareIdTwo merkle.Digest `json:"shareIdTwo"` // share = issue id QuantityTwo uint64 `json:"quantityTwo,string"` // shares to transfer > 0 OwnerTwo *account.Account `json:"ownerTwo"` // base58 BeforeBlock uint64 `json:"beforeBlock,string"` // expires when chain height > before block Signature 
account.Signature `json:"signature"` // hex Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record } // Type - returns the record type code func (record Packed) Type() TagType { recordType, n := util.FromVarint64(record) if 0 == n { return NullTag } return TagType(recordType) } // RecordName - returns the name of a transaction record as a string func RecordName(record interface{}) (string, bool) { switch record.(type) { case *OldBaseData, OldBaseData: return "BaseData", true case *AssetData, AssetData: return "AssetData", true case *BitmarkIssue, BitmarkIssue: return "BitmarkIssue", true case *BitmarkTransferUnratified, BitmarkTransferUnratified: return "BitmarkTransferUnratified", true case *BitmarkTransferCountersigned, BitmarkTransferCountersigned: return "BitmarkTransferCountersigned", true case *BlockFoundation, BlockFoundation: return "BlockFoundation", true case *BlockOwnerTransfer, BlockOwnerTransfer: return "BlockOwnerTransfer", true case *BitmarkShare, BitmarkShare: return "ShareBalance", true case *ShareGrant, ShareGrant: return "ShareGrant", true case *ShareSwap, ShareSwap: return "ShareSwap", true default: return "*unknown*", false } } // AssetId - compute an asset id func (assetData *AssetData) AssetId() AssetIdentifier { r
MakeLink - Create an link for a packed record func (record Packed) MakeLink() merkle.Digest { return merkle.NewDigest(record) } // MarshalText - convert a packed to its hex JSON form func (record Packed) MarshalText() ([]byte, error) { size := hex.EncodedLen(len(record)) b := make([]byte, size) hex.Encode(b, record) return b, nil } // UnmarshalText - convert a packed to its hex JSON form func (record *Packed) UnmarshalText(s []byte) error { size := hex.DecodedLen(len(s)) *record = make([]byte, size) _, err := hex.Decode(*record, s) return err }
eturn NewAssetIdentifier([]byte(assetData.Fingerprint)) } //
identifier_body
transaction.go
// SPDX-License-Identifier: ISC // Copyright (c) 2014-2021 Bitmark Inc. // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package transactionrecord import ( "encoding/hex" "github.com/bitmark-inc/bitmarkd/account" "github.com/bitmark-inc/bitmarkd/currency" "github.com/bitmark-inc/bitmarkd/merkle" "github.com/bitmark-inc/bitmarkd/util" ) // TagType - type code for transactions type TagType uint64 // enumerate the possible transaction record types // this is encoded a Varint64 at start of "Packed" const ( // null marks beginning of list - not used as a record type NullTag = TagType(iota) // valid record types // OBSOLETE items must still be supported to process older blocks BaseDataTag = TagType(iota) // OBSOLETE: block owner AssetDataTag = TagType(iota) // create asset BitmarkIssueTag = TagType(iota) // issue asset BitmarkTransferUnratifiedTag = TagType(iota) // single signed transfer BitmarkTransferCountersignedTag = TagType(iota) // two signature transfer BlockFoundationTag = TagType(iota) // block owner BlockOwnerTransferTag = TagType(iota) // block owner transfer BitmarkShareTag = TagType(iota) // convert bitmark to a quantity of shares ShareGrantTag = TagType(iota) // grant some value to another account ShareSwapTag = TagType(iota) // atomically swap shares between accounts // this item must be last InvalidTag = TagType(iota) ) // Packed - packed records are just a byte slice type Packed []byte // Transaction - generic transaction interface type Transaction interface { Pack(account *account.Account) (Packed, error) } // byte sizes for various fields const ( maxNameLength = 64 maxMetadataLength = 2048 minFingerprintLength = 1 maxFingerprintLength = 1024 maxSignatureLength = 1024 ) // OldBaseData - the unpacked Proofer Data structure (OBSOLETE) // this is first tx in every block and can only be used there type OldBaseData struct { Currency currency.Currency `json:"currency"` // utf-8 → Enum PaymentAddress string 
`json:"paymentAddress"` // utf-8 Owner *account.Account `json:"owner"` // base58 Nonce uint64 `json:"nonce,string"` // unsigned 0..N Signature account.Signature `json:"signature,"` // hex } // AssetData - the unpacked Asset Data structure type AssetData struct { Name string `json:"name"` // utf-8 Fingerprint string `json:"fingerprint"` // utf-8 Metadata string `json:"metadata"` // utf-8 Registrant *account.Account `json:"registrant"` // base58 Signature account.Signature `json:"signature"` // hex } // BitmarkIssue - the unpacked BitmarkIssue structure type BitmarkIssue struct { AssetId AssetIdentifier `json:"assetId"` // link to asset record Owner *account.Account `json:"owner"` // base58: the "destination" owner Nonce uint64 `json:"nonce,string"` // to allow for multiple issues at the same time Signature account.Signature `json:"signature"` // hex: corresponds to owner in linked record } // Payment - optional payment record type Payment struct { Currency currency.Currency `json:"currency"` // utf-8 → Enum Address string `json:"address"` // utf-8 Amount uint64 `json:"amount,string"` // number as string, in terms of smallest currency unit } // PaymentAlternative - a single payment possibility - for use in RPC layers // up to entries: // 1. issue block owner payment // 2. last transfer block owner payment (can merge with 1 if same address) // 3. 
optional transfer payment type PaymentAlternative []*Payment // BitmarkTransfer - to access field of various transfer types type BitmarkTransfer interface { Transaction GetLink() merkle.Digest GetPayment() *Payment GetOwner() *account.Account GetCurrencies() currency.Map GetSignature() account.Signature GetCountersignature() account.Signature } // BitmarkTransferUnratified - the unpacked BitmarkTransfer structure type BitmarkTransferUnratified struct { Link merkle.Digest `json:"link"` // previous record Escrow *Payment `json:"escrow"` // optional escrow payment address Owner *account.Account `json:"owner"` // base58: the "destination" owner Signature account.Signature `json:"signature"` // hex: corresponds to owner in linked record } // BitmarkTransferCountersigned - the unpacked Countersigned BitmarkTransfer structure type BitmarkTransferCountersigned struct { Link merkle.Digest `json:"link"` // previous record Escrow *Payment `json:"escrow"` // optional escrow payment address Owner *account.Account `json:"owner"` // base58: the "destination" owner Signature account.Signature `json:"signature"` // hex: corresponds to owner in linked record Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record } // BlockFoundation - the unpacked Proofer Data structure // this is first tx in every block and can only be used there type BlockFoundation struct { Version uint64 `json:"version,string"` // reflects combination of supported currencies Payments currency.Map `json:"payments"` // contents depend on version Owner *account.Account `json:"owner"` // base58 Nonce uint64 `json:"nonce,string"` // unsigned 0..N Signature account.Signature `json:"signature"` // hex } // BlockOwnerTransfer - the unpacked Block Owner Transfer Data structure // forms a chain that links back to a foundation record which has a TxId of: // SHA3-256 . 
concat blockDigest leBlockNumberUint64 type BlockOwnerTransfer struct { Link merkle.Digest `json:"link"` // previous record Escrow *Payment `json:"escrow"` // optional escrow payment address Version uint64 `json:"version,string"` // reflects combination of supported currencies Payments currency.Map `json:"payments"` // require length and contents depend on version Owner *account.Account `json:"owner"` // base58 Signature account.Signature `json:"signature"` // hex Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record } // BitmarkShare - turn a bitmark provenance chain into a fungible share type BitmarkShare struct { Link merkle.Digest `json:"link"` // previous record Quantity uint64 `json:"quantity,string"` // initial balance quantity Signature account.Signature `json:"signature"` // hex } // ShareGrant - grant some shares to another (one way transfer) type ShareGrant struct { ShareId merkle.Digest `json:"shareId"` // share = issue id Quantity uint64 `json:"quantity,string"` // shares to transfer > 0 Owner *account.Account `json:"owner"` // base58 Recipient *account.Account `json:"recipient"` // base58 BeforeBlock uint64 `json:"beforeBlock,string"` // expires when chain height > before block Signature account.Signature `json:"signature"` // hex Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record } // ShareSwap - swap some shares to another (two way transfer) type ShareSwap struct { ShareIdOne merkle.Digest `json:"shareIdOne"` // share = issue id QuantityOne uint64 `json:"quantityOne,string"` // shares to transfer > 0 OwnerOne *account.Account `json:"ownerOne"` // base58 ShareIdTwo merkle.Digest `json:"shareIdTwo"` // share = issue id QuantityTwo uint64 `json:"quantityTwo,string"` // shares to transfer > 0 OwnerTwo *account.Account `json:"ownerTwo"` // base58 BeforeBlock uint64 `json:"beforeBlock,string"` // expires when chain height > before block Signature 
account.Signature `json:"signature"` // hex Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record } // Type - returns the record type code func (record Packed) Type() TagType { recordType, n := util.FromVarint64(record) if 0 == n { return NullTag } return TagType(recordType) } // RecordName - returns the name of a transaction record as a string func RecordName(record interface{}) (string, bool) { switch record.(type) { case *OldBaseData, OldBaseData: return "BaseData", true case *AssetData, AssetData: return "AssetData", true case *BitmarkIssue, BitmarkIssue: return "BitmarkIssue", true case *BitmarkTransferUnratified, BitmarkTransferUnratified: return "BitmarkTransferUnratified", true case *BitmarkTransferCountersigned, BitmarkTransferCountersigned: return "BitmarkTransferCountersigned", true case *BlockFoundation, BlockFoundation: return "BlockFoundation", true case *BlockOwnerTransfer, BlockOwnerTransfer: return "BlockOwnerTransfer", true case *BitmarkShare, BitmarkShare: return "ShareBalance", true case *ShareGrant, ShareGrant: return "ShareGrant", true case *ShareSwap, ShareSwap: return "ShareSwap", true default: return "*unknown*", false } } // AssetId - compute an asset id func (assetData *AssetData) AssetId() AssetIdentifier { return NewAssetIdentifier([]byte(assetData.Fingerprint)) } // MakeLink - Create an link for a packed record func (record Packed) MakeLink() merkle.Digest { return merkle.NewDigest(record) } // MarshalText - convert a packed to its hex JSON form func (record Packed) MarshalText() ([]byte, error) { size := hex.EncodedLen(len(record)) b := make([]byte, size) hex.Encode(b, record) return b, nil } // UnmarshalText - convert a packed to its hex JSON form func (record *Packed) Unma
]byte) error { size := hex.DecodedLen(len(s)) *record = make([]byte, size) _, err := hex.Decode(*record, s) return err }
rshalText(s [
identifier_name
transaction.go
// SPDX-License-Identifier: ISC // Copyright (c) 2014-2021 Bitmark Inc. // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package transactionrecord import ( "encoding/hex" "github.com/bitmark-inc/bitmarkd/account" "github.com/bitmark-inc/bitmarkd/currency" "github.com/bitmark-inc/bitmarkd/merkle" "github.com/bitmark-inc/bitmarkd/util" )
// TagType - type code for transactions type TagType uint64 // enumerate the possible transaction record types // this is encoded a Varint64 at start of "Packed" const ( // null marks beginning of list - not used as a record type NullTag = TagType(iota) // valid record types // OBSOLETE items must still be supported to process older blocks BaseDataTag = TagType(iota) // OBSOLETE: block owner AssetDataTag = TagType(iota) // create asset BitmarkIssueTag = TagType(iota) // issue asset BitmarkTransferUnratifiedTag = TagType(iota) // single signed transfer BitmarkTransferCountersignedTag = TagType(iota) // two signature transfer BlockFoundationTag = TagType(iota) // block owner BlockOwnerTransferTag = TagType(iota) // block owner transfer BitmarkShareTag = TagType(iota) // convert bitmark to a quantity of shares ShareGrantTag = TagType(iota) // grant some value to another account ShareSwapTag = TagType(iota) // atomically swap shares between accounts // this item must be last InvalidTag = TagType(iota) ) // Packed - packed records are just a byte slice type Packed []byte // Transaction - generic transaction interface type Transaction interface { Pack(account *account.Account) (Packed, error) } // byte sizes for various fields const ( maxNameLength = 64 maxMetadataLength = 2048 minFingerprintLength = 1 maxFingerprintLength = 1024 maxSignatureLength = 1024 ) // OldBaseData - the unpacked Proofer Data structure (OBSOLETE) // this is first tx in every block and can only be used there type OldBaseData struct { Currency currency.Currency `json:"currency"` // utf-8 → Enum PaymentAddress string `json:"paymentAddress"` // utf-8 Owner *account.Account `json:"owner"` // base58 Nonce uint64 `json:"nonce,string"` // unsigned 0..N Signature account.Signature `json:"signature,"` // hex } // AssetData - the unpacked Asset Data structure type AssetData struct { Name string `json:"name"` // utf-8 Fingerprint string `json:"fingerprint"` // utf-8 Metadata string `json:"metadata"` // utf-8 
Registrant *account.Account `json:"registrant"` // base58 Signature account.Signature `json:"signature"` // hex } // BitmarkIssue - the unpacked BitmarkIssue structure type BitmarkIssue struct { AssetId AssetIdentifier `json:"assetId"` // link to asset record Owner *account.Account `json:"owner"` // base58: the "destination" owner Nonce uint64 `json:"nonce,string"` // to allow for multiple issues at the same time Signature account.Signature `json:"signature"` // hex: corresponds to owner in linked record } // Payment - optional payment record type Payment struct { Currency currency.Currency `json:"currency"` // utf-8 → Enum Address string `json:"address"` // utf-8 Amount uint64 `json:"amount,string"` // number as string, in terms of smallest currency unit } // PaymentAlternative - a single payment possibility - for use in RPC layers // up to entries: // 1. issue block owner payment // 2. last transfer block owner payment (can merge with 1 if same address) // 3. optional transfer payment type PaymentAlternative []*Payment // BitmarkTransfer - to access field of various transfer types type BitmarkTransfer interface { Transaction GetLink() merkle.Digest GetPayment() *Payment GetOwner() *account.Account GetCurrencies() currency.Map GetSignature() account.Signature GetCountersignature() account.Signature } // BitmarkTransferUnratified - the unpacked BitmarkTransfer structure type BitmarkTransferUnratified struct { Link merkle.Digest `json:"link"` // previous record Escrow *Payment `json:"escrow"` // optional escrow payment address Owner *account.Account `json:"owner"` // base58: the "destination" owner Signature account.Signature `json:"signature"` // hex: corresponds to owner in linked record } // BitmarkTransferCountersigned - the unpacked Countersigned BitmarkTransfer structure type BitmarkTransferCountersigned struct { Link merkle.Digest `json:"link"` // previous record Escrow *Payment `json:"escrow"` // optional escrow payment address Owner *account.Account 
`json:"owner"` // base58: the "destination" owner Signature account.Signature `json:"signature"` // hex: corresponds to owner in linked record Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record } // BlockFoundation - the unpacked Proofer Data structure // this is first tx in every block and can only be used there type BlockFoundation struct { Version uint64 `json:"version,string"` // reflects combination of supported currencies Payments currency.Map `json:"payments"` // contents depend on version Owner *account.Account `json:"owner"` // base58 Nonce uint64 `json:"nonce,string"` // unsigned 0..N Signature account.Signature `json:"signature"` // hex } // BlockOwnerTransfer - the unpacked Block Owner Transfer Data structure // forms a chain that links back to a foundation record which has a TxId of: // SHA3-256 . concat blockDigest leBlockNumberUint64 type BlockOwnerTransfer struct { Link merkle.Digest `json:"link"` // previous record Escrow *Payment `json:"escrow"` // optional escrow payment address Version uint64 `json:"version,string"` // reflects combination of supported currencies Payments currency.Map `json:"payments"` // require length and contents depend on version Owner *account.Account `json:"owner"` // base58 Signature account.Signature `json:"signature"` // hex Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record } // BitmarkShare - turn a bitmark provenance chain into a fungible share type BitmarkShare struct { Link merkle.Digest `json:"link"` // previous record Quantity uint64 `json:"quantity,string"` // initial balance quantity Signature account.Signature `json:"signature"` // hex } // ShareGrant - grant some shares to another (one way transfer) type ShareGrant struct { ShareId merkle.Digest `json:"shareId"` // share = issue id Quantity uint64 `json:"quantity,string"` // shares to transfer > 0 Owner *account.Account `json:"owner"` // base58 
Recipient *account.Account `json:"recipient"` // base58 BeforeBlock uint64 `json:"beforeBlock,string"` // expires when chain height > before block Signature account.Signature `json:"signature"` // hex Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record } // ShareSwap - swap some shares to another (two way transfer) type ShareSwap struct { ShareIdOne merkle.Digest `json:"shareIdOne"` // share = issue id QuantityOne uint64 `json:"quantityOne,string"` // shares to transfer > 0 OwnerOne *account.Account `json:"ownerOne"` // base58 ShareIdTwo merkle.Digest `json:"shareIdTwo"` // share = issue id QuantityTwo uint64 `json:"quantityTwo,string"` // shares to transfer > 0 OwnerTwo *account.Account `json:"ownerTwo"` // base58 BeforeBlock uint64 `json:"beforeBlock,string"` // expires when chain height > before block Signature account.Signature `json:"signature"` // hex Countersignature account.Signature `json:"countersignature"` // hex: corresponds to owner in this record } // Type - returns the record type code func (record Packed) Type() TagType { recordType, n := util.FromVarint64(record) if 0 == n { return NullTag } return TagType(recordType) } // RecordName - returns the name of a transaction record as a string func RecordName(record interface{}) (string, bool) { switch record.(type) { case *OldBaseData, OldBaseData: return "BaseData", true case *AssetData, AssetData: return "AssetData", true case *BitmarkIssue, BitmarkIssue: return "BitmarkIssue", true case *BitmarkTransferUnratified, BitmarkTransferUnratified: return "BitmarkTransferUnratified", true case *BitmarkTransferCountersigned, BitmarkTransferCountersigned: return "BitmarkTransferCountersigned", true case *BlockFoundation, BlockFoundation: return "BlockFoundation", true case *BlockOwnerTransfer, BlockOwnerTransfer: return "BlockOwnerTransfer", true case *BitmarkShare, BitmarkShare: return "ShareBalance", true case *ShareGrant, ShareGrant: return "ShareGrant", 
true case *ShareSwap, ShareSwap: return "ShareSwap", true default: return "*unknown*", false } } // AssetId - compute an asset id func (assetData *AssetData) AssetId() AssetIdentifier { return NewAssetIdentifier([]byte(assetData.Fingerprint)) } // MakeLink - Create an link for a packed record func (record Packed) MakeLink() merkle.Digest { return merkle.NewDigest(record) } // MarshalText - convert a packed to its hex JSON form func (record Packed) MarshalText() ([]byte, error) { size := hex.EncodedLen(len(record)) b := make([]byte, size) hex.Encode(b, record) return b, nil } // UnmarshalText - convert a packed to its hex JSON form func (record *Packed) UnmarshalText(s []byte) error { size := hex.DecodedLen(len(s)) *record = make([]byte, size) _, err := hex.Decode(*record, s) return err }
random_line_split
history.component.ts
import { Component, OnInit, ElementRef, ViewChild, Inject } from '@angular/core'; import { ApiService } from '../../services/api.service'; import { DOCUMENT } from '@angular/common' import { element } from 'protractor'; import { ChartDataSets } from 'chart.js'; import { Color } from 'ng2-charts'; import * as pdfMake from "pdfmake/build/pdfmake"; import * as pdfFonts from 'pdfmake/build/vfs_fonts'; import { Router } from '@angular/router'; import { AuthenticationService } from '../../services/authentication.service'; import { Expense } from '../../classes/expense'; import { ConnectionService } from '../../services/connection.service'; declare var $:any; declare var getTranslation: any; declare var setLanguage: any; @Component({ selector: 'app-history', templateUrl: './history.component.html', styleUrls: ['./history.component.css'] }) export class HistoryComponent implements OnInit { public categories: Array<Object> = []; public currency: string; public expenses: Array<Object> = []; fileName: string = "history"; graph: Object = { "used": true, "name": "HistoryChart" }; message: string = "messageHistory"; welcomeMessage: string = "welcomeMessageHistory"; logout: string = "Logout"; DASHBOAR: string = "DASHBOARD,"; ENVELOPES: string = "ENVELOPES"; GOALS: string = "GOALS"; BILLS: string = "BILLS"; HISTORY: string = "HISTORY"; UTILITIES: string = "UTILITIES"; user: string = "User"; settings: string = "Settings"; appearance: string = "Appearance"; light: string = "Light"; dark: string = "Dark"; chartData1: Array<any> = []; chartColors1: Array<any> = []; chartLabels1: Array<any> = []; chartType1: string = "doughnut"; chartData2: Array<any> = []; chartColors2: Array<any> = []; chartLabels2: Array<string> = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]; chartType2: string = "line"; paginatedExpenses: Array<Expense> = []; expenseCount: number = 0; page: number = 1; pageSize: number = 10 filter: string = ''; historyAll = "historyAll"; 
historyExport = "historyExport"; historyTotal = "historyTotal"; constructor( private api: ApiService, @Inject(DOCUMENT) private document: HTMLDocument, private router: Router, private authentication: AuthenticationService, private connectionService: ConnectionService ) { } public hasConnection(): boolean { return this.connectionService.hasConnection; } async handlePaginatedOfflineAsync() { if (this.connectionService.hasConnection && this.paginatedExpenses.length === 0) { if (this.connectionService.hasConnection) { this.api.getExpense('', this.pageSize, (this.page - 1) * this.pageSize).then(result => { this.expenseCount = result['length']; this.paginatedExpenses = result['expenses']; }).catch(error => {}); } } } @ViewChild('color') color: ElementRef; ngOnInit(): void { this.api.getUser().then(result => { this.refreshLanguage(result.language); this.categories = result.categories; this.expenses = this.generateExpenses(result.expense); this.currency = result.defaultCurrency; //data za tabele const parsedTable = this.parseTable(this.expenses); document.querySelector(".totaltext").innerHTML = "<h5>" + this.historyTotal + ": " + parsedTable.sum.toFixed(2); + "€</h5>"; const pieChart = this.groupByCategories(parsedTable); const lineChartData = this.makeDataForGraph(this.filterByCategory(this.expenses)) this.chartData1 = this.makeDataArray1(pieChart); this.chartColors1 = this.makeColorArray1(pieChart); this.chartLabels1 = this.makeLabelArray1(pieChart); this.chartData2 = this.generateDatasets(lineChartData); this.chartColors2 = this.getColors(lineChartData); }).catch(error => { this.authentication.logout(); this.router.navigate(['/']); }); setInterval(()=> { this.handlePaginatedOfflineAsync() }, 1000); } refreshLanguage(language: string) { setLanguage(language); this.message = getTranslation("messageHistory"); this.welcomeMessage = getTranslation("welcomeMessageHistory"); this.historyAll = getTranslation("historyAll"); this.historyExport = getTranslation("historyExport"); 
this.historyTotal = getTranslation("historyTotal"); } parseTable(rows) { const parsedTable = { sum: 0, data: [] }; for (let row of rows) { parsedTable.data.push({ id: row.id, year: row.year, month: row.month, day: row.day, category: row.category, receiver: row.receiver, currency: row.currency, value: row.value, color: row.color, }); parsedTable.sum += row.value; } return parsedTable; } filterByCategory(table) { var categories = new Map(); for (var expense of table) { if (!categories.get(expense.category)) { categories.set(expense.category, []); } categories.get(expense.category).push(expense); categories.get(expense.category).color = expense.color; } return categories; } filterByMonth(expenses) { var month = new Map(); for (var expense of expenses) { if (!month.get(expense.month)) { month.set(expense.month, {}); month.get(expense.month).sum = 0; } month.get(expense.month).sum += expense.value; } return month; } makeDataForGraph(category) { var month = new Map(); let keys = Array.from(category.keys()); for (let name of keys) { month.set(name, this.filterByMonth(category.get(name))); month.get(name).color = category.get(name).color; } return month; } convertMonthsToName(month) { switch (month) { case 'JAN': return "January"; case 'FEB': return "February"; case 'MAR': return "March"; case 'APR': return "April"; case 'MAY': return "May"; case 'JUN': return "June"; case 'JUL': return "July"; case 'AUG': return "August"; case 'SEP': return "September"; case 'OCT': return "October"; case 'NOV': return "November"; case 'DEC': return "December"; } } generateExpenses(expense) { var expensesArray = [] for (var exp of expense) { var date = exp.date.split('T')[0].split('-'); expensesArray.push({ id: exp._id, year: date[0], month: date[1], monthName: this.translateMonth(date[1]), day: date[2], category: exp.category.name, recipient: exp.recipient, value: exp.value, currency: exp.currency, color: exp.category.color, }); } expensesArray.sort(this.compare) return expensesArray; } 
translateMonth(month) { switch (month) { case '01': return "JAN"; case '02': return "FEB"; case '03': return "MAR"; case '04': return "APR"; case '05': return "MAY"; case '06': return "JUN"; case '07': return "JUL"; case '08': return "AUG"; case '09': return "SEP"; case '10': return "OCT"; case '11': return "NOV"; case '12': return "DEC"; } } compare(a, b) { //1 menjava, -1 ni menjava if (a.year < b.year) { return 1; } else if (a.year == b.year) { if (a.month < b.month) { return 1; } else if (a.month == b.month) { if (a.day < b.day) { return 1; } else { return -1; } } else { return -1; } } else { return -1; } return 0; } groupByCategories(parsedTable) { const groups = []; for (let entry of parsedTable.data) { const group = this.findGroupByCategory(groups, entry.category); if (group != null) { group.sum += parseInt(entry.value); } else { groups.push({ name: entry.category, sum: entry.value, color: entry.color, }); } } return groups; } fi
roups, category) { for (let group of groups) { if (group.name == category) return group; } return null; } makeDataArray1(array): Array<any> { const returnTable = [array.length]; for (let i = 0; i < array.length; i++) { returnTable[i] = array[i].sum; } return returnTable; } makeColorArray1(array) { const table = []; const returnTable = []; for (let i = 0; i < array.length; i++) { returnTable.push([array[i].color]); } let barva = { backgroundColor: returnTable } table[0] = barva; return table; } makeLabelArray1(array) { const returnTable = [array.length]; for (let i = 0; i < array.length; i++) { returnTable[i] = array[i].name; } return returnTable; } generateDatasets(map) { var datasets: ChartDataSets[] = []; let keys = Array.from(map.keys()); for (let i of keys) { datasets.push(this.generateDataset(map.get(i),i)); } return datasets; } generateDataset(data,category): Object { var podatki = this.getData(data); return {data: podatki, label: category}; } getData(data): Array<number> { const arr = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; let keys = Array.from(data.keys()); for (let i of keys) { var y: number = +i; arr[y-1]=(data.get(i).sum); } return arr; } getColors(data): Color[] { const arr: Color[] = []; let keys = Array.from(data.keys()); for (let i of keys) { a=data.get(i).color; var a = { borderColor: data.get(i).color } arr.push(a); } return arr; } generatePdf() { (<any>pdfMake).vfs = pdfFonts.pdfMake.vfs; var docDefinition = { content: [{ text: 'Expenses', style: 'header' }, { layout: 'lightHorizontalLines', // optional table: { // headers are automatically repeated if the table spans over multiple pages // you can declare how many rows should be treated as headers headerRows: 1, widths: [ '*', 'auto', 100, '*' ], body: [ [ 'Date', 'Category', 'Recipient', 'Value' ] ] }, margin: [ 0, 10, 0, 0 ] }], styles: { header: { fontSize: 22, bold: true }, body: { fontSize: 16 } } }; for (let expense of this.expenses) { 
docDefinition.content[1].table.body.push([`${expense['day']}-${expense['monthName']}-${expense['year']}`, expense['category'], expense['recipient'], `${expense['value']} ${expense['currency']}`]); } pdfMake.createPdf(docDefinition).open(); } changePage(page: number) { this.page = page; if (!this.filter) this.filter = ''; this.api.getExpense(this.filter, this.pageSize, (page - 1) * this.pageSize).then(result => { this.paginatedExpenses = result['expenses']; this.expenseCount = result['length']; }).catch(error => { this.authentication.logout(); this.router.navigate(['/']); }); } setFilter(filter: string) { this.filter = filter; this.changePage(this.page); } }
ndGroupByCategory(g
identifier_name
history.component.ts
import { Component, OnInit, ElementRef, ViewChild, Inject } from '@angular/core'; import { ApiService } from '../../services/api.service'; import { DOCUMENT } from '@angular/common' import { element } from 'protractor'; import { ChartDataSets } from 'chart.js'; import { Color } from 'ng2-charts'; import * as pdfMake from "pdfmake/build/pdfmake"; import * as pdfFonts from 'pdfmake/build/vfs_fonts'; import { Router } from '@angular/router'; import { AuthenticationService } from '../../services/authentication.service'; import { Expense } from '../../classes/expense'; import { ConnectionService } from '../../services/connection.service'; declare var $:any; declare var getTranslation: any; declare var setLanguage: any; @Component({ selector: 'app-history', templateUrl: './history.component.html', styleUrls: ['./history.component.css'] }) export class HistoryComponent implements OnInit { public categories: Array<Object> = []; public currency: string; public expenses: Array<Object> = []; fileName: string = "history"; graph: Object = { "used": true, "name": "HistoryChart" }; message: string = "messageHistory"; welcomeMessage: string = "welcomeMessageHistory"; logout: string = "Logout"; DASHBOAR: string = "DASHBOARD,"; ENVELOPES: string = "ENVELOPES"; GOALS: string = "GOALS"; BILLS: string = "BILLS"; HISTORY: string = "HISTORY"; UTILITIES: string = "UTILITIES"; user: string = "User"; settings: string = "Settings"; appearance: string = "Appearance"; light: string = "Light"; dark: string = "Dark"; chartData1: Array<any> = []; chartColors1: Array<any> = []; chartLabels1: Array<any> = []; chartType1: string = "doughnut"; chartData2: Array<any> = []; chartColors2: Array<any> = []; chartLabels2: Array<string> = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]; chartType2: string = "line"; paginatedExpenses: Array<Expense> = []; expenseCount: number = 0; page: number = 1; pageSize: number = 10 filter: string = ''; historyAll = "historyAll"; 
historyExport = "historyExport"; historyTotal = "historyTotal"; constructor( private api: ApiService, @Inject(DOCUMENT) private document: HTMLDocument, private router: Router, private authentication: AuthenticationService, private connectionService: ConnectionService ) { } public hasConnection(): boolean { return this.connectionService.hasConnection; } async handlePaginatedOfflineAsync() { if (this.connectionService.hasConnection && this.paginatedExpenses.length === 0) { if (this.connectionService.hasConnection) { this.api.getExpense('', this.pageSize, (this.page - 1) * this.pageSize).then(result => { this.expenseCount = result['length']; this.paginatedExpenses = result['expenses']; }).catch(error => {}); } } } @ViewChild('color') color: ElementRef; ngOnInit(): void { this.api.getUser().then(result => { this.refreshLanguage(result.language); this.categories = result.categories; this.expenses = this.generateExpenses(result.expense); this.currency = result.defaultCurrency; //data za tabele const parsedTable = this.parseTable(this.expenses); document.querySelector(".totaltext").innerHTML = "<h5>" + this.historyTotal + ": " + parsedTable.sum.toFixed(2); + "€</h5>"; const pieChart = this.groupByCategories(parsedTable); const lineChartData = this.makeDataForGraph(this.filterByCategory(this.expenses)) this.chartData1 = this.makeDataArray1(pieChart); this.chartColors1 = this.makeColorArray1(pieChart); this.chartLabels1 = this.makeLabelArray1(pieChart); this.chartData2 = this.generateDatasets(lineChartData); this.chartColors2 = this.getColors(lineChartData); }).catch(error => { this.authentication.logout(); this.router.navigate(['/']); }); setInterval(()=> { this.handlePaginatedOfflineAsync() }, 1000); } refreshLanguage(language: string) { setLanguage(language); this.message = getTranslation("messageHistory"); this.welcomeMessage = getTranslation("welcomeMessageHistory"); this.historyAll = getTranslation("historyAll"); this.historyExport = getTranslation("historyExport"); 
this.historyTotal = getTranslation("historyTotal"); } parseTable(rows) { const parsedTable = { sum: 0, data: [] }; for (let row of rows) { parsedTable.data.push({ id: row.id, year: row.year, month: row.month, day: row.day, category: row.category, receiver: row.receiver, currency: row.currency, value: row.value, color: row.color, }); parsedTable.sum += row.value; } return parsedTable; } filterByCategory(table) { var categories = new Map(); for (var expense of table) { if (!categories.get(expense.category)) { categories.set(expense.category, []); } categories.get(expense.category).push(expense); categories.get(expense.category).color = expense.color; } return categories; } filterByMonth(expenses) { var month = new Map(); for (var expense of expenses) { if (!month.get(expense.month)) { month.set(expense.month, {}); month.get(expense.month).sum = 0; } month.get(expense.month).sum += expense.value; } return month; } makeDataForGraph(category) { var month = new Map(); let keys = Array.from(category.keys()); for (let name of keys) { month.set(name, this.filterByMonth(category.get(name))); month.get(name).color = category.get(name).color; } return month; } convertMonthsToName(month) { switch (month) { case 'JAN': return "January"; case 'FEB': return "February"; case 'MAR': return "March"; case 'APR': return "April"; case 'MAY': return "May"; case 'JUN': return "June"; case 'JUL': return "July"; case 'AUG': return "August"; case 'SEP': return "September"; case 'OCT': return "October"; case 'NOV': return "November"; case 'DEC': return "December"; } } generateExpenses(expense) { var expensesArray = [] for (var exp of expense) { var date = exp.date.split('T')[0].split('-'); expensesArray.push({ id: exp._id, year: date[0], month: date[1], monthName: this.translateMonth(date[1]), day: date[2], category: exp.category.name, recipient: exp.recipient, value: exp.value, currency: exp.currency, color: exp.category.color, }); } expensesArray.sort(this.compare) return expensesArray; } 
translateMonth(month) { switch (month) { case '01': return "JAN"; case '02': return "FEB"; case '03': return "MAR"; case '04': return "APR"; case '05': return "MAY"; case '06': return "JUN"; case '07': return "JUL"; case '08': return "AUG"; case '09': return "SEP"; case '10': return "OCT"; case '11': return "NOV"; case '12': return "DEC"; } } compare(a, b) { //1 menjava, -1 ni menjava if (a.year < b.year) { return 1; } else if (a.year == b.year) { if (a.month < b.month) {
lse if (a.month == b.month) { if (a.day < b.day) { return 1; } else { return -1; } } else { return -1; } } else { return -1; } return 0; } groupByCategories(parsedTable) { const groups = []; for (let entry of parsedTable.data) { const group = this.findGroupByCategory(groups, entry.category); if (group != null) { group.sum += parseInt(entry.value); } else { groups.push({ name: entry.category, sum: entry.value, color: entry.color, }); } } return groups; } findGroupByCategory(groups, category) { for (let group of groups) { if (group.name == category) return group; } return null; } makeDataArray1(array): Array<any> { const returnTable = [array.length]; for (let i = 0; i < array.length; i++) { returnTable[i] = array[i].sum; } return returnTable; } makeColorArray1(array) { const table = []; const returnTable = []; for (let i = 0; i < array.length; i++) { returnTable.push([array[i].color]); } let barva = { backgroundColor: returnTable } table[0] = barva; return table; } makeLabelArray1(array) { const returnTable = [array.length]; for (let i = 0; i < array.length; i++) { returnTable[i] = array[i].name; } return returnTable; } generateDatasets(map) { var datasets: ChartDataSets[] = []; let keys = Array.from(map.keys()); for (let i of keys) { datasets.push(this.generateDataset(map.get(i),i)); } return datasets; } generateDataset(data,category): Object { var podatki = this.getData(data); return {data: podatki, label: category}; } getData(data): Array<number> { const arr = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; let keys = Array.from(data.keys()); for (let i of keys) { var y: number = +i; arr[y-1]=(data.get(i).sum); } return arr; } getColors(data): Color[] { const arr: Color[] = []; let keys = Array.from(data.keys()); for (let i of keys) { a=data.get(i).color; var a = { borderColor: data.get(i).color } arr.push(a); } return arr; } generatePdf() { (<any>pdfMake).vfs = pdfFonts.pdfMake.vfs; var docDefinition = { content: [{ text: 'Expenses', style: 'header' }, { layout: 
'lightHorizontalLines', // optional table: { // headers are automatically repeated if the table spans over multiple pages // you can declare how many rows should be treated as headers headerRows: 1, widths: [ '*', 'auto', 100, '*' ], body: [ [ 'Date', 'Category', 'Recipient', 'Value' ] ] }, margin: [ 0, 10, 0, 0 ] }], styles: { header: { fontSize: 22, bold: true }, body: { fontSize: 16 } } }; for (let expense of this.expenses) { docDefinition.content[1].table.body.push([`${expense['day']}-${expense['monthName']}-${expense['year']}`, expense['category'], expense['recipient'], `${expense['value']} ${expense['currency']}`]); } pdfMake.createPdf(docDefinition).open(); } changePage(page: number) { this.page = page; if (!this.filter) this.filter = ''; this.api.getExpense(this.filter, this.pageSize, (page - 1) * this.pageSize).then(result => { this.paginatedExpenses = result['expenses']; this.expenseCount = result['length']; }).catch(error => { this.authentication.logout(); this.router.navigate(['/']); }); } setFilter(filter: string) { this.filter = filter; this.changePage(this.page); } }
return 1; } e
conditional_block
history.component.ts
import { Component, OnInit, ElementRef, ViewChild, Inject } from '@angular/core'; import { ApiService } from '../../services/api.service'; import { DOCUMENT } from '@angular/common' import { element } from 'protractor'; import { ChartDataSets } from 'chart.js'; import { Color } from 'ng2-charts'; import * as pdfMake from "pdfmake/build/pdfmake"; import * as pdfFonts from 'pdfmake/build/vfs_fonts'; import { Router } from '@angular/router'; import { AuthenticationService } from '../../services/authentication.service'; import { Expense } from '../../classes/expense'; import { ConnectionService } from '../../services/connection.service'; declare var $:any; declare var getTranslation: any; declare var setLanguage: any; @Component({ selector: 'app-history', templateUrl: './history.component.html', styleUrls: ['./history.component.css'] }) export class HistoryComponent implements OnInit { public categories: Array<Object> = []; public currency: string; public expenses: Array<Object> = []; fileName: string = "history"; graph: Object = { "used": true, "name": "HistoryChart" }; message: string = "messageHistory"; welcomeMessage: string = "welcomeMessageHistory"; logout: string = "Logout"; DASHBOAR: string = "DASHBOARD,"; ENVELOPES: string = "ENVELOPES"; GOALS: string = "GOALS"; BILLS: string = "BILLS"; HISTORY: string = "HISTORY"; UTILITIES: string = "UTILITIES"; user: string = "User"; settings: string = "Settings"; appearance: string = "Appearance"; light: string = "Light"; dark: string = "Dark"; chartData1: Array<any> = []; chartColors1: Array<any> = []; chartLabels1: Array<any> = []; chartType1: string = "doughnut"; chartData2: Array<any> = []; chartColors2: Array<any> = []; chartLabels2: Array<string> = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]; chartType2: string = "line"; paginatedExpenses: Array<Expense> = []; expenseCount: number = 0; page: number = 1; pageSize: number = 10 filter: string = ''; historyAll = "historyAll"; 
historyExport = "historyExport"; historyTotal = "historyTotal"; constructor( private api: ApiService, @Inject(DOCUMENT) private document: HTMLDocument, private router: Router, private authentication: AuthenticationService, private connectionService: ConnectionService ) { } public hasConnection(): boolean { return this.connectionService.hasConnection; } async handlePaginatedOfflineAsync() { if (this.connectionService.hasConnection && this.paginatedExpenses.length === 0) { if (this.connectionService.hasConnection) { this.api.getExpense('', this.pageSize, (this.page - 1) * this.pageSize).then(result => { this.expenseCount = result['length']; this.paginatedExpenses = result['expenses']; }).catch(error => {}); } } } @ViewChild('color') color: ElementRef; ngOnInit(): void { this.api.getUser().then(result => { this.refreshLanguage(result.language); this.categories = result.categories; this.expenses = this.generateExpenses(result.expense); this.currency = result.defaultCurrency; //data za tabele const parsedTable = this.parseTable(this.expenses); document.querySelector(".totaltext").innerHTML = "<h5>" + this.historyTotal + ": " + parsedTable.sum.toFixed(2); + "€</h5>"; const pieChart = this.groupByCategories(parsedTable); const lineChartData = this.makeDataForGraph(this.filterByCategory(this.expenses)) this.chartData1 = this.makeDataArray1(pieChart); this.chartColors1 = this.makeColorArray1(pieChart); this.chartLabels1 = this.makeLabelArray1(pieChart); this.chartData2 = this.generateDatasets(lineChartData); this.chartColors2 = this.getColors(lineChartData); }).catch(error => { this.authentication.logout(); this.router.navigate(['/']); }); setInterval(()=> { this.handlePaginatedOfflineAsync() }, 1000); } refreshLanguage(language: string) { setLanguage(language); this.message = getTranslation("messageHistory"); this.welcomeMessage = getTranslation("welcomeMessageHistory"); this.historyAll = getTranslation("historyAll"); this.historyExport = getTranslation("historyExport"); 
this.historyTotal = getTranslation("historyTotal"); } parseTable(rows) { const parsedTable = { sum: 0, data: [] }; for (let row of rows) { parsedTable.data.push({ id: row.id, year: row.year, month: row.month, day: row.day, category: row.category, receiver: row.receiver, currency: row.currency, value: row.value, color: row.color, }); parsedTable.sum += row.value; } return parsedTable; } filterByCategory(table) { var categories = new Map(); for (var expense of table) { if (!categories.get(expense.category)) { categories.set(expense.category, []); } categories.get(expense.category).push(expense); categories.get(expense.category).color = expense.color; } return categories; } filterByMonth(expenses) { var month = new Map(); for (var expense of expenses) { if (!month.get(expense.month)) { month.set(expense.month, {}); month.get(expense.month).sum = 0; } month.get(expense.month).sum += expense.value; } return month; } makeDataForGraph(category) { var month = new Map(); let keys = Array.from(category.keys()); for (let name of keys) { month.set(name, this.filterByMonth(category.get(name))); month.get(name).color = category.get(name).color; } return month; } convertMonthsToName(month) { switch (month) { case 'JAN': return "January"; case 'FEB': return "February"; case 'MAR': return "March"; case 'APR': return "April"; case 'MAY': return "May"; case 'JUN': return "June"; case 'JUL': return "July"; case 'AUG': return "August"; case 'SEP': return "September"; case 'OCT': return "October"; case 'NOV': return "November"; case 'DEC': return "December"; } } generateExpenses(expense) {
translateMonth(month) { switch (month) { case '01': return "JAN"; case '02': return "FEB"; case '03': return "MAR"; case '04': return "APR"; case '05': return "MAY"; case '06': return "JUN"; case '07': return "JUL"; case '08': return "AUG"; case '09': return "SEP"; case '10': return "OCT"; case '11': return "NOV"; case '12': return "DEC"; } } compare(a, b) { //1 menjava, -1 ni menjava if (a.year < b.year) { return 1; } else if (a.year == b.year) { if (a.month < b.month) { return 1; } else if (a.month == b.month) { if (a.day < b.day) { return 1; } else { return -1; } } else { return -1; } } else { return -1; } return 0; } groupByCategories(parsedTable) { const groups = []; for (let entry of parsedTable.data) { const group = this.findGroupByCategory(groups, entry.category); if (group != null) { group.sum += parseInt(entry.value); } else { groups.push({ name: entry.category, sum: entry.value, color: entry.color, }); } } return groups; } findGroupByCategory(groups, category) { for (let group of groups) { if (group.name == category) return group; } return null; } makeDataArray1(array): Array<any> { const returnTable = [array.length]; for (let i = 0; i < array.length; i++) { returnTable[i] = array[i].sum; } return returnTable; } makeColorArray1(array) { const table = []; const returnTable = []; for (let i = 0; i < array.length; i++) { returnTable.push([array[i].color]); } let barva = { backgroundColor: returnTable } table[0] = barva; return table; } makeLabelArray1(array) { const returnTable = [array.length]; for (let i = 0; i < array.length; i++) { returnTable[i] = array[i].name; } return returnTable; } generateDatasets(map) { var datasets: ChartDataSets[] = []; let keys = Array.from(map.keys()); for (let i of keys) { datasets.push(this.generateDataset(map.get(i),i)); } return datasets; } generateDataset(data,category): Object { var podatki = this.getData(data); return {data: podatki, label: category}; } getData(data): Array<number> { const arr = [0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0]; let keys = Array.from(data.keys()); for (let i of keys) { var y: number = +i; arr[y-1]=(data.get(i).sum); } return arr; } getColors(data): Color[] { const arr: Color[] = []; let keys = Array.from(data.keys()); for (let i of keys) { a=data.get(i).color; var a = { borderColor: data.get(i).color } arr.push(a); } return arr; } generatePdf() { (<any>pdfMake).vfs = pdfFonts.pdfMake.vfs; var docDefinition = { content: [{ text: 'Expenses', style: 'header' }, { layout: 'lightHorizontalLines', // optional table: { // headers are automatically repeated if the table spans over multiple pages // you can declare how many rows should be treated as headers headerRows: 1, widths: [ '*', 'auto', 100, '*' ], body: [ [ 'Date', 'Category', 'Recipient', 'Value' ] ] }, margin: [ 0, 10, 0, 0 ] }], styles: { header: { fontSize: 22, bold: true }, body: { fontSize: 16 } } }; for (let expense of this.expenses) { docDefinition.content[1].table.body.push([`${expense['day']}-${expense['monthName']}-${expense['year']}`, expense['category'], expense['recipient'], `${expense['value']} ${expense['currency']}`]); } pdfMake.createPdf(docDefinition).open(); } changePage(page: number) { this.page = page; if (!this.filter) this.filter = ''; this.api.getExpense(this.filter, this.pageSize, (page - 1) * this.pageSize).then(result => { this.paginatedExpenses = result['expenses']; this.expenseCount = result['length']; }).catch(error => { this.authentication.logout(); this.router.navigate(['/']); }); } setFilter(filter: string) { this.filter = filter; this.changePage(this.page); } }
var expensesArray = [] for (var exp of expense) { var date = exp.date.split('T')[0].split('-'); expensesArray.push({ id: exp._id, year: date[0], month: date[1], monthName: this.translateMonth(date[1]), day: date[2], category: exp.category.name, recipient: exp.recipient, value: exp.value, currency: exp.currency, color: exp.category.color, }); } expensesArray.sort(this.compare) return expensesArray; }
identifier_body
history.component.ts
import { Component, OnInit, ElementRef, ViewChild, Inject } from '@angular/core'; import { ApiService } from '../../services/api.service'; import { DOCUMENT } from '@angular/common' import { element } from 'protractor'; import { ChartDataSets } from 'chart.js'; import { Color } from 'ng2-charts'; import * as pdfMake from "pdfmake/build/pdfmake"; import * as pdfFonts from 'pdfmake/build/vfs_fonts'; import { Router } from '@angular/router'; import { AuthenticationService } from '../../services/authentication.service'; import { Expense } from '../../classes/expense'; import { ConnectionService } from '../../services/connection.service'; declare var $:any; declare var getTranslation: any; declare var setLanguage: any; @Component({ selector: 'app-history', templateUrl: './history.component.html', styleUrls: ['./history.component.css'] }) export class HistoryComponent implements OnInit { public categories: Array<Object> = []; public currency: string; public expenses: Array<Object> = []; fileName: string = "history"; graph: Object = { "used": true, "name": "HistoryChart" }; message: string = "messageHistory"; welcomeMessage: string = "welcomeMessageHistory"; logout: string = "Logout"; DASHBOAR: string = "DASHBOARD,"; ENVELOPES: string = "ENVELOPES"; GOALS: string = "GOALS"; BILLS: string = "BILLS"; HISTORY: string = "HISTORY"; UTILITIES: string = "UTILITIES"; user: string = "User"; settings: string = "Settings"; appearance: string = "Appearance"; light: string = "Light"; dark: string = "Dark"; chartData1: Array<any> = []; chartColors1: Array<any> = []; chartLabels1: Array<any> = []; chartType1: string = "doughnut"; chartData2: Array<any> = []; chartColors2: Array<any> = []; chartLabels2: Array<string> = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]; chartType2: string = "line"; paginatedExpenses: Array<Expense> = []; expenseCount: number = 0; page: number = 1; pageSize: number = 10 filter: string = ''; historyAll = "historyAll"; 
historyExport = "historyExport"; historyTotal = "historyTotal"; constructor( private api: ApiService, @Inject(DOCUMENT) private document: HTMLDocument, private router: Router, private authentication: AuthenticationService, private connectionService: ConnectionService ) { } public hasConnection(): boolean { return this.connectionService.hasConnection; } async handlePaginatedOfflineAsync() { if (this.connectionService.hasConnection && this.paginatedExpenses.length === 0) { if (this.connectionService.hasConnection) { this.api.getExpense('', this.pageSize, (this.page - 1) * this.pageSize).then(result => { this.expenseCount = result['length']; this.paginatedExpenses = result['expenses']; }).catch(error => {}); } } } @ViewChild('color') color: ElementRef; ngOnInit(): void { this.api.getUser().then(result => { this.refreshLanguage(result.language); this.categories = result.categories; this.expenses = this.generateExpenses(result.expense); this.currency = result.defaultCurrency; //data za tabele
const pieChart = this.groupByCategories(parsedTable); const lineChartData = this.makeDataForGraph(this.filterByCategory(this.expenses)) this.chartData1 = this.makeDataArray1(pieChart); this.chartColors1 = this.makeColorArray1(pieChart); this.chartLabels1 = this.makeLabelArray1(pieChart); this.chartData2 = this.generateDatasets(lineChartData); this.chartColors2 = this.getColors(lineChartData); }).catch(error => { this.authentication.logout(); this.router.navigate(['/']); }); setInterval(()=> { this.handlePaginatedOfflineAsync() }, 1000); } refreshLanguage(language: string) { setLanguage(language); this.message = getTranslation("messageHistory"); this.welcomeMessage = getTranslation("welcomeMessageHistory"); this.historyAll = getTranslation("historyAll"); this.historyExport = getTranslation("historyExport"); this.historyTotal = getTranslation("historyTotal"); } parseTable(rows) { const parsedTable = { sum: 0, data: [] }; for (let row of rows) { parsedTable.data.push({ id: row.id, year: row.year, month: row.month, day: row.day, category: row.category, receiver: row.receiver, currency: row.currency, value: row.value, color: row.color, }); parsedTable.sum += row.value; } return parsedTable; } filterByCategory(table) { var categories = new Map(); for (var expense of table) { if (!categories.get(expense.category)) { categories.set(expense.category, []); } categories.get(expense.category).push(expense); categories.get(expense.category).color = expense.color; } return categories; } filterByMonth(expenses) { var month = new Map(); for (var expense of expenses) { if (!month.get(expense.month)) { month.set(expense.month, {}); month.get(expense.month).sum = 0; } month.get(expense.month).sum += expense.value; } return month; } makeDataForGraph(category) { var month = new Map(); let keys = Array.from(category.keys()); for (let name of keys) { month.set(name, this.filterByMonth(category.get(name))); month.get(name).color = category.get(name).color; } return month; } 
convertMonthsToName(month) { switch (month) { case 'JAN': return "January"; case 'FEB': return "February"; case 'MAR': return "March"; case 'APR': return "April"; case 'MAY': return "May"; case 'JUN': return "June"; case 'JUL': return "July"; case 'AUG': return "August"; case 'SEP': return "September"; case 'OCT': return "October"; case 'NOV': return "November"; case 'DEC': return "December"; } } generateExpenses(expense) { var expensesArray = [] for (var exp of expense) { var date = exp.date.split('T')[0].split('-'); expensesArray.push({ id: exp._id, year: date[0], month: date[1], monthName: this.translateMonth(date[1]), day: date[2], category: exp.category.name, recipient: exp.recipient, value: exp.value, currency: exp.currency, color: exp.category.color, }); } expensesArray.sort(this.compare) return expensesArray; } translateMonth(month) { switch (month) { case '01': return "JAN"; case '02': return "FEB"; case '03': return "MAR"; case '04': return "APR"; case '05': return "MAY"; case '06': return "JUN"; case '07': return "JUL"; case '08': return "AUG"; case '09': return "SEP"; case '10': return "OCT"; case '11': return "NOV"; case '12': return "DEC"; } } compare(a, b) { //1 menjava, -1 ni menjava if (a.year < b.year) { return 1; } else if (a.year == b.year) { if (a.month < b.month) { return 1; } else if (a.month == b.month) { if (a.day < b.day) { return 1; } else { return -1; } } else { return -1; } } else { return -1; } return 0; } groupByCategories(parsedTable) { const groups = []; for (let entry of parsedTable.data) { const group = this.findGroupByCategory(groups, entry.category); if (group != null) { group.sum += parseInt(entry.value); } else { groups.push({ name: entry.category, sum: entry.value, color: entry.color, }); } } return groups; } findGroupByCategory(groups, category) { for (let group of groups) { if (group.name == category) return group; } return null; } makeDataArray1(array): Array<any> { const returnTable = [array.length]; for (let i = 0; i < 
array.length; i++) { returnTable[i] = array[i].sum; } return returnTable; } makeColorArray1(array) { const table = []; const returnTable = []; for (let i = 0; i < array.length; i++) { returnTable.push([array[i].color]); } let barva = { backgroundColor: returnTable } table[0] = barva; return table; } makeLabelArray1(array) { const returnTable = [array.length]; for (let i = 0; i < array.length; i++) { returnTable[i] = array[i].name; } return returnTable; } generateDatasets(map) { var datasets: ChartDataSets[] = []; let keys = Array.from(map.keys()); for (let i of keys) { datasets.push(this.generateDataset(map.get(i),i)); } return datasets; } generateDataset(data,category): Object { var podatki = this.getData(data); return {data: podatki, label: category}; } getData(data): Array<number> { const arr = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; let keys = Array.from(data.keys()); for (let i of keys) { var y: number = +i; arr[y-1]=(data.get(i).sum); } return arr; } getColors(data): Color[] { const arr: Color[] = []; let keys = Array.from(data.keys()); for (let i of keys) { a=data.get(i).color; var a = { borderColor: data.get(i).color } arr.push(a); } return arr; } generatePdf() { (<any>pdfMake).vfs = pdfFonts.pdfMake.vfs; var docDefinition = { content: [{ text: 'Expenses', style: 'header' }, { layout: 'lightHorizontalLines', // optional table: { // headers are automatically repeated if the table spans over multiple pages // you can declare how many rows should be treated as headers headerRows: 1, widths: [ '*', 'auto', 100, '*' ], body: [ [ 'Date', 'Category', 'Recipient', 'Value' ] ] }, margin: [ 0, 10, 0, 0 ] }], styles: { header: { fontSize: 22, bold: true }, body: { fontSize: 16 } } }; for (let expense of this.expenses) { docDefinition.content[1].table.body.push([`${expense['day']}-${expense['monthName']}-${expense['year']}`, expense['category'], expense['recipient'], `${expense['value']} ${expense['currency']}`]); } pdfMake.createPdf(docDefinition).open(); } 
changePage(page: number) { this.page = page; if (!this.filter) this.filter = ''; this.api.getExpense(this.filter, this.pageSize, (page - 1) * this.pageSize).then(result => { this.paginatedExpenses = result['expenses']; this.expenseCount = result['length']; }).catch(error => { this.authentication.logout(); this.router.navigate(['/']); }); } setFilter(filter: string) { this.filter = filter; this.changePage(this.page); } }
const parsedTable = this.parseTable(this.expenses); document.querySelector(".totaltext").innerHTML = "<h5>" + this.historyTotal + ": " + parsedTable.sum.toFixed(2); + "€</h5>";
random_line_split
blueberry_segmentation.py
# -*- coding: utf-8 -*- """blueberry_segmentation.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1fez-oHMJuNSvBrawtIQ9S5Wf9d8UeanD # Mount Google Drive """ from google.colab import drive drive.mount('/content/gdrive/', force_remount=True) """# Install external libraries""" !pip install albumentations==0.4.6 !pip install torch !pip install torchvision """# Imports""" import os import matplotlib.pyplot as plt from cv2 import imread, cvtColor, COLOR_BGR2RGB, COLOR_BGR2GRAY from PIL import Image import albumentations as A from albumentations.pytorch.transforms import ToTensorV2 import torch import torch.nn as nn from torchvision import models from torchvision import transforms from torch.utils.data import Dataset, DataLoader from torch import optim from torch.optim import lr_scheduler import time import copy """# Constants""" BASE_DIR = 'gdrive/MyDrive/datasets/Deep_BlueBerry_databases/instancesegmentation' IMAGES_DIR = BASE_DIR + '/images/' MASKS_DIR = BASE_DIR + '/masks/' TEST_DIR = BASE_DIR + '/test' IMAGE_HEIGHT = 512 IMAGE_WIDTH = 512 """# Dataset""" train_transform = A.Compose( [ A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=5, val_shift_limit=5), A.Rotate(), A.GaussNoise(), A.Resize(IMAGE_HEIGHT, IMAGE_WIDTH), ], additional_targets={ 'image' : 'image', 'mask' : 'image', } ) test_transform = A.Compose( [ A.Resize(IMAGE_HEIGHT, IMAGE_WIDTH) ] ) to_grayscale = A.Compose( [ ToTensorV2() ] ) class BlueberryDataset(Dataset): def __init__(self, base_path, image_path, mask_path, transform=None): self.images = [] self.masks = [] self.transform = transform self.to_tensor = transforms.Compose([transforms.ToTensor()]) self.process_mask = transforms.Compose( [ transforms.Grayscale(num_output_channels=1), transforms.ToTensor(), ] ) for image_file in os.listdir(image_path): self.images.append(os.path.join(image_path, image_file)) mask_file = image_file[-12:-3] + 'png' 
self.masks.append(os.path.join(mask_path, mask_file)) def __len__(self): return len(self.images) def
(self, index): image = imread(self.images[index]) image = cvtColor(image, COLOR_BGR2RGB) mask = imread(self.masks[index]) mask = cvtColor(mask, COLOR_BGR2RGB) transformed = self.transform(image=image, mask=mask) image = transformed['image'] mask = transformed['mask'] image = self.to_tensor(image) mask = Image.fromarray(mask) mask = self.process_mask(mask) return image, mask class BlueberryTestDataset(Dataset): def __init__(self, base_path, image_path, transform=None): self.images = [] self.transform = transform self.to_tensor = transforms.Compose([transforms.ToTensor()]) for image_file in os.listdir(image_path): self.images.append(os.path.join(image_path, image_file)) def __len__(self): return len(self.images) def __getitem__(self, index): image = imread(self.images[index]) image = cvtColor(image, COLOR_BGR2RGB) transformed = self.transform(image=image) image = transformed['image'] image = self.to_tensor(image) return image dataset = BlueberryDataset(BASE_DIR, IMAGES_DIR, MASKS_DIR, transform=train_transform) train_set, val_set = torch.utils.data.random_split(dataset, [6, 1]) test = BlueberryTestDataset(BASE_DIR, TEST_DIR, test_transform) dataloaders = { 'train': DataLoader(train_set, batch_size=1, shuffle=True), 'val': DataLoader(val_set, batch_size=1, shuffle=True), 'test': DataLoader(test, batch_size=1, shuffle=True) } """# Visualize Data""" image1, mask1 = dataset[0] image2, mask2 = dataset[2] fig = plt.figure(figsize=(20,20)) ax1 = fig.add_subplot(2, 5, 1) ax1.imshow(image1.permute(1, 2, 0)) ax2 = fig.add_subplot(2, 5, 2) ax2.imshow(mask1.permute(1, 2, 0).squeeze(2)) ax3 = fig.add_subplot(2, 5, 3) ax3.imshow(image2.permute(1, 2, 0)) ax4 = fig.add_subplot(2, 5, 4) ax4.imshow(mask2.permute(1, 2, 0).squeeze(2)) plt.show() """# Build model""" # Source: https://github.com/usuyama/pytorch-unet from torch import nn def convrelu(in_channels, out_channels, kernel, padding): return nn.Sequential( nn.Conv2d(in_channels, out_channels, kernel, padding=padding), 
nn.ReLU(inplace=True), ) class ResNetUNet(nn.Module): def __init__(self, n_class): super().__init__() self.base_model = models.resnet18(pretrained=True) self.base_layers = list(self.base_model.children()) self.layer0 = nn.Sequential(*self.base_layers[:3]) # size=(N, 64, x.H/2, x.W/2) self.layer0_1x1 = convrelu(64, 64, 1, 0) self.layer1 = nn.Sequential(*self.base_layers[3:5]) # size=(N, 64, x.H/4, x.W/4) self.layer1_1x1 = convrelu(64, 64, 1, 0) self.layer2 = self.base_layers[5] # size=(N, 128, x.H/8, x.W/8) self.layer2_1x1 = convrelu(128, 128, 1, 0) self.layer3 = self.base_layers[6] # size=(N, 256, x.H/16, x.W/16) self.layer3_1x1 = convrelu(256, 256, 1, 0) self.layer4 = self.base_layers[7] # size=(N, 512, x.H/32, x.W/32) self.layer4_1x1 = convrelu(512, 512, 1, 0) self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) self.conv_up3 = convrelu(256 + 512, 512, 3, 1) self.conv_up2 = convrelu(128 + 512, 256, 3, 1) self.conv_up1 = convrelu(64 + 256, 256, 3, 1) self.conv_up0 = convrelu(64 + 256, 128, 3, 1) self.conv_original_size0 = convrelu(3, 64, 3, 1) self.conv_original_size1 = convrelu(64, 64, 3, 1) self.conv_original_size2 = convrelu(64 + 128, 64, 3, 1) self.conv_last = nn.Conv2d(64, n_class, 1) def forward(self, input): x_original = self.conv_original_size0(input) x_original = self.conv_original_size1(x_original) layer0 = self.layer0(input) layer1 = self.layer1(layer0) layer2 = self.layer2(layer1) layer3 = self.layer3(layer2) layer4 = self.layer4(layer3) layer4 = self.layer4_1x1(layer4) x = self.upsample(layer4) layer3 = self.layer3_1x1(layer3) x = torch.cat([x, layer3], dim=1) x = self.conv_up3(x) x = self.upsample(x) layer2 = self.layer2_1x1(layer2) x = torch.cat([x, layer2], dim=1) x = self.conv_up2(x) x = self.upsample(x) layer1 = self.layer1_1x1(layer1) x = torch.cat([x, layer1], dim=1) x = self.conv_up1(x) x = self.upsample(x) layer0 = self.layer0_1x1(layer0) x = torch.cat([x, layer0], dim=1) x = self.conv_up0(x) x = self.upsample(x) x 
= torch.cat([x, x_original], dim=1) x = self.conv_original_size2(x) out = self.conv_last(x) return out unet = ResNetUNet(n_class=3) """# Model summary""" device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') model = ResNetUNet(n_class=1) model = model.to(device) from torchsummary import summary summary(model, input_size=(3, 512, 512)) """# Define training functions""" # Source: https://github.com/usuyama/pytorch-unet from collections import defaultdict import torch.nn.functional as F def dice_loss(pred, target, smooth = 1.): pred = pred.contiguous() target = target.contiguous() intersection = (pred * target).sum(dim=2).sum(dim=2) loss = (1 - ((2. * intersection + smooth) / (pred.sum(dim=2).sum(dim=2) + target.sum(dim=2).sum(dim=2) + smooth))) return loss.mean() def calc_loss(pred, target, metrics, bce_weight=0.5): bce = F.binary_cross_entropy_with_logits(pred, target) pred = F.sigmoid(pred) dice = dice_loss(pred, target) loss = bce * bce_weight + dice * (1 - bce_weight) metrics['bce'] += bce.data.cpu().numpy() * target.size(0) metrics['dice'] += dice.data.cpu().numpy() * target.size(0) metrics['loss'] += loss.data.cpu().numpy() * target.size(0) return loss def print_metrics(metrics, epoch_samples, phase): outputs = [] for k in metrics.keys(): outputs.append("{}: {:4f}".format(k, metrics[k] / epoch_samples)) print("{}: {}".format(phase, ", ".join(outputs))) def train_model(model, optimizer, scheduler, num_epochs=25): training_loss_array = [] validation_loss_array = [] best_model_wts = copy.deepcopy(model.state_dict()) best_loss = 1e10 for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch, num_epochs - 1)) print('-' * 10) since = time.time() # Each epoch has a training and validation phase for phase in ['train', 'val']: if phase == 'train': scheduler.step() for param_group in optimizer.param_groups: print("LR", param_group['lr']) model.train() # Set model to training mode else: model.eval() # Set model to evaluate mode metrics = 
defaultdict(float) epoch_samples = 0 for inputs, labels in dataloaders[phase]: inputs = inputs.to(device) labels = labels.to(device) # zero the parameter gradients optimizer.zero_grad() # forward # track history if only in train with torch.set_grad_enabled(phase == 'train'): outputs = model(inputs) loss = calc_loss(outputs, labels, metrics) # backward + optimize only if in training phase if phase == 'train': loss.backward() optimizer.step() # statistics epoch_samples += inputs.size(0) print_metrics(metrics, epoch_samples, phase) epoch_loss = metrics['loss'] / epoch_samples # deep copy the model if phase == 'val' and epoch_loss < best_loss: print("saving best model") best_loss = epoch_loss best_model_wts = copy.deepcopy(model.state_dict()) if phase == 'val': validation_loss_array.append(epoch_loss) else: training_loss_array.append(epoch_loss) time_elapsed = time.time() - since print('{:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60)) print('Best val loss: {:4f}'.format(best_loss)) print('\n\n') plt.plot(range(len(training_loss_array)), training_loss_array, label='training') plt.plot(range(len(validation_loss_array)), validation_loss_array, label='validation') plt.xlabel("Epoch") plt.ylabel("Loss") plt.legend() plt.show() # load best model weights model.load_state_dict(best_model_wts) return model """# Train""" device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print(device) num_class = 1 model = ResNetUNet(num_class).to(device) optimizer_ft = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=1e-4) exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=30, gamma=0.1) model = train_model(model, optimizer_ft, exp_lr_scheduler, num_epochs=20) """# Prediction""" import math import numpy as np model.eval() # Get the first batch inputs = next(iter(dataloaders['test'])) inputs = inputs.to(device) # Predict pred = model(inputs) # The loss functions include the sigmoid function. 
pred = F.sigmoid(pred) pred = pred.data.cpu().numpy() prediction_image = pred[0][0] prediction_image[prediction_image >= 0.5] = 1 prediction_image[prediction_image < 0.5] = 0 original_image = inputs.cpu()[0] original_image = original_image.permute(1, 2, 0) # Rearrange RGB in the correct order fig = plt.figure(figsize=(20,20)) ax1 = fig.add_subplot(2, 5, 1) ax1.imshow(prediction_image) ax2 = fig.add_subplot(2, 5, 2) ax2.imshow(original_image) plt.show()
__getitem__
identifier_name
blueberry_segmentation.py
# -*- coding: utf-8 -*- """blueberry_segmentation.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1fez-oHMJuNSvBrawtIQ9S5Wf9d8UeanD # Mount Google Drive """ from google.colab import drive drive.mount('/content/gdrive/', force_remount=True) """# Install external libraries"""
"""# Imports""" import os import matplotlib.pyplot as plt from cv2 import imread, cvtColor, COLOR_BGR2RGB, COLOR_BGR2GRAY from PIL import Image import albumentations as A from albumentations.pytorch.transforms import ToTensorV2 import torch import torch.nn as nn from torchvision import models from torchvision import transforms from torch.utils.data import Dataset, DataLoader from torch import optim from torch.optim import lr_scheduler import time import copy """# Constants""" BASE_DIR = 'gdrive/MyDrive/datasets/Deep_BlueBerry_databases/instancesegmentation' IMAGES_DIR = BASE_DIR + '/images/' MASKS_DIR = BASE_DIR + '/masks/' TEST_DIR = BASE_DIR + '/test' IMAGE_HEIGHT = 512 IMAGE_WIDTH = 512 """# Dataset""" train_transform = A.Compose( [ A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=5, val_shift_limit=5), A.Rotate(), A.GaussNoise(), A.Resize(IMAGE_HEIGHT, IMAGE_WIDTH), ], additional_targets={ 'image' : 'image', 'mask' : 'image', } ) test_transform = A.Compose( [ A.Resize(IMAGE_HEIGHT, IMAGE_WIDTH) ] ) to_grayscale = A.Compose( [ ToTensorV2() ] ) class BlueberryDataset(Dataset): def __init__(self, base_path, image_path, mask_path, transform=None): self.images = [] self.masks = [] self.transform = transform self.to_tensor = transforms.Compose([transforms.ToTensor()]) self.process_mask = transforms.Compose( [ transforms.Grayscale(num_output_channels=1), transforms.ToTensor(), ] ) for image_file in os.listdir(image_path): self.images.append(os.path.join(image_path, image_file)) mask_file = image_file[-12:-3] + 'png' self.masks.append(os.path.join(mask_path, mask_file)) def __len__(self): return len(self.images) def __getitem__(self, index): image = imread(self.images[index]) image = cvtColor(image, COLOR_BGR2RGB) mask = imread(self.masks[index]) mask = cvtColor(mask, COLOR_BGR2RGB) transformed = self.transform(image=image, mask=mask) image = transformed['image'] mask = transformed['mask'] image = self.to_tensor(image) mask = Image.fromarray(mask) mask = 
self.process_mask(mask) return image, mask class BlueberryTestDataset(Dataset): def __init__(self, base_path, image_path, transform=None): self.images = [] self.transform = transform self.to_tensor = transforms.Compose([transforms.ToTensor()]) for image_file in os.listdir(image_path): self.images.append(os.path.join(image_path, image_file)) def __len__(self): return len(self.images) def __getitem__(self, index): image = imread(self.images[index]) image = cvtColor(image, COLOR_BGR2RGB) transformed = self.transform(image=image) image = transformed['image'] image = self.to_tensor(image) return image dataset = BlueberryDataset(BASE_DIR, IMAGES_DIR, MASKS_DIR, transform=train_transform) train_set, val_set = torch.utils.data.random_split(dataset, [6, 1]) test = BlueberryTestDataset(BASE_DIR, TEST_DIR, test_transform) dataloaders = { 'train': DataLoader(train_set, batch_size=1, shuffle=True), 'val': DataLoader(val_set, batch_size=1, shuffle=True), 'test': DataLoader(test, batch_size=1, shuffle=True) } """# Visualize Data""" image1, mask1 = dataset[0] image2, mask2 = dataset[2] fig = plt.figure(figsize=(20,20)) ax1 = fig.add_subplot(2, 5, 1) ax1.imshow(image1.permute(1, 2, 0)) ax2 = fig.add_subplot(2, 5, 2) ax2.imshow(mask1.permute(1, 2, 0).squeeze(2)) ax3 = fig.add_subplot(2, 5, 3) ax3.imshow(image2.permute(1, 2, 0)) ax4 = fig.add_subplot(2, 5, 4) ax4.imshow(mask2.permute(1, 2, 0).squeeze(2)) plt.show() """# Build model""" # Source: https://github.com/usuyama/pytorch-unet from torch import nn def convrelu(in_channels, out_channels, kernel, padding): return nn.Sequential( nn.Conv2d(in_channels, out_channels, kernel, padding=padding), nn.ReLU(inplace=True), ) class ResNetUNet(nn.Module): def __init__(self, n_class): super().__init__() self.base_model = models.resnet18(pretrained=True) self.base_layers = list(self.base_model.children()) self.layer0 = nn.Sequential(*self.base_layers[:3]) # size=(N, 64, x.H/2, x.W/2) self.layer0_1x1 = convrelu(64, 64, 1, 0) self.layer1 = 
nn.Sequential(*self.base_layers[3:5]) # size=(N, 64, x.H/4, x.W/4) self.layer1_1x1 = convrelu(64, 64, 1, 0) self.layer2 = self.base_layers[5] # size=(N, 128, x.H/8, x.W/8) self.layer2_1x1 = convrelu(128, 128, 1, 0) self.layer3 = self.base_layers[6] # size=(N, 256, x.H/16, x.W/16) self.layer3_1x1 = convrelu(256, 256, 1, 0) self.layer4 = self.base_layers[7] # size=(N, 512, x.H/32, x.W/32) self.layer4_1x1 = convrelu(512, 512, 1, 0) self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) self.conv_up3 = convrelu(256 + 512, 512, 3, 1) self.conv_up2 = convrelu(128 + 512, 256, 3, 1) self.conv_up1 = convrelu(64 + 256, 256, 3, 1) self.conv_up0 = convrelu(64 + 256, 128, 3, 1) self.conv_original_size0 = convrelu(3, 64, 3, 1) self.conv_original_size1 = convrelu(64, 64, 3, 1) self.conv_original_size2 = convrelu(64 + 128, 64, 3, 1) self.conv_last = nn.Conv2d(64, n_class, 1) def forward(self, input): x_original = self.conv_original_size0(input) x_original = self.conv_original_size1(x_original) layer0 = self.layer0(input) layer1 = self.layer1(layer0) layer2 = self.layer2(layer1) layer3 = self.layer3(layer2) layer4 = self.layer4(layer3) layer4 = self.layer4_1x1(layer4) x = self.upsample(layer4) layer3 = self.layer3_1x1(layer3) x = torch.cat([x, layer3], dim=1) x = self.conv_up3(x) x = self.upsample(x) layer2 = self.layer2_1x1(layer2) x = torch.cat([x, layer2], dim=1) x = self.conv_up2(x) x = self.upsample(x) layer1 = self.layer1_1x1(layer1) x = torch.cat([x, layer1], dim=1) x = self.conv_up1(x) x = self.upsample(x) layer0 = self.layer0_1x1(layer0) x = torch.cat([x, layer0], dim=1) x = self.conv_up0(x) x = self.upsample(x) x = torch.cat([x, x_original], dim=1) x = self.conv_original_size2(x) out = self.conv_last(x) return out unet = ResNetUNet(n_class=3) """# Model summary""" device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') model = ResNetUNet(n_class=1) model = model.to(device) from torchsummary import summary summary(model, 
input_size=(3, 512, 512)) """# Define training functions""" # Source: https://github.com/usuyama/pytorch-unet from collections import defaultdict import torch.nn.functional as F def dice_loss(pred, target, smooth = 1.): pred = pred.contiguous() target = target.contiguous() intersection = (pred * target).sum(dim=2).sum(dim=2) loss = (1 - ((2. * intersection + smooth) / (pred.sum(dim=2).sum(dim=2) + target.sum(dim=2).sum(dim=2) + smooth))) return loss.mean() def calc_loss(pred, target, metrics, bce_weight=0.5): bce = F.binary_cross_entropy_with_logits(pred, target) pred = F.sigmoid(pred) dice = dice_loss(pred, target) loss = bce * bce_weight + dice * (1 - bce_weight) metrics['bce'] += bce.data.cpu().numpy() * target.size(0) metrics['dice'] += dice.data.cpu().numpy() * target.size(0) metrics['loss'] += loss.data.cpu().numpy() * target.size(0) return loss def print_metrics(metrics, epoch_samples, phase): outputs = [] for k in metrics.keys(): outputs.append("{}: {:4f}".format(k, metrics[k] / epoch_samples)) print("{}: {}".format(phase, ", ".join(outputs))) def train_model(model, optimizer, scheduler, num_epochs=25): training_loss_array = [] validation_loss_array = [] best_model_wts = copy.deepcopy(model.state_dict()) best_loss = 1e10 for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch, num_epochs - 1)) print('-' * 10) since = time.time() # Each epoch has a training and validation phase for phase in ['train', 'val']: if phase == 'train': scheduler.step() for param_group in optimizer.param_groups: print("LR", param_group['lr']) model.train() # Set model to training mode else: model.eval() # Set model to evaluate mode metrics = defaultdict(float) epoch_samples = 0 for inputs, labels in dataloaders[phase]: inputs = inputs.to(device) labels = labels.to(device) # zero the parameter gradients optimizer.zero_grad() # forward # track history if only in train with torch.set_grad_enabled(phase == 'train'): outputs = model(inputs) loss = calc_loss(outputs, labels, 
metrics) # backward + optimize only if in training phase if phase == 'train': loss.backward() optimizer.step() # statistics epoch_samples += inputs.size(0) print_metrics(metrics, epoch_samples, phase) epoch_loss = metrics['loss'] / epoch_samples # deep copy the model if phase == 'val' and epoch_loss < best_loss: print("saving best model") best_loss = epoch_loss best_model_wts = copy.deepcopy(model.state_dict()) if phase == 'val': validation_loss_array.append(epoch_loss) else: training_loss_array.append(epoch_loss) time_elapsed = time.time() - since print('{:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60)) print('Best val loss: {:4f}'.format(best_loss)) print('\n\n') plt.plot(range(len(training_loss_array)), training_loss_array, label='training') plt.plot(range(len(validation_loss_array)), validation_loss_array, label='validation') plt.xlabel("Epoch") plt.ylabel("Loss") plt.legend() plt.show() # load best model weights model.load_state_dict(best_model_wts) return model """# Train""" device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print(device) num_class = 1 model = ResNetUNet(num_class).to(device) optimizer_ft = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=1e-4) exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=30, gamma=0.1) model = train_model(model, optimizer_ft, exp_lr_scheduler, num_epochs=20) """# Prediction""" import math import numpy as np model.eval() # Get the first batch inputs = next(iter(dataloaders['test'])) inputs = inputs.to(device) # Predict pred = model(inputs) # The loss functions include the sigmoid function. 
pred = F.sigmoid(pred) pred = pred.data.cpu().numpy() prediction_image = pred[0][0] prediction_image[prediction_image >= 0.5] = 1 prediction_image[prediction_image < 0.5] = 0 original_image = inputs.cpu()[0] original_image = original_image.permute(1, 2, 0) # Rearrange RGB in the correct order fig = plt.figure(figsize=(20,20)) ax1 = fig.add_subplot(2, 5, 1) ax1.imshow(prediction_image) ax2 = fig.add_subplot(2, 5, 2) ax2.imshow(original_image) plt.show()
!pip install albumentations==0.4.6 !pip install torch !pip install torchvision
random_line_split
blueberry_segmentation.py
# -*- coding: utf-8 -*- """blueberry_segmentation.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1fez-oHMJuNSvBrawtIQ9S5Wf9d8UeanD # Mount Google Drive """ from google.colab import drive drive.mount('/content/gdrive/', force_remount=True) """# Install external libraries""" !pip install albumentations==0.4.6 !pip install torch !pip install torchvision """# Imports""" import os import matplotlib.pyplot as plt from cv2 import imread, cvtColor, COLOR_BGR2RGB, COLOR_BGR2GRAY from PIL import Image import albumentations as A from albumentations.pytorch.transforms import ToTensorV2 import torch import torch.nn as nn from torchvision import models from torchvision import transforms from torch.utils.data import Dataset, DataLoader from torch import optim from torch.optim import lr_scheduler import time import copy """# Constants""" BASE_DIR = 'gdrive/MyDrive/datasets/Deep_BlueBerry_databases/instancesegmentation' IMAGES_DIR = BASE_DIR + '/images/' MASKS_DIR = BASE_DIR + '/masks/' TEST_DIR = BASE_DIR + '/test' IMAGE_HEIGHT = 512 IMAGE_WIDTH = 512 """# Dataset""" train_transform = A.Compose( [ A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=5, val_shift_limit=5), A.Rotate(), A.GaussNoise(), A.Resize(IMAGE_HEIGHT, IMAGE_WIDTH), ], additional_targets={ 'image' : 'image', 'mask' : 'image', } ) test_transform = A.Compose( [ A.Resize(IMAGE_HEIGHT, IMAGE_WIDTH) ] ) to_grayscale = A.Compose( [ ToTensorV2() ] ) class BlueberryDataset(Dataset): def __init__(self, base_path, image_path, mask_path, transform=None): self.images = [] self.masks = [] self.transform = transform self.to_tensor = transforms.Compose([transforms.ToTensor()]) self.process_mask = transforms.Compose( [ transforms.Grayscale(num_output_channels=1), transforms.ToTensor(), ] ) for image_file in os.listdir(image_path): self.images.append(os.path.join(image_path, image_file)) mask_file = image_file[-12:-3] + 'png' 
self.masks.append(os.path.join(mask_path, mask_file)) def __len__(self): return len(self.images) def __getitem__(self, index): image = imread(self.images[index]) image = cvtColor(image, COLOR_BGR2RGB) mask = imread(self.masks[index]) mask = cvtColor(mask, COLOR_BGR2RGB) transformed = self.transform(image=image, mask=mask) image = transformed['image'] mask = transformed['mask'] image = self.to_tensor(image) mask = Image.fromarray(mask) mask = self.process_mask(mask) return image, mask class BlueberryTestDataset(Dataset): def __init__(self, base_path, image_path, transform=None): self.images = [] self.transform = transform self.to_tensor = transforms.Compose([transforms.ToTensor()]) for image_file in os.listdir(image_path): self.images.append(os.path.join(image_path, image_file)) def __len__(self): return len(self.images) def __getitem__(self, index): image = imread(self.images[index]) image = cvtColor(image, COLOR_BGR2RGB) transformed = self.transform(image=image) image = transformed['image'] image = self.to_tensor(image) return image dataset = BlueberryDataset(BASE_DIR, IMAGES_DIR, MASKS_DIR, transform=train_transform) train_set, val_set = torch.utils.data.random_split(dataset, [6, 1]) test = BlueberryTestDataset(BASE_DIR, TEST_DIR, test_transform) dataloaders = { 'train': DataLoader(train_set, batch_size=1, shuffle=True), 'val': DataLoader(val_set, batch_size=1, shuffle=True), 'test': DataLoader(test, batch_size=1, shuffle=True) } """# Visualize Data""" image1, mask1 = dataset[0] image2, mask2 = dataset[2] fig = plt.figure(figsize=(20,20)) ax1 = fig.add_subplot(2, 5, 1) ax1.imshow(image1.permute(1, 2, 0)) ax2 = fig.add_subplot(2, 5, 2) ax2.imshow(mask1.permute(1, 2, 0).squeeze(2)) ax3 = fig.add_subplot(2, 5, 3) ax3.imshow(image2.permute(1, 2, 0)) ax4 = fig.add_subplot(2, 5, 4) ax4.imshow(mask2.permute(1, 2, 0).squeeze(2)) plt.show() """# Build model""" # Source: https://github.com/usuyama/pytorch-unet from torch import nn def convrelu(in_channels, out_channels, 
kernel, padding): return nn.Sequential( nn.Conv2d(in_channels, out_channels, kernel, padding=padding), nn.ReLU(inplace=True), ) class ResNetUNet(nn.Module): def __init__(self, n_class): super().__init__() self.base_model = models.resnet18(pretrained=True) self.base_layers = list(self.base_model.children()) self.layer0 = nn.Sequential(*self.base_layers[:3]) # size=(N, 64, x.H/2, x.W/2) self.layer0_1x1 = convrelu(64, 64, 1, 0) self.layer1 = nn.Sequential(*self.base_layers[3:5]) # size=(N, 64, x.H/4, x.W/4) self.layer1_1x1 = convrelu(64, 64, 1, 0) self.layer2 = self.base_layers[5] # size=(N, 128, x.H/8, x.W/8) self.layer2_1x1 = convrelu(128, 128, 1, 0) self.layer3 = self.base_layers[6] # size=(N, 256, x.H/16, x.W/16) self.layer3_1x1 = convrelu(256, 256, 1, 0) self.layer4 = self.base_layers[7] # size=(N, 512, x.H/32, x.W/32) self.layer4_1x1 = convrelu(512, 512, 1, 0) self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) self.conv_up3 = convrelu(256 + 512, 512, 3, 1) self.conv_up2 = convrelu(128 + 512, 256, 3, 1) self.conv_up1 = convrelu(64 + 256, 256, 3, 1) self.conv_up0 = convrelu(64 + 256, 128, 3, 1) self.conv_original_size0 = convrelu(3, 64, 3, 1) self.conv_original_size1 = convrelu(64, 64, 3, 1) self.conv_original_size2 = convrelu(64 + 128, 64, 3, 1) self.conv_last = nn.Conv2d(64, n_class, 1) def forward(self, input): x_original = self.conv_original_size0(input) x_original = self.conv_original_size1(x_original) layer0 = self.layer0(input) layer1 = self.layer1(layer0) layer2 = self.layer2(layer1) layer3 = self.layer3(layer2) layer4 = self.layer4(layer3) layer4 = self.layer4_1x1(layer4) x = self.upsample(layer4) layer3 = self.layer3_1x1(layer3) x = torch.cat([x, layer3], dim=1) x = self.conv_up3(x) x = self.upsample(x) layer2 = self.layer2_1x1(layer2) x = torch.cat([x, layer2], dim=1) x = self.conv_up2(x) x = self.upsample(x) layer1 = self.layer1_1x1(layer1) x = torch.cat([x, layer1], dim=1) x = self.conv_up1(x) x = self.upsample(x) layer0 
= self.layer0_1x1(layer0) x = torch.cat([x, layer0], dim=1) x = self.conv_up0(x) x = self.upsample(x) x = torch.cat([x, x_original], dim=1) x = self.conv_original_size2(x) out = self.conv_last(x) return out unet = ResNetUNet(n_class=3) """# Model summary""" device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') model = ResNetUNet(n_class=1) model = model.to(device) from torchsummary import summary summary(model, input_size=(3, 512, 512)) """# Define training functions""" # Source: https://github.com/usuyama/pytorch-unet from collections import defaultdict import torch.nn.functional as F def dice_loss(pred, target, smooth = 1.): pred = pred.contiguous() target = target.contiguous() intersection = (pred * target).sum(dim=2).sum(dim=2) loss = (1 - ((2. * intersection + smooth) / (pred.sum(dim=2).sum(dim=2) + target.sum(dim=2).sum(dim=2) + smooth))) return loss.mean() def calc_loss(pred, target, metrics, bce_weight=0.5): bce = F.binary_cross_entropy_with_logits(pred, target) pred = F.sigmoid(pred) dice = dice_loss(pred, target) loss = bce * bce_weight + dice * (1 - bce_weight) metrics['bce'] += bce.data.cpu().numpy() * target.size(0) metrics['dice'] += dice.data.cpu().numpy() * target.size(0) metrics['loss'] += loss.data.cpu().numpy() * target.size(0) return loss def print_metrics(metrics, epoch_samples, phase): outputs = [] for k in metrics.keys(): outputs.append("{}: {:4f}".format(k, metrics[k] / epoch_samples)) print("{}: {}".format(phase, ", ".join(outputs))) def train_model(model, optimizer, scheduler, num_epochs=25): training_loss_array = [] validation_loss_array = [] best_model_wts = copy.deepcopy(model.state_dict()) best_loss = 1e10 for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch, num_epochs - 1)) print('-' * 10) since = time.time() # Each epoch has a training and validation phase for phase in ['train', 'val']: if phase == 'train': scheduler.step() for param_group in optimizer.param_groups: print("LR", param_group['lr']) 
model.train() # Set model to training mode else: model.eval() # Set model to evaluate mode metrics = defaultdict(float) epoch_samples = 0 for inputs, labels in dataloaders[phase]:
print_metrics(metrics, epoch_samples, phase) epoch_loss = metrics['loss'] / epoch_samples # deep copy the model if phase == 'val' and epoch_loss < best_loss: print("saving best model") best_loss = epoch_loss best_model_wts = copy.deepcopy(model.state_dict()) if phase == 'val': validation_loss_array.append(epoch_loss) else: training_loss_array.append(epoch_loss) time_elapsed = time.time() - since print('{:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60)) print('Best val loss: {:4f}'.format(best_loss)) print('\n\n') plt.plot(range(len(training_loss_array)), training_loss_array, label='training') plt.plot(range(len(validation_loss_array)), validation_loss_array, label='validation') plt.xlabel("Epoch") plt.ylabel("Loss") plt.legend() plt.show() # load best model weights model.load_state_dict(best_model_wts) return model """# Train""" device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print(device) num_class = 1 model = ResNetUNet(num_class).to(device) optimizer_ft = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=1e-4) exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=30, gamma=0.1) model = train_model(model, optimizer_ft, exp_lr_scheduler, num_epochs=20) """# Prediction""" import math import numpy as np model.eval() # Get the first batch inputs = next(iter(dataloaders['test'])) inputs = inputs.to(device) # Predict pred = model(inputs) # The loss functions include the sigmoid function. pred = F.sigmoid(pred) pred = pred.data.cpu().numpy() prediction_image = pred[0][0] prediction_image[prediction_image >= 0.5] = 1 prediction_image[prediction_image < 0.5] = 0 original_image = inputs.cpu()[0] original_image = original_image.permute(1, 2, 0) # Rearrange RGB in the correct order fig = plt.figure(figsize=(20,20)) ax1 = fig.add_subplot(2, 5, 1) ax1.imshow(prediction_image) ax2 = fig.add_subplot(2, 5, 2) ax2.imshow(original_image) plt.show()
inputs = inputs.to(device) labels = labels.to(device) # zero the parameter gradients optimizer.zero_grad() # forward # track history if only in train with torch.set_grad_enabled(phase == 'train'): outputs = model(inputs) loss = calc_loss(outputs, labels, metrics) # backward + optimize only if in training phase if phase == 'train': loss.backward() optimizer.step() # statistics epoch_samples += inputs.size(0)
conditional_block
blueberry_segmentation.py
# -*- coding: utf-8 -*- """blueberry_segmentation.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1fez-oHMJuNSvBrawtIQ9S5Wf9d8UeanD # Mount Google Drive """ from google.colab import drive drive.mount('/content/gdrive/', force_remount=True) """# Install external libraries""" !pip install albumentations==0.4.6 !pip install torch !pip install torchvision """# Imports""" import os import matplotlib.pyplot as plt from cv2 import imread, cvtColor, COLOR_BGR2RGB, COLOR_BGR2GRAY from PIL import Image import albumentations as A from albumentations.pytorch.transforms import ToTensorV2 import torch import torch.nn as nn from torchvision import models from torchvision import transforms from torch.utils.data import Dataset, DataLoader from torch import optim from torch.optim import lr_scheduler import time import copy """# Constants""" BASE_DIR = 'gdrive/MyDrive/datasets/Deep_BlueBerry_databases/instancesegmentation' IMAGES_DIR = BASE_DIR + '/images/' MASKS_DIR = BASE_DIR + '/masks/' TEST_DIR = BASE_DIR + '/test' IMAGE_HEIGHT = 512 IMAGE_WIDTH = 512 """# Dataset""" train_transform = A.Compose( [ A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=5, val_shift_limit=5), A.Rotate(), A.GaussNoise(), A.Resize(IMAGE_HEIGHT, IMAGE_WIDTH), ], additional_targets={ 'image' : 'image', 'mask' : 'image', } ) test_transform = A.Compose( [ A.Resize(IMAGE_HEIGHT, IMAGE_WIDTH) ] ) to_grayscale = A.Compose( [ ToTensorV2() ] ) class BlueberryDataset(Dataset): def __init__(self, base_path, image_path, mask_path, transform=None):
def __len__(self): return len(self.images) def __getitem__(self, index): image = imread(self.images[index]) image = cvtColor(image, COLOR_BGR2RGB) mask = imread(self.masks[index]) mask = cvtColor(mask, COLOR_BGR2RGB) transformed = self.transform(image=image, mask=mask) image = transformed['image'] mask = transformed['mask'] image = self.to_tensor(image) mask = Image.fromarray(mask) mask = self.process_mask(mask) return image, mask class BlueberryTestDataset(Dataset): def __init__(self, base_path, image_path, transform=None): self.images = [] self.transform = transform self.to_tensor = transforms.Compose([transforms.ToTensor()]) for image_file in os.listdir(image_path): self.images.append(os.path.join(image_path, image_file)) def __len__(self): return len(self.images) def __getitem__(self, index): image = imread(self.images[index]) image = cvtColor(image, COLOR_BGR2RGB) transformed = self.transform(image=image) image = transformed['image'] image = self.to_tensor(image) return image dataset = BlueberryDataset(BASE_DIR, IMAGES_DIR, MASKS_DIR, transform=train_transform) train_set, val_set = torch.utils.data.random_split(dataset, [6, 1]) test = BlueberryTestDataset(BASE_DIR, TEST_DIR, test_transform) dataloaders = { 'train': DataLoader(train_set, batch_size=1, shuffle=True), 'val': DataLoader(val_set, batch_size=1, shuffle=True), 'test': DataLoader(test, batch_size=1, shuffle=True) } """# Visualize Data""" image1, mask1 = dataset[0] image2, mask2 = dataset[2] fig = plt.figure(figsize=(20,20)) ax1 = fig.add_subplot(2, 5, 1) ax1.imshow(image1.permute(1, 2, 0)) ax2 = fig.add_subplot(2, 5, 2) ax2.imshow(mask1.permute(1, 2, 0).squeeze(2)) ax3 = fig.add_subplot(2, 5, 3) ax3.imshow(image2.permute(1, 2, 0)) ax4 = fig.add_subplot(2, 5, 4) ax4.imshow(mask2.permute(1, 2, 0).squeeze(2)) plt.show() """# Build model""" # Source: https://github.com/usuyama/pytorch-unet from torch import nn def convrelu(in_channels, out_channels, kernel, padding): return nn.Sequential( 
nn.Conv2d(in_channels, out_channels, kernel, padding=padding), nn.ReLU(inplace=True), ) class ResNetUNet(nn.Module): def __init__(self, n_class): super().__init__() self.base_model = models.resnet18(pretrained=True) self.base_layers = list(self.base_model.children()) self.layer0 = nn.Sequential(*self.base_layers[:3]) # size=(N, 64, x.H/2, x.W/2) self.layer0_1x1 = convrelu(64, 64, 1, 0) self.layer1 = nn.Sequential(*self.base_layers[3:5]) # size=(N, 64, x.H/4, x.W/4) self.layer1_1x1 = convrelu(64, 64, 1, 0) self.layer2 = self.base_layers[5] # size=(N, 128, x.H/8, x.W/8) self.layer2_1x1 = convrelu(128, 128, 1, 0) self.layer3 = self.base_layers[6] # size=(N, 256, x.H/16, x.W/16) self.layer3_1x1 = convrelu(256, 256, 1, 0) self.layer4 = self.base_layers[7] # size=(N, 512, x.H/32, x.W/32) self.layer4_1x1 = convrelu(512, 512, 1, 0) self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) self.conv_up3 = convrelu(256 + 512, 512, 3, 1) self.conv_up2 = convrelu(128 + 512, 256, 3, 1) self.conv_up1 = convrelu(64 + 256, 256, 3, 1) self.conv_up0 = convrelu(64 + 256, 128, 3, 1) self.conv_original_size0 = convrelu(3, 64, 3, 1) self.conv_original_size1 = convrelu(64, 64, 3, 1) self.conv_original_size2 = convrelu(64 + 128, 64, 3, 1) self.conv_last = nn.Conv2d(64, n_class, 1) def forward(self, input): x_original = self.conv_original_size0(input) x_original = self.conv_original_size1(x_original) layer0 = self.layer0(input) layer1 = self.layer1(layer0) layer2 = self.layer2(layer1) layer3 = self.layer3(layer2) layer4 = self.layer4(layer3) layer4 = self.layer4_1x1(layer4) x = self.upsample(layer4) layer3 = self.layer3_1x1(layer3) x = torch.cat([x, layer3], dim=1) x = self.conv_up3(x) x = self.upsample(x) layer2 = self.layer2_1x1(layer2) x = torch.cat([x, layer2], dim=1) x = self.conv_up2(x) x = self.upsample(x) layer1 = self.layer1_1x1(layer1) x = torch.cat([x, layer1], dim=1) x = self.conv_up1(x) x = self.upsample(x) layer0 = self.layer0_1x1(layer0) x = 
torch.cat([x, layer0], dim=1) x = self.conv_up0(x) x = self.upsample(x) x = torch.cat([x, x_original], dim=1) x = self.conv_original_size2(x) out = self.conv_last(x) return out unet = ResNetUNet(n_class=3) """# Model summary""" device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') model = ResNetUNet(n_class=1) model = model.to(device) from torchsummary import summary summary(model, input_size=(3, 512, 512)) """# Define training functions""" # Source: https://github.com/usuyama/pytorch-unet from collections import defaultdict import torch.nn.functional as F def dice_loss(pred, target, smooth = 1.): pred = pred.contiguous() target = target.contiguous() intersection = (pred * target).sum(dim=2).sum(dim=2) loss = (1 - ((2. * intersection + smooth) / (pred.sum(dim=2).sum(dim=2) + target.sum(dim=2).sum(dim=2) + smooth))) return loss.mean() def calc_loss(pred, target, metrics, bce_weight=0.5): bce = F.binary_cross_entropy_with_logits(pred, target) pred = F.sigmoid(pred) dice = dice_loss(pred, target) loss = bce * bce_weight + dice * (1 - bce_weight) metrics['bce'] += bce.data.cpu().numpy() * target.size(0) metrics['dice'] += dice.data.cpu().numpy() * target.size(0) metrics['loss'] += loss.data.cpu().numpy() * target.size(0) return loss def print_metrics(metrics, epoch_samples, phase): outputs = [] for k in metrics.keys(): outputs.append("{}: {:4f}".format(k, metrics[k] / epoch_samples)) print("{}: {}".format(phase, ", ".join(outputs))) def train_model(model, optimizer, scheduler, num_epochs=25): training_loss_array = [] validation_loss_array = [] best_model_wts = copy.deepcopy(model.state_dict()) best_loss = 1e10 for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch, num_epochs - 1)) print('-' * 10) since = time.time() # Each epoch has a training and validation phase for phase in ['train', 'val']: if phase == 'train': scheduler.step() for param_group in optimizer.param_groups: print("LR", param_group['lr']) model.train() # Set model to training 
mode else: model.eval() # Set model to evaluate mode metrics = defaultdict(float) epoch_samples = 0 for inputs, labels in dataloaders[phase]: inputs = inputs.to(device) labels = labels.to(device) # zero the parameter gradients optimizer.zero_grad() # forward # track history if only in train with torch.set_grad_enabled(phase == 'train'): outputs = model(inputs) loss = calc_loss(outputs, labels, metrics) # backward + optimize only if in training phase if phase == 'train': loss.backward() optimizer.step() # statistics epoch_samples += inputs.size(0) print_metrics(metrics, epoch_samples, phase) epoch_loss = metrics['loss'] / epoch_samples # deep copy the model if phase == 'val' and epoch_loss < best_loss: print("saving best model") best_loss = epoch_loss best_model_wts = copy.deepcopy(model.state_dict()) if phase == 'val': validation_loss_array.append(epoch_loss) else: training_loss_array.append(epoch_loss) time_elapsed = time.time() - since print('{:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60)) print('Best val loss: {:4f}'.format(best_loss)) print('\n\n') plt.plot(range(len(training_loss_array)), training_loss_array, label='training') plt.plot(range(len(validation_loss_array)), validation_loss_array, label='validation') plt.xlabel("Epoch") plt.ylabel("Loss") plt.legend() plt.show() # load best model weights model.load_state_dict(best_model_wts) return model """# Train""" device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print(device) num_class = 1 model = ResNetUNet(num_class).to(device) optimizer_ft = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=1e-4) exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=30, gamma=0.1) model = train_model(model, optimizer_ft, exp_lr_scheduler, num_epochs=20) """# Prediction""" import math import numpy as np model.eval() # Get the first batch inputs = next(iter(dataloaders['test'])) inputs = inputs.to(device) # Predict pred = model(inputs) # The loss functions 
include the sigmoid function. pred = F.sigmoid(pred) pred = pred.data.cpu().numpy() prediction_image = pred[0][0] prediction_image[prediction_image >= 0.5] = 1 prediction_image[prediction_image < 0.5] = 0 original_image = inputs.cpu()[0] original_image = original_image.permute(1, 2, 0) # Rearrange RGB in the correct order fig = plt.figure(figsize=(20,20)) ax1 = fig.add_subplot(2, 5, 1) ax1.imshow(prediction_image) ax2 = fig.add_subplot(2, 5, 2) ax2.imshow(original_image) plt.show()
self.images = [] self.masks = [] self.transform = transform self.to_tensor = transforms.Compose([transforms.ToTensor()]) self.process_mask = transforms.Compose( [ transforms.Grayscale(num_output_channels=1), transforms.ToTensor(), ] ) for image_file in os.listdir(image_path): self.images.append(os.path.join(image_path, image_file)) mask_file = image_file[-12:-3] + 'png' self.masks.append(os.path.join(mask_path, mask_file))
identifier_body
post.page.ts
import { Component, ViewChild,Input } from '@angular/core'; import { NavController, IonInfiniteScroll, Platform, ActionSheetController, ToastController, MenuController, ModalController, AlertController } from '@ionic/angular'; import { Router } from '@angular/router'; import { Camera, CameraOptions } from '@ionic-native/camera/ngx'; import { PostService } from '../../service/post.service'; import { UserService } from '../../service/user.service'; import { FileTransfer, FileUploadOptions, FileTransferObject } from '@ionic-native/file-transfer/ngx'; import { File } from '@ionic-native/file/ngx'; import { CommentsPage } from '../comments/comments.page'; @Component({ selector: 'app-post', templateUrl: 'post.page.html', styleUrls: ['post.page.scss'] }) export class PostPage { @ViewChild(IonInfiniteScroll) infiniteScroll: IonInfiniteScroll; @Input('handle') handle; countCarItem = 99; badgeCount = 6; postFeeds: any = []; post_type: any = { shared: 'shared', link: 'shared a link', poll: 'created a poll', product: 'added new product for sell', article: 'added new article', video : 'added a video', audio: 'added an audio', file: 'added a file', photos: 'added a photo', profile_picture_male: 'updated his profile picture', profile_picture_female: 'updated her profile picture', profile_cover_male: 'updated his cover photo', profile_cover_female: 'updated her cover photo', page_picture: 'updated page picture', page_cover: 'updated cover photo', group_picture: 'updated group picture', group_cover: 'updated group cover', event_cover: 'updated event cover' }; sub : any = ''; slidesPerView : number = 1; public postElement = []; public sharedInfo = []; private pageCount = 2; private arrayPosition = 0; private isAndroid = false; private mediapath = "https://followthebirds.com/content/uploads/"; usermayknow : any = []; stories : any = []; height : number = 300; width : number = 300; private user_picture = localStorage.getItem('user_picture'); slideOpts = { initialSlide: 3, speed: 400 
}; constructor( public navCtrl: NavController, public toastCtrl: ToastController, private camera: Camera, public actionSheetCtrl: ActionSheetController, public menu: MenuController, public modalCtrl: ModalController, private transfer: FileTransfer, private file: File, private platform: Platform, private alertCtrl: AlertController, private post: PostService, public user: UserService, public router: Router ) { platform.ready().then((readySource) => { this.width = platform.width(); this.height = platform.height(); }); } ngOnInit(){ this.getStories(); this.isAndroid = this.platform.is("android"); this.postElement['handle'] = "me"; this.postElement['id'] = ''; this.post.getfeeds('newsfeed',localStorage.getItem('user_id'),localStorage.getItem('user_id'),{}) .then(data => { this.postFeeds = []; let item = data[0]; localStorage.setItem('last_post_live',item[0].post_id); for (var key in item) { if(item[key].post_type == 'photos'){ this.post_type.photos = "added "+item[key].photos_num+"photos"; } this.postFeeds.push(item[key]); } }); } doInfinite(event) { setTimeout(() => { this.post.getfeeds('newsfeed',localStorage.getItem('user_id'),localStorage.getItem('user_id'),{'page': this.pageCount}) .then(data => { if(data[0].length > 0) { let item = data[0]; for (var key in item) { this.postFeeds.push(item[key]); } } }); this.pageCount = this.pageCount + 1; event.target.complete(); }, 500); } doRefresh(event) { this.ngOnInit(); setTimeout(() => { console.log('Async operation has ended'); event.target.complete(); }, 2000); } getPeopleYouMayKnow(){ this.user.getPeopleYouMayKnow('may_know',parseInt(localStorage.getItem('user_id'))) .then(data => { this.usermayknow = data[0]; }); } getStories(){ this.user.getStories({user_id:localStorage.getItem('user_id')}) .then(data => { this.stories = data[0]; console.log("stories",data) }); } viewStory(story){ this.router.navigate(['/StoryPage',{story: story}]); } viewPost(post) { if(post.photos_num == '1'){ 
this.router.navigate(['/view-photo',{photo: post.photos[0]}]); } else { this.router.navigate(['/view-post',{post: post}]); } } viewProfile(post) { if(post.user_type == 'user'){ this.router.navigate(['/profile',{user_name: post.user_name,user_id:post.user_id}]); } if(post.user_type == 'page'){ this.router.navigate(['/PageProfilePage',{pageProfile:post}]); } if(post.user_type == 'group'){ this.router.navigate(['/GroupProfilePage',{groupProfile:post}]); } if(post.user_type == 'event'){ this.router.navigate(['/EventProfilePage',{eventProfile:post}]); } } downloadAttachment(filePath){ let arr = filePath.split('/'); var filename = arr.pop(); let url = encodeURI(filePath); const fileTransfer: FileTransferObject = this.transfer.create(); fileTransfer.download(this.mediapath+filePath, this.file.dataDirectory + filename).then((entry) => { let toast = this.toastCtrl.create({ message: "Attachment bas been download", duration: 3000, position: 'top' }); }, (error) => { // handle error let toast = this.toastCtrl.create({ message: "Downloading failure! retry.", duration: 3000, position: 'top' }); }); } async viewComments(index,comments,post_id){ const modal = await this.modalCtrl.create({ component: CommentsPage, componentProps: { 'comments': comments, 'post_id': post_id, 'handle': 'post' } }); await modal.present(); } async sharePostCtrl(post_id) { let prompt = await this.alertCtrl.create({ message: 'Share this post', inputs : [ { type:'radio', label:'Share post now ', value:post_id }, { type:'radio', label:'Write Post', value:post_id }], buttons : [ { text: "Cancel", handler: data => { console.log("cancel clicked"); } }, { text: "Share", handler: data => { this.sharePost('share',post_id); } }]}); await prompt.present(); } async postActivity(event,post) { let buttons : any = [ { icon: !this.platform.is('ios') ? 
'ios-bookmark' : null, text: 'Save Post', handler: () => { this.reactAction('save_post',post.post_id); } } ]; if(post.author_id != localStorage.getItem('user_id')){ let report : any = { icon: !this.platform.is('ios') ? 'ios-flag' : null, text: 'Report Post', handler: () => { this.reportAction("post",post.post_id) } }; let hide : any = { icon: !this.platform.is('ios') ? 'ios-eye-off' : null, text: 'Hide Post', handler: () => { event.target.parentNode.parentNode.parentNode.parentNode.remove(); this.reactAction("hide_post",post.post_id) } }; buttons.push(report); buttons.push(hide); } if(post.author_id == localStorage.getItem('user_id')){ let btn : any = { icon: !this.platform.is('ios') ? 'ios-trash' : null, text: 'Delete Post', handler: async () => { const confirm = await this.alertCtrl.create({ header: 'Delete post?', message: 'Once you delete you can not undo this step.', buttons: [ { text: 'Cancel', handler: () => { } } ,{ text: 'Delete', handler: () => { event.target.parentNode.parentNode.parentNode.parentNode.remove(); this.reactAction("delete_post",post.post_id) } } ] }); await confirm.present(); } }; buttons.push(btn); } const actionSheet = await this.actionSheetCtrl.create({ buttons }); await actionSheet.present(); } getBackgroundStyle(url) { if(!url){ return 'url(assets/followthebirdImgs/no-profile-img.jpeg)' } else { return 'url(' + this.mediapath+url + ')' } } getStoryBackgroundStyle(media) { if(media != 'null'){ console.log(media); let obj = JSON.parse(media) return 'url(' + this.mediapath+obj[0].src + ')' } else { return 'url(assets/followthebirdImgs/story_background.png)' } } getMedia(media) { let obj = JSON.parse(media) return this.mediapath+obj[0].src; } sharePost(type,id)
reactAction(type,post_id){ let params :any = { 'do': type, 'id': post_id, 'my_id' : localStorage.getItem('user_id') }; this.post.reaction(params).subscribe((resp) => { }, (err) => { }); } reportAction(handle,id){ let params :any = { 'handle': handle, 'id': id, 'my_id' : localStorage.getItem('user_id') }; this.user.report(params).subscribe(async (resp) => { const toast = await this.toastCtrl.create({ message: "Report has been submitted successfully", duration: 3000, position: 'top' }); toast.present(); }, async (err) => { const toast = await this.toastCtrl.create({ message: "Failed to Submit Report. Please Try Again", duration: 3000, position: 'top', }); toast.present(); }); } AddStory(){ this.router.navigate(['/AddStoryPage']); } getLiveLitePost(){ let items :any = { type:'newsfeed', user_id:localStorage.getItem('user_id'), last_post_live:localStorage.getItem('last_post_live') } this.user.getLiveLitePost(items).then((data) => { let item : any = data; if(item.length > 0){ localStorage.setItem('last_post_live',data[0].post_id); for (var key in item) { this.postFeeds.unshift(item[key]); } } }, (err) => { }); } }
{ this.post.sharePost({'do':type,id:id,my_id:localStorage.getItem('user_id')}).subscribe(async (resp) => { const toast = await this.toastCtrl.create({ message: "Post has been shared successfully", duration: 3000, position: 'top' }); toast.present(); }, async (err) => { const toast = await this.toastCtrl.create({ message: "Unable to post. Retry", duration: 3000, position: 'top', }); toast.present(); }); }
identifier_body
post.page.ts
import { Component, ViewChild,Input } from '@angular/core'; import { NavController, IonInfiniteScroll, Platform, ActionSheetController, ToastController, MenuController, ModalController, AlertController } from '@ionic/angular'; import { Router } from '@angular/router'; import { Camera, CameraOptions } from '@ionic-native/camera/ngx'; import { PostService } from '../../service/post.service'; import { UserService } from '../../service/user.service'; import { FileTransfer, FileUploadOptions, FileTransferObject } from '@ionic-native/file-transfer/ngx'; import { File } from '@ionic-native/file/ngx'; import { CommentsPage } from '../comments/comments.page'; @Component({ selector: 'app-post', templateUrl: 'post.page.html', styleUrls: ['post.page.scss'] }) export class PostPage { @ViewChild(IonInfiniteScroll) infiniteScroll: IonInfiniteScroll; @Input('handle') handle; countCarItem = 99; badgeCount = 6; postFeeds: any = []; post_type: any = { shared: 'shared', link: 'shared a link', poll: 'created a poll', product: 'added new product for sell', article: 'added new article', video : 'added a video', audio: 'added an audio', file: 'added a file', photos: 'added a photo', profile_picture_male: 'updated his profile picture', profile_picture_female: 'updated her profile picture', profile_cover_male: 'updated his cover photo', profile_cover_female: 'updated her cover photo', page_picture: 'updated page picture', page_cover: 'updated cover photo', group_picture: 'updated group picture', group_cover: 'updated group cover', event_cover: 'updated event cover' }; sub : any = ''; slidesPerView : number = 1; public postElement = []; public sharedInfo = []; private pageCount = 2; private arrayPosition = 0; private isAndroid = false; private mediapath = "https://followthebirds.com/content/uploads/"; usermayknow : any = []; stories : any = []; height : number = 300; width : number = 300; private user_picture = localStorage.getItem('user_picture'); slideOpts = { initialSlide: 3, speed: 400 
}; constructor( public navCtrl: NavController, public toastCtrl: ToastController, private camera: Camera, public actionSheetCtrl: ActionSheetController, public menu: MenuController, public modalCtrl: ModalController, private transfer: FileTransfer, private file: File, private platform: Platform, private alertCtrl: AlertController, private post: PostService, public user: UserService, public router: Router ) { platform.ready().then((readySource) => { this.width = platform.width(); this.height = platform.height(); }); } ngOnInit(){ this.getStories(); this.isAndroid = this.platform.is("android"); this.postElement['handle'] = "me"; this.postElement['id'] = ''; this.post.getfeeds('newsfeed',localStorage.getItem('user_id'),localStorage.getItem('user_id'),{}) .then(data => { this.postFeeds = []; let item = data[0]; localStorage.setItem('last_post_live',item[0].post_id); for (var key in item) { if(item[key].post_type == 'photos'){ this.post_type.photos = "added "+item[key].photos_num+"photos"; } this.postFeeds.push(item[key]); } }); } doInfinite(event) { setTimeout(() => { this.post.getfeeds('newsfeed',localStorage.getItem('user_id'),localStorage.getItem('user_id'),{'page': this.pageCount}) .then(data => { if(data[0].length > 0) { let item = data[0]; for (var key in item) { this.postFeeds.push(item[key]); } } }); this.pageCount = this.pageCount + 1; event.target.complete(); }, 500); } doRefresh(event) { this.ngOnInit(); setTimeout(() => { console.log('Async operation has ended'); event.target.complete(); }, 2000); } getPeopleYouMayKnow(){ this.user.getPeopleYouMayKnow('may_know',parseInt(localStorage.getItem('user_id'))) .then(data => { this.usermayknow = data[0]; }); } getStories(){ this.user.getStories({user_id:localStorage.getItem('user_id')}) .then(data => { this.stories = data[0]; console.log("stories",data) }); } viewStory(story){ this.router.navigate(['/StoryPage',{story: story}]); } viewPost(post) { if(post.photos_num == '1'){ 
this.router.navigate(['/view-photo',{photo: post.photos[0]}]); } else { this.router.navigate(['/view-post',{post: post}]); } } viewProfile(post) { if(post.user_type == 'user'){ this.router.navigate(['/profile',{user_name: post.user_name,user_id:post.user_id}]); } if(post.user_type == 'page'){ this.router.navigate(['/PageProfilePage',{pageProfile:post}]); } if(post.user_type == 'group'){ this.router.navigate(['/GroupProfilePage',{groupProfile:post}]); } if(post.user_type == 'event'){ this.router.navigate(['/EventProfilePage',{eventProfile:post}]); } } downloadAttachment(filePath){ let arr = filePath.split('/'); var filename = arr.pop(); let url = encodeURI(filePath); const fileTransfer: FileTransferObject = this.transfer.create(); fileTransfer.download(this.mediapath+filePath, this.file.dataDirectory + filename).then((entry) => { let toast = this.toastCtrl.create({ message: "Attachment bas been download", duration: 3000, position: 'top' }); }, (error) => { // handle error let toast = this.toastCtrl.create({ message: "Downloading failure! retry.", duration: 3000, position: 'top' }); }); } async viewComments(index,comments,post_id){ const modal = await this.modalCtrl.create({ component: CommentsPage, componentProps: { 'comments': comments, 'post_id': post_id, 'handle': 'post' } }); await modal.present(); } async sharePostCtrl(post_id) { let prompt = await this.alertCtrl.create({ message: 'Share this post', inputs : [ { type:'radio', label:'Share post now ', value:post_id }, { type:'radio', label:'Write Post', value:post_id }], buttons : [ { text: "Cancel", handler: data => { console.log("cancel clicked"); } }, { text: "Share", handler: data => { this.sharePost('share',post_id); } }]}); await prompt.present(); } async postActivity(event,post) { let buttons : any = [ { icon: !this.platform.is('ios') ? 
'ios-bookmark' : null, text: 'Save Post', handler: () => { this.reactAction('save_post',post.post_id); } } ]; if(post.author_id != localStorage.getItem('user_id')){ let report : any = { icon: !this.platform.is('ios') ? 'ios-flag' : null, text: 'Report Post', handler: () => { this.reportAction("post",post.post_id) } }; let hide : any = { icon: !this.platform.is('ios') ? 'ios-eye-off' : null, text: 'Hide Post', handler: () => { event.target.parentNode.parentNode.parentNode.parentNode.remove(); this.reactAction("hide_post",post.post_id) } }; buttons.push(report); buttons.push(hide); } if(post.author_id == localStorage.getItem('user_id')){ let btn : any = { icon: !this.platform.is('ios') ? 'ios-trash' : null, text: 'Delete Post', handler: async () => { const confirm = await this.alertCtrl.create({ header: 'Delete post?', message: 'Once you delete you can not undo this step.', buttons: [ { text: 'Cancel', handler: () => { } } ,{ text: 'Delete', handler: () => { event.target.parentNode.parentNode.parentNode.parentNode.remove(); this.reactAction("delete_post",post.post_id) } } ] }); await confirm.present(); } }; buttons.push(btn); } const actionSheet = await this.actionSheetCtrl.create({ buttons }); await actionSheet.present(); } getBackgroundStyle(url) { if(!url){ return 'url(assets/followthebirdImgs/no-profile-img.jpeg)' } else { return 'url(' + this.mediapath+url + ')' } } getStoryBackgroundStyle(media) { if(media != 'null')
else { return 'url(assets/followthebirdImgs/story_background.png)' } } getMedia(media) { let obj = JSON.parse(media) return this.mediapath+obj[0].src; } sharePost(type,id){ this.post.sharePost({'do':type,id:id,my_id:localStorage.getItem('user_id')}).subscribe(async (resp) => { const toast = await this.toastCtrl.create({ message: "Post has been shared successfully", duration: 3000, position: 'top' }); toast.present(); }, async (err) => { const toast = await this.toastCtrl.create({ message: "Unable to post. Retry", duration: 3000, position: 'top', }); toast.present(); }); } reactAction(type,post_id){ let params :any = { 'do': type, 'id': post_id, 'my_id' : localStorage.getItem('user_id') }; this.post.reaction(params).subscribe((resp) => { }, (err) => { }); } reportAction(handle,id){ let params :any = { 'handle': handle, 'id': id, 'my_id' : localStorage.getItem('user_id') }; this.user.report(params).subscribe(async (resp) => { const toast = await this.toastCtrl.create({ message: "Report has been submitted successfully", duration: 3000, position: 'top' }); toast.present(); }, async (err) => { const toast = await this.toastCtrl.create({ message: "Failed to Submit Report. Please Try Again", duration: 3000, position: 'top', }); toast.present(); }); } AddStory(){ this.router.navigate(['/AddStoryPage']); } getLiveLitePost(){ let items :any = { type:'newsfeed', user_id:localStorage.getItem('user_id'), last_post_live:localStorage.getItem('last_post_live') } this.user.getLiveLitePost(items).then((data) => { let item : any = data; if(item.length > 0){ localStorage.setItem('last_post_live',data[0].post_id); for (var key in item) { this.postFeeds.unshift(item[key]); } } }, (err) => { }); } }
{ console.log(media); let obj = JSON.parse(media) return 'url(' + this.mediapath+obj[0].src + ')' }
conditional_block
post.page.ts
import { Component, ViewChild,Input } from '@angular/core'; import { NavController, IonInfiniteScroll, Platform, ActionSheetController, ToastController, MenuController, ModalController, AlertController } from '@ionic/angular'; import { Router } from '@angular/router'; import { Camera, CameraOptions } from '@ionic-native/camera/ngx'; import { PostService } from '../../service/post.service'; import { UserService } from '../../service/user.service'; import { FileTransfer, FileUploadOptions, FileTransferObject } from '@ionic-native/file-transfer/ngx'; import { File } from '@ionic-native/file/ngx'; import { CommentsPage } from '../comments/comments.page'; @Component({ selector: 'app-post', templateUrl: 'post.page.html', styleUrls: ['post.page.scss'] }) export class PostPage { @ViewChild(IonInfiniteScroll) infiniteScroll: IonInfiniteScroll; @Input('handle') handle; countCarItem = 99; badgeCount = 6; postFeeds: any = []; post_type: any = { shared: 'shared', link: 'shared a link', poll: 'created a poll', product: 'added new product for sell', article: 'added new article', video : 'added a video', audio: 'added an audio', file: 'added a file', photos: 'added a photo', profile_picture_male: 'updated his profile picture', profile_picture_female: 'updated her profile picture', profile_cover_male: 'updated his cover photo', profile_cover_female: 'updated her cover photo', page_picture: 'updated page picture', page_cover: 'updated cover photo', group_picture: 'updated group picture', group_cover: 'updated group cover', event_cover: 'updated event cover' }; sub : any = ''; slidesPerView : number = 1; public postElement = []; public sharedInfo = []; private pageCount = 2; private arrayPosition = 0; private isAndroid = false; private mediapath = "https://followthebirds.com/content/uploads/"; usermayknow : any = []; stories : any = []; height : number = 300; width : number = 300; private user_picture = localStorage.getItem('user_picture'); slideOpts = { initialSlide: 3, speed: 400 
}; constructor( public navCtrl: NavController, public toastCtrl: ToastController, private camera: Camera, public actionSheetCtrl: ActionSheetController, public menu: MenuController, public modalCtrl: ModalController, private transfer: FileTransfer, private file: File, private platform: Platform, private alertCtrl: AlertController, private post: PostService, public user: UserService, public router: Router ) { platform.ready().then((readySource) => { this.width = platform.width(); this.height = platform.height(); }); } ngOnInit(){ this.getStories(); this.isAndroid = this.platform.is("android"); this.postElement['handle'] = "me";
let item = data[0]; localStorage.setItem('last_post_live',item[0].post_id); for (var key in item) { if(item[key].post_type == 'photos'){ this.post_type.photos = "added "+item[key].photos_num+"photos"; } this.postFeeds.push(item[key]); } }); } doInfinite(event) { setTimeout(() => { this.post.getfeeds('newsfeed',localStorage.getItem('user_id'),localStorage.getItem('user_id'),{'page': this.pageCount}) .then(data => { if(data[0].length > 0) { let item = data[0]; for (var key in item) { this.postFeeds.push(item[key]); } } }); this.pageCount = this.pageCount + 1; event.target.complete(); }, 500); } doRefresh(event) { this.ngOnInit(); setTimeout(() => { console.log('Async operation has ended'); event.target.complete(); }, 2000); } getPeopleYouMayKnow(){ this.user.getPeopleYouMayKnow('may_know',parseInt(localStorage.getItem('user_id'))) .then(data => { this.usermayknow = data[0]; }); } getStories(){ this.user.getStories({user_id:localStorage.getItem('user_id')}) .then(data => { this.stories = data[0]; console.log("stories",data) }); } viewStory(story){ this.router.navigate(['/StoryPage',{story: story}]); } viewPost(post) { if(post.photos_num == '1'){ this.router.navigate(['/view-photo',{photo: post.photos[0]}]); } else { this.router.navigate(['/view-post',{post: post}]); } } viewProfile(post) { if(post.user_type == 'user'){ this.router.navigate(['/profile',{user_name: post.user_name,user_id:post.user_id}]); } if(post.user_type == 'page'){ this.router.navigate(['/PageProfilePage',{pageProfile:post}]); } if(post.user_type == 'group'){ this.router.navigate(['/GroupProfilePage',{groupProfile:post}]); } if(post.user_type == 'event'){ this.router.navigate(['/EventProfilePage',{eventProfile:post}]); } } downloadAttachment(filePath){ let arr = filePath.split('/'); var filename = arr.pop(); let url = encodeURI(filePath); const fileTransfer: FileTransferObject = this.transfer.create(); fileTransfer.download(this.mediapath+filePath, this.file.dataDirectory + filename).then((entry) => 
{ let toast = this.toastCtrl.create({ message: "Attachment bas been download", duration: 3000, position: 'top' }); }, (error) => { // handle error let toast = this.toastCtrl.create({ message: "Downloading failure! retry.", duration: 3000, position: 'top' }); }); } async viewComments(index,comments,post_id){ const modal = await this.modalCtrl.create({ component: CommentsPage, componentProps: { 'comments': comments, 'post_id': post_id, 'handle': 'post' } }); await modal.present(); } async sharePostCtrl(post_id) { let prompt = await this.alertCtrl.create({ message: 'Share this post', inputs : [ { type:'radio', label:'Share post now ', value:post_id }, { type:'radio', label:'Write Post', value:post_id }], buttons : [ { text: "Cancel", handler: data => { console.log("cancel clicked"); } }, { text: "Share", handler: data => { this.sharePost('share',post_id); } }]}); await prompt.present(); } async postActivity(event,post) { let buttons : any = [ { icon: !this.platform.is('ios') ? 'ios-bookmark' : null, text: 'Save Post', handler: () => { this.reactAction('save_post',post.post_id); } } ]; if(post.author_id != localStorage.getItem('user_id')){ let report : any = { icon: !this.platform.is('ios') ? 'ios-flag' : null, text: 'Report Post', handler: () => { this.reportAction("post",post.post_id) } }; let hide : any = { icon: !this.platform.is('ios') ? 'ios-eye-off' : null, text: 'Hide Post', handler: () => { event.target.parentNode.parentNode.parentNode.parentNode.remove(); this.reactAction("hide_post",post.post_id) } }; buttons.push(report); buttons.push(hide); } if(post.author_id == localStorage.getItem('user_id')){ let btn : any = { icon: !this.platform.is('ios') ? 
'ios-trash' : null, text: 'Delete Post', handler: async () => { const confirm = await this.alertCtrl.create({ header: 'Delete post?', message: 'Once you delete you can not undo this step.', buttons: [ { text: 'Cancel', handler: () => { } } ,{ text: 'Delete', handler: () => { event.target.parentNode.parentNode.parentNode.parentNode.remove(); this.reactAction("delete_post",post.post_id) } } ] }); await confirm.present(); } }; buttons.push(btn); } const actionSheet = await this.actionSheetCtrl.create({ buttons }); await actionSheet.present(); } getBackgroundStyle(url) { if(!url){ return 'url(assets/followthebirdImgs/no-profile-img.jpeg)' } else { return 'url(' + this.mediapath+url + ')' } } getStoryBackgroundStyle(media) { if(media != 'null'){ console.log(media); let obj = JSON.parse(media) return 'url(' + this.mediapath+obj[0].src + ')' } else { return 'url(assets/followthebirdImgs/story_background.png)' } } getMedia(media) { let obj = JSON.parse(media) return this.mediapath+obj[0].src; } sharePost(type,id){ this.post.sharePost({'do':type,id:id,my_id:localStorage.getItem('user_id')}).subscribe(async (resp) => { const toast = await this.toastCtrl.create({ message: "Post has been shared successfully", duration: 3000, position: 'top' }); toast.present(); }, async (err) => { const toast = await this.toastCtrl.create({ message: "Unable to post. 
Retry", duration: 3000, position: 'top', }); toast.present(); }); } reactAction(type,post_id){ let params :any = { 'do': type, 'id': post_id, 'my_id' : localStorage.getItem('user_id') }; this.post.reaction(params).subscribe((resp) => { }, (err) => { }); } reportAction(handle,id){ let params :any = { 'handle': handle, 'id': id, 'my_id' : localStorage.getItem('user_id') }; this.user.report(params).subscribe(async (resp) => { const toast = await this.toastCtrl.create({ message: "Report has been submitted successfully", duration: 3000, position: 'top' }); toast.present(); }, async (err) => { const toast = await this.toastCtrl.create({ message: "Failed to Submit Report. Please Try Again", duration: 3000, position: 'top', }); toast.present(); }); } AddStory(){ this.router.navigate(['/AddStoryPage']); } getLiveLitePost(){ let items :any = { type:'newsfeed', user_id:localStorage.getItem('user_id'), last_post_live:localStorage.getItem('last_post_live') } this.user.getLiveLitePost(items).then((data) => { let item : any = data; if(item.length > 0){ localStorage.setItem('last_post_live',data[0].post_id); for (var key in item) { this.postFeeds.unshift(item[key]); } } }, (err) => { }); } }
this.postElement['id'] = ''; this.post.getfeeds('newsfeed',localStorage.getItem('user_id'),localStorage.getItem('user_id'),{}) .then(data => { this.postFeeds = [];
random_line_split
post.page.ts
import { Component, ViewChild,Input } from '@angular/core'; import { NavController, IonInfiniteScroll, Platform, ActionSheetController, ToastController, MenuController, ModalController, AlertController } from '@ionic/angular'; import { Router } from '@angular/router'; import { Camera, CameraOptions } from '@ionic-native/camera/ngx'; import { PostService } from '../../service/post.service'; import { UserService } from '../../service/user.service'; import { FileTransfer, FileUploadOptions, FileTransferObject } from '@ionic-native/file-transfer/ngx'; import { File } from '@ionic-native/file/ngx'; import { CommentsPage } from '../comments/comments.page'; @Component({ selector: 'app-post', templateUrl: 'post.page.html', styleUrls: ['post.page.scss'] }) export class PostPage { @ViewChild(IonInfiniteScroll) infiniteScroll: IonInfiniteScroll; @Input('handle') handle; countCarItem = 99; badgeCount = 6; postFeeds: any = []; post_type: any = { shared: 'shared', link: 'shared a link', poll: 'created a poll', product: 'added new product for sell', article: 'added new article', video : 'added a video', audio: 'added an audio', file: 'added a file', photos: 'added a photo', profile_picture_male: 'updated his profile picture', profile_picture_female: 'updated her profile picture', profile_cover_male: 'updated his cover photo', profile_cover_female: 'updated her cover photo', page_picture: 'updated page picture', page_cover: 'updated cover photo', group_picture: 'updated group picture', group_cover: 'updated group cover', event_cover: 'updated event cover' }; sub : any = ''; slidesPerView : number = 1; public postElement = []; public sharedInfo = []; private pageCount = 2; private arrayPosition = 0; private isAndroid = false; private mediapath = "https://followthebirds.com/content/uploads/"; usermayknow : any = []; stories : any = []; height : number = 300; width : number = 300; private user_picture = localStorage.getItem('user_picture'); slideOpts = { initialSlide: 3, speed: 400 
}; constructor( public navCtrl: NavController, public toastCtrl: ToastController, private camera: Camera, public actionSheetCtrl: ActionSheetController, public menu: MenuController, public modalCtrl: ModalController, private transfer: FileTransfer, private file: File, private platform: Platform, private alertCtrl: AlertController, private post: PostService, public user: UserService, public router: Router ) { platform.ready().then((readySource) => { this.width = platform.width(); this.height = platform.height(); }); } ngOnInit(){ this.getStories(); this.isAndroid = this.platform.is("android"); this.postElement['handle'] = "me"; this.postElement['id'] = ''; this.post.getfeeds('newsfeed',localStorage.getItem('user_id'),localStorage.getItem('user_id'),{}) .then(data => { this.postFeeds = []; let item = data[0]; localStorage.setItem('last_post_live',item[0].post_id); for (var key in item) { if(item[key].post_type == 'photos'){ this.post_type.photos = "added "+item[key].photos_num+"photos"; } this.postFeeds.push(item[key]); } }); } doInfinite(event) { setTimeout(() => { this.post.getfeeds('newsfeed',localStorage.getItem('user_id'),localStorage.getItem('user_id'),{'page': this.pageCount}) .then(data => { if(data[0].length > 0) { let item = data[0]; for (var key in item) { this.postFeeds.push(item[key]); } } }); this.pageCount = this.pageCount + 1; event.target.complete(); }, 500); } doRefresh(event) { this.ngOnInit(); setTimeout(() => { console.log('Async operation has ended'); event.target.complete(); }, 2000); } getPeopleYouMayKnow(){ this.user.getPeopleYouMayKnow('may_know',parseInt(localStorage.getItem('user_id'))) .then(data => { this.usermayknow = data[0]; }); } getStories(){ this.user.getStories({user_id:localStorage.getItem('user_id')}) .then(data => { this.stories = data[0]; console.log("stories",data) }); } viewStory(story){ this.router.navigate(['/StoryPage',{story: story}]); } viewPost(post) { if(post.photos_num == '1'){ 
this.router.navigate(['/view-photo',{photo: post.photos[0]}]); } else { this.router.navigate(['/view-post',{post: post}]); } } viewProfile(post) { if(post.user_type == 'user'){ this.router.navigate(['/profile',{user_name: post.user_name,user_id:post.user_id}]); } if(post.user_type == 'page'){ this.router.navigate(['/PageProfilePage',{pageProfile:post}]); } if(post.user_type == 'group'){ this.router.navigate(['/GroupProfilePage',{groupProfile:post}]); } if(post.user_type == 'event'){ this.router.navigate(['/EventProfilePage',{eventProfile:post}]); } }
(filePath){ let arr = filePath.split('/'); var filename = arr.pop(); let url = encodeURI(filePath); const fileTransfer: FileTransferObject = this.transfer.create(); fileTransfer.download(this.mediapath+filePath, this.file.dataDirectory + filename).then((entry) => { let toast = this.toastCtrl.create({ message: "Attachment bas been download", duration: 3000, position: 'top' }); }, (error) => { // handle error let toast = this.toastCtrl.create({ message: "Downloading failure! retry.", duration: 3000, position: 'top' }); }); } async viewComments(index,comments,post_id){ const modal = await this.modalCtrl.create({ component: CommentsPage, componentProps: { 'comments': comments, 'post_id': post_id, 'handle': 'post' } }); await modal.present(); } async sharePostCtrl(post_id) { let prompt = await this.alertCtrl.create({ message: 'Share this post', inputs : [ { type:'radio', label:'Share post now ', value:post_id }, { type:'radio', label:'Write Post', value:post_id }], buttons : [ { text: "Cancel", handler: data => { console.log("cancel clicked"); } }, { text: "Share", handler: data => { this.sharePost('share',post_id); } }]}); await prompt.present(); } async postActivity(event,post) { let buttons : any = [ { icon: !this.platform.is('ios') ? 'ios-bookmark' : null, text: 'Save Post', handler: () => { this.reactAction('save_post',post.post_id); } } ]; if(post.author_id != localStorage.getItem('user_id')){ let report : any = { icon: !this.platform.is('ios') ? 'ios-flag' : null, text: 'Report Post', handler: () => { this.reportAction("post",post.post_id) } }; let hide : any = { icon: !this.platform.is('ios') ? 'ios-eye-off' : null, text: 'Hide Post', handler: () => { event.target.parentNode.parentNode.parentNode.parentNode.remove(); this.reactAction("hide_post",post.post_id) } }; buttons.push(report); buttons.push(hide); } if(post.author_id == localStorage.getItem('user_id')){ let btn : any = { icon: !this.platform.is('ios') ? 
'ios-trash' : null, text: 'Delete Post', handler: async () => { const confirm = await this.alertCtrl.create({ header: 'Delete post?', message: 'Once you delete you can not undo this step.', buttons: [ { text: 'Cancel', handler: () => { } } ,{ text: 'Delete', handler: () => { event.target.parentNode.parentNode.parentNode.parentNode.remove(); this.reactAction("delete_post",post.post_id) } } ] }); await confirm.present(); } }; buttons.push(btn); } const actionSheet = await this.actionSheetCtrl.create({ buttons }); await actionSheet.present(); } getBackgroundStyle(url) { if(!url){ return 'url(assets/followthebirdImgs/no-profile-img.jpeg)' } else { return 'url(' + this.mediapath+url + ')' } } getStoryBackgroundStyle(media) { if(media != 'null'){ console.log(media); let obj = JSON.parse(media) return 'url(' + this.mediapath+obj[0].src + ')' } else { return 'url(assets/followthebirdImgs/story_background.png)' } } getMedia(media) { let obj = JSON.parse(media) return this.mediapath+obj[0].src; } sharePost(type,id){ this.post.sharePost({'do':type,id:id,my_id:localStorage.getItem('user_id')}).subscribe(async (resp) => { const toast = await this.toastCtrl.create({ message: "Post has been shared successfully", duration: 3000, position: 'top' }); toast.present(); }, async (err) => { const toast = await this.toastCtrl.create({ message: "Unable to post. 
Retry", duration: 3000, position: 'top', }); toast.present(); }); } reactAction(type,post_id){ let params :any = { 'do': type, 'id': post_id, 'my_id' : localStorage.getItem('user_id') }; this.post.reaction(params).subscribe((resp) => { }, (err) => { }); } reportAction(handle,id){ let params :any = { 'handle': handle, 'id': id, 'my_id' : localStorage.getItem('user_id') }; this.user.report(params).subscribe(async (resp) => { const toast = await this.toastCtrl.create({ message: "Report has been submitted successfully", duration: 3000, position: 'top' }); toast.present(); }, async (err) => { const toast = await this.toastCtrl.create({ message: "Failed to Submit Report. Please Try Again", duration: 3000, position: 'top', }); toast.present(); }); } AddStory(){ this.router.navigate(['/AddStoryPage']); } getLiveLitePost(){ let items :any = { type:'newsfeed', user_id:localStorage.getItem('user_id'), last_post_live:localStorage.getItem('last_post_live') } this.user.getLiveLitePost(items).then((data) => { let item : any = data; if(item.length > 0){ localStorage.setItem('last_post_live',data[0].post_id); for (var key in item) { this.postFeeds.unshift(item[key]); } } }, (err) => { }); } }
downloadAttachment
identifier_name
clarans.py
"""! @brief Cluster analysis algorithm: CLARANS. @details Implementation based on paper @cite article::clarans::1. @authors Andrei Novikov (pyclustering@yandex.ru) @date 2014-2019 @copyright GNU Public License @cond GNU_PUBLIC_LICENSE PyClustering is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. PyClustering is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. @endcond """ import random import scipy import itertools import graphviz import numpy as np from clustviz.pam import plot_pam from pyclustering.cluster.encoder import type_encoding from pyclustering.utils import euclidean_distance_square class clarans: """! @brief Class represents clustering algorithm CLARANS (a method for clustering objects for spatial data mining). """ def __init__(self, data, number_clusters, numlocal, maxneighbor): """! @brief Constructor of clustering algorithm CLARANS. @details The higher the value of maxneighbor, the closer is CLARANS to K-Medoids, and the longer is each search of a local minima. @param[in] data (list): Input data that is presented as list of points (objects), each point should be represented by list or tuple. @param[in] number_clusters (uint): Amount of clusters that should be allocated. @param[in] numlocal (uint): The number of local minima obtained (amount of iterations for solving the problem). @param[in] maxneighbor (uint): The maximum number of neighbors examined. 
""" self.__pointer_data = data self.__numlocal = numlocal self.__maxneighbor = maxneighbor self.__number_clusters = number_clusters self.__clusters = [] self.__current = [] self.__belong = [] self.__optimal_medoids = [] self.__optimal_estimation = float("inf") self.__verify_arguments() def __verify_arguments(self): """! @brief Verify input parameters for the algorithm and throw exception in case of incorrectness. """ if len(self.__pointer_data) == 0: raise ValueError( "Input data is empty (size: '%d')." % len(self.__pointer_data) ) if self.__number_clusters <= 0: raise ValueError( "Amount of cluster (current value: '%d') for allocation should be greater than 0." % self.__number_clusters ) if self.__numlocal < 0: raise ValueError( "Local minima (current value: '%d') should be greater or equal to 0." % self.__numlocal ) if self.__maxneighbor < 0: raise ValueError( "Maximum number of neighbors (current value: '%d') should be greater or " "equal to 0." % self.__maxneighbor ) def process(self, plotting=False): """! @brief Performs cluster analysis in line with rules of CLARANS algorithm. @return (clarans) Returns itself (CLARANS instance). 
@see get_clusters() @see get_medoids() """ random.seed() # loop for a numlocal number of times for _ in range(0, self.__numlocal): print("numlocal: ", _) # set (current) random medoids self.__current = random.sample( range(0, len(self.__pointer_data)), self.__number_clusters ) # update clusters in line with random allocated medoids self.__update_clusters(self.__current) # optimize configuration self.__optimize_configuration() # obtain cost of current cluster configuration and compare it with the best obtained estimation = self.__calculate_estimation() if estimation < self.__optimal_estimation: print( "Better configuration found with medoids: {0} and cost: {1}".format( self.__current[:], estimation ) ) self.__optimal_medoids = self.__current[:] self.__optimal_estimation = estimation if plotting is True: self.__update_clusters(self.__optimal_medoids) plot_pam( self.__pointer_data, dict(zip(self.__optimal_medoids, self.__clusters)), ) else: print( "Configuration found does not improve current best one because its cost is {0}".format( estimation ) ) if plotting is True: self.__update_clusters(self.__current[:]) plot_pam( self.__pointer_data, dict(zip(self.__current[:], self.__clusters)), ) self.__update_clusters(self.__optimal_medoids) if plotting is True: print("FINAL RESULT:") plot_pam( self.__pointer_data, dict(zip(self.__optimal_medoids, self.__clusters)), ) return self def get_clusters(self): """! @brief Returns allocated clusters by the algorithm. @remark Allocated clusters can be returned only after data processing (use method process()), otherwise empty list is returned. @return (list) List of allocated clusters, each cluster contains indexes of objects in list of data. @see process() @see get_medoids() """ return self.__clusters def get_medoids(self): """! @brief Returns list of medoids of allocated clusters. @see process() @see get_clusters() """ return self.__optimal_medoids def get_cluster_encoding(self): """! 
@brief Returns clustering result representation type that indicate how clusters are encoded. @return (type_encoding) Clustering result representation. @see get_clusters() """ return type_encoding.CLUSTER_INDEX_LIST_SEPARATION def __update_clusters(self, medoids): """! @brief Forms cluster in line with specified medoids by calculation distance from each point to medoids. """ self.__belong = [0] * len(self.__pointer_data) self.__clusters = [[] for _ in range(len(medoids))] for index_point in range(len(self.__pointer_data)): index_optim = -1 dist_optim = 0.0 for index in range(len(medoids)): dist = euclidean_distance_square( self.__pointer_data[index_point], self.__pointer_data[medoids[index]], ) if (dist < dist_optim) or (index == 0): index_optim = index dist_optim = dist self.__clusters[index_optim].append(index_point) self.__belong[index_point] = index_optim # If cluster is not able to capture object it should be removed self.__clusters = [ cluster for cluster in self.__clusters if len(cluster) > 0 ] def __optimize_configuration(self): """! @brief Finds quasi-optimal medoids and updates in line with them clusters in line with algorithm's rules. 
""" index_neighbor = 0 counter = 0 while index_neighbor < self.__maxneighbor: # get random current medoid that is to be replaced current_medoid_index = self.__current[ random.randint(0, self.__number_clusters - 1) ] current_medoid_cluster_index = self.__belong[current_medoid_index] # get new candidate to be medoid candidate_medoid_index = random.randint( 0, len(self.__pointer_data) - 1 ) while candidate_medoid_index in self.__current: candidate_medoid_index = random.randint( 0, len(self.__pointer_data) - 1 ) candidate_cost = 0.0 for point_index in range(0, len(self.__pointer_data)): if point_index not in self.__current: # get non-medoid point and its medoid point_cluster_index = self.__belong[point_index] point_medoid_index = self.__current[point_cluster_index] # get other medoid that is nearest to the point (except current and candidate) other_medoid_index = self.__find_another_nearest_medoid( point_index, current_medoid_index ) other_medoid_cluster_index = self.__belong[ other_medoid_index ] # for optimization calculate all required distances # from the point to current medoid distance_current = euclidean_distance_square( self.__pointer_data[point_index], self.__pointer_data[current_medoid_index], ) # from the point to candidate median distance_candidate = euclidean_distance_square( self.__pointer_data[point_index], self.__pointer_data[candidate_medoid_index], ) # from the point to nearest (own) medoid distance_nearest = float("inf") if (point_medoid_index != candidate_medoid_index) and ( point_medoid_index != current_medoid_cluster_index ): distance_nearest = euclidean_distance_square( self.__pointer_data[point_index], self.__pointer_data[point_medoid_index], ) # apply rules for cost calculation if point_cluster_index == current_medoid_cluster_index: # case 1: if distance_candidate >= distance_nearest: candidate_cost += ( distance_nearest - distance_current ) # case 2: else: candidate_cost += ( distance_candidate - distance_current ) elif point_cluster_index == 
other_medoid_cluster_index: # case 3 ('nearest medoid' is the representative object of that cluster and object is more # similar to 'nearest' than to 'candidate'): if distance_candidate > distance_nearest:
# case 4: else: candidate_cost += ( distance_candidate - distance_nearest ) if candidate_cost < 0: counter += 1 # set candidate that has won self.__current[ current_medoid_cluster_index ] = candidate_medoid_index # recalculate clusters self.__update_clusters(self.__current) # reset iterations and starts investigation from the begining index_neighbor = 0 else: index_neighbor += 1 print("Medoid set changed {0} times".format(counter)) def __find_another_nearest_medoid(self, point_index, current_medoid_index): """! @brief Finds the another nearest medoid for the specified point that is different from the specified medoid. @param[in] point_index: index of point in dataspace for that searching of medoid in current list of medoids is performed. @param[in] current_medoid_index: index of medoid that shouldn't be considered as a nearest. @return (uint) index of the another nearest medoid for the point. """ other_medoid_index = -1 other_distance_nearest = float("inf") for index_medoid in self.__current: if index_medoid != current_medoid_index: other_distance_candidate = euclidean_distance_square( self.__pointer_data[point_index], self.__pointer_data[current_medoid_index], ) if other_distance_candidate < other_distance_nearest: other_distance_nearest = other_distance_candidate other_medoid_index = index_medoid return other_medoid_index def __calculate_estimation(self): """! @brief Calculates estimation (cost) of the current clusters. The lower the estimation, the more optimally configuration of clusters. @return (double) estimation of current clusters. 
""" estimation = 0.0 for index_cluster in range(0, len(self.__clusters)): cluster = self.__clusters[index_cluster] index_medoid = self.__current[index_cluster] for index_point in cluster: estimation += euclidean_distance_square( self.__pointer_data[index_point], self.__pointer_data[index_medoid], ) return estimation def compute_cost_clarans(data, _cur_choice): # modified from that of CLARA """A function to compute the configuration cost. :param data: The input dataframe. :param _cur_choice: The current set of medoid choices. :return: The total configuration cost, the medoids. """ total_cost = 0.0 medoids = {} for idx in _cur_choice: medoids[idx] = [] for i in list(data.index): choice = -1 min_cost = np.inf for m in medoids: # fast_euclidean from CLARA tmp = np.linalg.norm(data.loc[m] - data.loc[i]) if tmp < min_cost: choice = m min_cost = tmp medoids[choice].append(i) total_cost += min_cost # print("total_cost: ", total_cost) return total_cost, medoids def plot_tree_clarans(data, k): """ Plots G_{k,n} as in the paper of CLARANS; only to use with small input data. :param data: input DataFrame. :param k: number of points in each combination (possible set of medoids). 
""" n = len(data) num_points = int(scipy.special.binom(n, k)) num_neigh = k * (n - k) if (num_points > 50) or (num_neigh > 10): print( "Either graph nodes are more than 50 or neighbors are more than 10, the graph would be too big" ) return # all possibile combinations of k elements from input data name_nodes = list(itertools.combinations(list(data.index), k)) dot = graphviz.Digraph(comment="Clustering") # draw nodes, also adding the configuration cost for i in range(num_points): tot_cost, meds = compute_cost_clarans(data, list(name_nodes[i])) tc = round(tot_cost, 3) dot.node(str(name_nodes[i]), str(name_nodes[i]) + ": " + str(tc)) # only connect nodes if they have k-1 common elements for i in range(num_points): for j in range(num_points): if i != j: if ( len(set(list(name_nodes[i])) & set(list(name_nodes[j]))) == k - 1 ): dot.edge(str(name_nodes[i]), str(name_nodes[j])) graph = graphviz.Source(dot) # .view() display(graph)
pass
conditional_block
clarans.py
"""! @brief Cluster analysis algorithm: CLARANS. @details Implementation based on paper @cite article::clarans::1. @authors Andrei Novikov (pyclustering@yandex.ru) @date 2014-2019 @copyright GNU Public License @cond GNU_PUBLIC_LICENSE PyClustering is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. PyClustering is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. @endcond """ import random import scipy import itertools import graphviz import numpy as np from clustviz.pam import plot_pam from pyclustering.cluster.encoder import type_encoding from pyclustering.utils import euclidean_distance_square class clarans: """! @brief Class represents clustering algorithm CLARANS (a method for clustering objects for spatial data mining). """ def __init__(self, data, number_clusters, numlocal, maxneighbor): """! @brief Constructor of clustering algorithm CLARANS. @details The higher the value of maxneighbor, the closer is CLARANS to K-Medoids, and the longer is each search of a local minima. @param[in] data (list): Input data that is presented as list of points (objects), each point should be represented by list or tuple. @param[in] number_clusters (uint): Amount of clusters that should be allocated. @param[in] numlocal (uint): The number of local minima obtained (amount of iterations for solving the problem). @param[in] maxneighbor (uint): The maximum number of neighbors examined. 
""" self.__pointer_data = data self.__numlocal = numlocal self.__maxneighbor = maxneighbor self.__number_clusters = number_clusters self.__clusters = [] self.__current = [] self.__belong = [] self.__optimal_medoids = [] self.__optimal_estimation = float("inf") self.__verify_arguments() def __verify_arguments(self): """! @brief Verify input parameters for the algorithm and throw exception in case of incorrectness. """ if len(self.__pointer_data) == 0: raise ValueError( "Input data is empty (size: '%d')." % len(self.__pointer_data) ) if self.__number_clusters <= 0: raise ValueError( "Amount of cluster (current value: '%d') for allocation should be greater than 0." % self.__number_clusters ) if self.__numlocal < 0: raise ValueError( "Local minima (current value: '%d') should be greater or equal to 0." % self.__numlocal ) if self.__maxneighbor < 0: raise ValueError( "Maximum number of neighbors (current value: '%d') should be greater or " "equal to 0." % self.__maxneighbor ) def process(self, plotting=False): """! @brief Performs cluster analysis in line with rules of CLARANS algorithm. @return (clarans) Returns itself (CLARANS instance). 
@see get_clusters() @see get_medoids() """ random.seed() # loop for a numlocal number of times for _ in range(0, self.__numlocal): print("numlocal: ", _) # set (current) random medoids self.__current = random.sample( range(0, len(self.__pointer_data)), self.__number_clusters ) # update clusters in line with random allocated medoids self.__update_clusters(self.__current) # optimize configuration self.__optimize_configuration() # obtain cost of current cluster configuration and compare it with the best obtained estimation = self.__calculate_estimation() if estimation < self.__optimal_estimation: print( "Better configuration found with medoids: {0} and cost: {1}".format( self.__current[:], estimation ) ) self.__optimal_medoids = self.__current[:] self.__optimal_estimation = estimation if plotting is True: self.__update_clusters(self.__optimal_medoids) plot_pam( self.__pointer_data, dict(zip(self.__optimal_medoids, self.__clusters)), ) else: print( "Configuration found does not improve current best one because its cost is {0}".format( estimation ) ) if plotting is True: self.__update_clusters(self.__current[:]) plot_pam( self.__pointer_data, dict(zip(self.__current[:], self.__clusters)), ) self.__update_clusters(self.__optimal_medoids) if plotting is True: print("FINAL RESULT:") plot_pam( self.__pointer_data, dict(zip(self.__optimal_medoids, self.__clusters)), ) return self def get_clusters(self): """! @brief Returns allocated clusters by the algorithm. @remark Allocated clusters can be returned only after data processing (use method process()), otherwise empty list is returned. @return (list) List of allocated clusters, each cluster contains indexes of objects in list of data. @see process() @see get_medoids() """ return self.__clusters def get_medoids(self): """! @brief Returns list of medoids of allocated clusters. @see process() @see get_clusters() """ return self.__optimal_medoids def get_cluster_encoding(self): """! 
@brief Returns clustering result representation type that indicate how clusters are encoded. @return (type_encoding) Clustering result representation. @see get_clusters() """ return type_encoding.CLUSTER_INDEX_LIST_SEPARATION def __update_clusters(self, medoids): """! @brief Forms cluster in line with specified medoids by calculation distance from each point to medoids. """ self.__belong = [0] * len(self.__pointer_data) self.__clusters = [[] for _ in range(len(medoids))] for index_point in range(len(self.__pointer_data)): index_optim = -1 dist_optim = 0.0 for index in range(len(medoids)): dist = euclidean_distance_square( self.__pointer_data[index_point], self.__pointer_data[medoids[index]], ) if (dist < dist_optim) or (index == 0): index_optim = index dist_optim = dist self.__clusters[index_optim].append(index_point) self.__belong[index_point] = index_optim # If cluster is not able to capture object it should be removed self.__clusters = [ cluster for cluster in self.__clusters if len(cluster) > 0 ] def __optimize_configuration(self): """! @brief Finds quasi-optimal medoids and updates in line with them clusters in line with algorithm's rules. 
""" index_neighbor = 0 counter = 0 while index_neighbor < self.__maxneighbor: # get random current medoid that is to be replaced current_medoid_index = self.__current[ random.randint(0, self.__number_clusters - 1) ] current_medoid_cluster_index = self.__belong[current_medoid_index] # get new candidate to be medoid candidate_medoid_index = random.randint( 0, len(self.__pointer_data) - 1 ) while candidate_medoid_index in self.__current: candidate_medoid_index = random.randint( 0, len(self.__pointer_data) - 1 ) candidate_cost = 0.0 for point_index in range(0, len(self.__pointer_data)): if point_index not in self.__current: # get non-medoid point and its medoid point_cluster_index = self.__belong[point_index] point_medoid_index = self.__current[point_cluster_index] # get other medoid that is nearest to the point (except current and candidate) other_medoid_index = self.__find_another_nearest_medoid( point_index, current_medoid_index ) other_medoid_cluster_index = self.__belong[ other_medoid_index ] # for optimization calculate all required distances # from the point to current medoid distance_current = euclidean_distance_square( self.__pointer_data[point_index], self.__pointer_data[current_medoid_index], ) # from the point to candidate median distance_candidate = euclidean_distance_square( self.__pointer_data[point_index], self.__pointer_data[candidate_medoid_index], ) # from the point to nearest (own) medoid distance_nearest = float("inf") if (point_medoid_index != candidate_medoid_index) and ( point_medoid_index != current_medoid_cluster_index ): distance_nearest = euclidean_distance_square( self.__pointer_data[point_index], self.__pointer_data[point_medoid_index], ) # apply rules for cost calculation if point_cluster_index == current_medoid_cluster_index: # case 1: if distance_candidate >= distance_nearest: candidate_cost += ( distance_nearest - distance_current ) # case 2: else: candidate_cost += ( distance_candidate - distance_current ) elif point_cluster_index == 
other_medoid_cluster_index: # case 3 ('nearest medoid' is the representative object of that cluster and object is more # similar to 'nearest' than to 'candidate'): if distance_candidate > distance_nearest: pass # case 4: else: candidate_cost += ( distance_candidate - distance_nearest ) if candidate_cost < 0: counter += 1 # set candidate that has won self.__current[ current_medoid_cluster_index ] = candidate_medoid_index # recalculate clusters self.__update_clusters(self.__current) # reset iterations and starts investigation from the begining index_neighbor = 0 else: index_neighbor += 1 print("Medoid set changed {0} times".format(counter)) def __find_another_nearest_medoid(self, point_index, current_medoid_index): """! @brief Finds the another nearest medoid for the specified point that is different from the specified medoid. @param[in] point_index: index of point in dataspace for that searching of medoid in current list of medoids is performed. @param[in] current_medoid_index: index of medoid that shouldn't be considered as a nearest. @return (uint) index of the another nearest medoid for the point. """ other_medoid_index = -1 other_distance_nearest = float("inf") for index_medoid in self.__current: if index_medoid != current_medoid_index: other_distance_candidate = euclidean_distance_square( self.__pointer_data[point_index], self.__pointer_data[current_medoid_index], ) if other_distance_candidate < other_distance_nearest: other_distance_nearest = other_distance_candidate other_medoid_index = index_medoid return other_medoid_index def __calculate_estimation(self): """! @brief Calculates estimation (cost) of the current clusters. The lower the estimation, the more optimally configuration of clusters. @return (double) estimation of current clusters. 
""" estimation = 0.0 for index_cluster in range(0, len(self.__clusters)): cluster = self.__clusters[index_cluster] index_medoid = self.__current[index_cluster] for index_point in cluster: estimation += euclidean_distance_square( self.__pointer_data[index_point], self.__pointer_data[index_medoid], ) return estimation def
(data, _cur_choice): # modified from that of CLARA """A function to compute the configuration cost. :param data: The input dataframe. :param _cur_choice: The current set of medoid choices. :return: The total configuration cost, the medoids. """ total_cost = 0.0 medoids = {} for idx in _cur_choice: medoids[idx] = [] for i in list(data.index): choice = -1 min_cost = np.inf for m in medoids: # fast_euclidean from CLARA tmp = np.linalg.norm(data.loc[m] - data.loc[i]) if tmp < min_cost: choice = m min_cost = tmp medoids[choice].append(i) total_cost += min_cost # print("total_cost: ", total_cost) return total_cost, medoids def plot_tree_clarans(data, k): """ Plots G_{k,n} as in the paper of CLARANS; only to use with small input data. :param data: input DataFrame. :param k: number of points in each combination (possible set of medoids). """ n = len(data) num_points = int(scipy.special.binom(n, k)) num_neigh = k * (n - k) if (num_points > 50) or (num_neigh > 10): print( "Either graph nodes are more than 50 or neighbors are more than 10, the graph would be too big" ) return # all possibile combinations of k elements from input data name_nodes = list(itertools.combinations(list(data.index), k)) dot = graphviz.Digraph(comment="Clustering") # draw nodes, also adding the configuration cost for i in range(num_points): tot_cost, meds = compute_cost_clarans(data, list(name_nodes[i])) tc = round(tot_cost, 3) dot.node(str(name_nodes[i]), str(name_nodes[i]) + ": " + str(tc)) # only connect nodes if they have k-1 common elements for i in range(num_points): for j in range(num_points): if i != j: if ( len(set(list(name_nodes[i])) & set(list(name_nodes[j]))) == k - 1 ): dot.edge(str(name_nodes[i]), str(name_nodes[j])) graph = graphviz.Source(dot) # .view() display(graph)
compute_cost_clarans
identifier_name
clarans.py
@brief Cluster analysis algorithm: CLARANS. @details Implementation based on paper @cite article::clarans::1. @authors Andrei Novikov (pyclustering@yandex.ru) @date 2014-2019 @copyright GNU Public License @cond GNU_PUBLIC_LICENSE PyClustering is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. PyClustering is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. @endcond """ import random import scipy import itertools import graphviz import numpy as np from clustviz.pam import plot_pam from pyclustering.cluster.encoder import type_encoding from pyclustering.utils import euclidean_distance_square class clarans: """! @brief Class represents clustering algorithm CLARANS (a method for clustering objects for spatial data mining). """ def __init__(self, data, number_clusters, numlocal, maxneighbor): """! @brief Constructor of clustering algorithm CLARANS. @details The higher the value of maxneighbor, the closer is CLARANS to K-Medoids, and the longer is each search of a local minima. @param[in] data (list): Input data that is presented as list of points (objects), each point should be represented by list or tuple. @param[in] number_clusters (uint): Amount of clusters that should be allocated. @param[in] numlocal (uint): The number of local minima obtained (amount of iterations for solving the problem). @param[in] maxneighbor (uint): The maximum number of neighbors examined. 
""" self.__pointer_data = data self.__numlocal = numlocal self.__maxneighbor = maxneighbor self.__number_clusters = number_clusters self.__clusters = [] self.__current = [] self.__belong = [] self.__optimal_medoids = [] self.__optimal_estimation = float("inf") self.__verify_arguments() def __verify_arguments(self): """! @brief Verify input parameters for the algorithm and throw exception in case of incorrectness. """ if len(self.__pointer_data) == 0: raise ValueError( "Input data is empty (size: '%d')." % len(self.__pointer_data) ) if self.__number_clusters <= 0: raise ValueError( "Amount of cluster (current value: '%d') for allocation should be greater than 0." % self.__number_clusters ) if self.__numlocal < 0: raise ValueError( "Local minima (current value: '%d') should be greater or equal to 0." % self.__numlocal ) if self.__maxneighbor < 0: raise ValueError( "Maximum number of neighbors (current value: '%d') should be greater or " "equal to 0." % self.__maxneighbor ) def process(self, plotting=False): """! @brief Performs cluster analysis in line with rules of CLARANS algorithm. @return (clarans) Returns itself (CLARANS instance). 
@see get_clusters() @see get_medoids() """ random.seed() # loop for a numlocal number of times for _ in range(0, self.__numlocal): print("numlocal: ", _) # set (current) random medoids self.__current = random.sample( range(0, len(self.__pointer_data)), self.__number_clusters ) # update clusters in line with random allocated medoids self.__update_clusters(self.__current) # optimize configuration self.__optimize_configuration() # obtain cost of current cluster configuration and compare it with the best obtained estimation = self.__calculate_estimation() if estimation < self.__optimal_estimation: print( "Better configuration found with medoids: {0} and cost: {1}".format( self.__current[:], estimation ) ) self.__optimal_medoids = self.__current[:] self.__optimal_estimation = estimation if plotting is True: self.__update_clusters(self.__optimal_medoids) plot_pam( self.__pointer_data, dict(zip(self.__optimal_medoids, self.__clusters)), ) else: print( "Configuration found does not improve current best one because its cost is {0}".format( estimation ) ) if plotting is True: self.__update_clusters(self.__current[:]) plot_pam( self.__pointer_data, dict(zip(self.__current[:], self.__clusters)), ) self.__update_clusters(self.__optimal_medoids) if plotting is True: print("FINAL RESULT:") plot_pam( self.__pointer_data, dict(zip(self.__optimal_medoids, self.__clusters)), ) return self def get_clusters(self): """! @brief Returns allocated clusters by the algorithm. @remark Allocated clusters can be returned only after data processing (use method process()), otherwise empty list is returned. @return (list) List of allocated clusters, each cluster contains indexes of objects in list of data. @see process() @see get_medoids() """ return self.__clusters def get_medoids(self): """! @brief Returns list of medoids of allocated clusters. @see process() @see get_clusters() """ return self.__optimal_medoids def get_cluster_encoding(self): """! 
@brief Returns clustering result representation type that indicate how clusters are encoded. @return (type_encoding) Clustering result representation. @see get_clusters() """ return type_encoding.CLUSTER_INDEX_LIST_SEPARATION def __update_clusters(self, medoids): """! @brief Forms cluster in line with specified medoids by calculation distance from each point to medoids. """ self.__belong = [0] * len(self.__pointer_data) self.__clusters = [[] for _ in range(len(medoids))] for index_point in range(len(self.__pointer_data)): index_optim = -1 dist_optim = 0.0 for index in range(len(medoids)): dist = euclidean_distance_square( self.__pointer_data[index_point], self.__pointer_data[medoids[index]], ) if (dist < dist_optim) or (index == 0): index_optim = index dist_optim = dist self.__clusters[index_optim].append(index_point) self.__belong[index_point] = index_optim # If cluster is not able to capture object it should be removed self.__clusters = [ cluster for cluster in self.__clusters if len(cluster) > 0 ] def __optimize_configuration(self): """! @brief Finds quasi-optimal medoids and updates in line with them clusters in line with algorithm's rules. 
""" index_neighbor = 0 counter = 0 while index_neighbor < self.__maxneighbor: # get random current medoid that is to be replaced current_medoid_index = self.__current[ random.randint(0, self.__number_clusters - 1) ] current_medoid_cluster_index = self.__belong[current_medoid_index] # get new candidate to be medoid candidate_medoid_index = random.randint( 0, len(self.__pointer_data) - 1 ) while candidate_medoid_index in self.__current: candidate_medoid_index = random.randint( 0, len(self.__pointer_data) - 1 ) candidate_cost = 0.0 for point_index in range(0, len(self.__pointer_data)): if point_index not in self.__current: # get non-medoid point and its medoid point_cluster_index = self.__belong[point_index] point_medoid_index = self.__current[point_cluster_index] # get other medoid that is nearest to the point (except current and candidate) other_medoid_index = self.__find_another_nearest_medoid( point_index, current_medoid_index ) other_medoid_cluster_index = self.__belong[ other_medoid_index ] # for optimization calculate all required distances # from the point to current medoid distance_current = euclidean_distance_square( self.__pointer_data[point_index], self.__pointer_data[current_medoid_index], ) # from the point to candidate median distance_candidate = euclidean_distance_square( self.__pointer_data[point_index], self.__pointer_data[candidate_medoid_index], ) # from the point to nearest (own) medoid distance_nearest = float("inf") if (point_medoid_index != candidate_medoid_index) and ( point_medoid_index != current_medoid_cluster_index ): distance_nearest = euclidean_distance_square( self.__pointer_data[point_index], self.__pointer_data[point_medoid_index], ) # apply rules for cost calculation if point_cluster_index == current_medoid_cluster_index: # case 1: if distance_candidate >= distance_nearest: candidate_cost += ( distance_nearest - distance_current ) # case 2: else: candidate_cost += ( distance_candidate - distance_current ) elif point_cluster_index == 
other_medoid_cluster_index: # case 3 ('nearest medoid' is the representative object of that cluster and object is more # similar to 'nearest' than to 'candidate'): if distance_candidate > distance_nearest: pass # case 4: else: candidate_cost += ( distance_candidate - distance_nearest ) if candidate_cost < 0: counter += 1 # set candidate that has won self.__current[ current_medoid_cluster_index ] = candidate_medoid_index # recalculate clusters self.__update_clusters(self.__current) # reset iterations and starts investigation from the begining index_neighbor = 0 else: index_neighbor += 1 print("Medoid set changed {0} times".format(counter)) def __find_another_nearest_medoid(self, point_index, current_medoid_index): """! @brief Finds the another nearest medoid for the specified point that is different from the specified medoid. @param[in] point_index: index of point in dataspace for that searching of medoid in current list of medoids is performed. @param[in] current_medoid_index: index of medoid that shouldn't be considered as a nearest. @return (uint) index of the another nearest medoid for the point. """ other_medoid_index = -1 other_distance_nearest = float("inf") for index_medoid in self.__current: if index_medoid != current_medoid_index: other_distance_candidate = euclidean_distance_square( self.__pointer_data[point_index], self.__pointer_data[current_medoid_index], ) if other_distance_candidate < other_distance_nearest: other_distance_nearest = other_distance_candidate other_medoid_index = index_medoid return other_medoid_index def __calculate_estimation(self): """! @brief Calculates estimation (cost) of the current clusters. The lower the estimation, the more optimally configuration of clusters. @return (double) estimation of current clusters. 
""" estimation = 0.0 for index_cluster in range(0, len(self.__clusters)): cluster = self.__clusters[index_cluster] index_medoid = self.__current[index_cluster] for index_point in cluster: estimation += euclidean_distance_square( self.__pointer_data[index_point], self.__pointer_data[index_medoid], ) return estimation def compute_cost_clarans(data, _cur_choice): # modified from that of CLARA """A function to compute the configuration cost. :param data: The input dataframe. :param _cur_choice: The current set of medoid choices. :return: The total configuration cost, the medoids. """ total_cost = 0.0 medoids = {} for idx in _cur_choice: medoids[idx] = [] for i in list(data.index): choice = -1 min_cost = np.inf for m in medoids: # fast_euclidean from CLARA tmp = np.linalg.norm(data.loc[m] - data.loc[i]) if tmp < min_cost: choice = m min_cost = tmp medoids[choice].append(i) total_cost += min_cost # print("total_cost: ", total_cost) return total_cost, medoids def plot_tree_clarans(data, k): """ Plots G_{k,n} as in the paper of CLARANS; only to use with small input data. :param data: input DataFrame. :param k: number of points in each combination (possible set of medoids). 
""" n = len(data) num_points = int(scipy.special.binom(n, k)) num_neigh = k * (n - k) if (num_points > 50) or (num_neigh > 10): print( "Either graph nodes are more than 50 or neighbors are more than 10, the graph would be too big" ) return # all possibile combinations of k elements from input data name_nodes = list(itertools.combinations(list(data.index), k)) dot = graphviz.Digraph(comment="Clustering") # draw nodes, also adding the configuration cost for i in range(num_points): tot_cost, meds = compute_cost_clarans(data, list(name_nodes[i])) tc = round(tot_cost, 3) dot.node(str(name_nodes[i]), str(name_nodes[i]) + ": " + str(tc)) # only connect nodes if they have k-1 common elements for i in range(num_points): for j in range(num_points): if i != j: if ( len(set(list(name_nodes[i])) & set(list(name_nodes[j]))) == k - 1 ): dot.edge(str(name_nodes[i]), str(name_nodes[j])) graph = graphviz.Source(dot) # .view() display(graph)
"""!
random_line_split
clarans.py
"""! @brief Cluster analysis algorithm: CLARANS. @details Implementation based on paper @cite article::clarans::1. @authors Andrei Novikov (pyclustering@yandex.ru) @date 2014-2019 @copyright GNU Public License @cond GNU_PUBLIC_LICENSE PyClustering is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. PyClustering is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. @endcond """ import random import scipy import itertools import graphviz import numpy as np from clustviz.pam import plot_pam from pyclustering.cluster.encoder import type_encoding from pyclustering.utils import euclidean_distance_square class clarans: """! @brief Class represents clustering algorithm CLARANS (a method for clustering objects for spatial data mining). """ def __init__(self, data, number_clusters, numlocal, maxneighbor): """! @brief Constructor of clustering algorithm CLARANS. @details The higher the value of maxneighbor, the closer is CLARANS to K-Medoids, and the longer is each search of a local minima. @param[in] data (list): Input data that is presented as list of points (objects), each point should be represented by list or tuple. @param[in] number_clusters (uint): Amount of clusters that should be allocated. @param[in] numlocal (uint): The number of local minima obtained (amount of iterations for solving the problem). @param[in] maxneighbor (uint): The maximum number of neighbors examined. 
""" self.__pointer_data = data self.__numlocal = numlocal self.__maxneighbor = maxneighbor self.__number_clusters = number_clusters self.__clusters = [] self.__current = [] self.__belong = [] self.__optimal_medoids = [] self.__optimal_estimation = float("inf") self.__verify_arguments() def __verify_arguments(self): """! @brief Verify input parameters for the algorithm and throw exception in case of incorrectness. """ if len(self.__pointer_data) == 0: raise ValueError( "Input data is empty (size: '%d')." % len(self.__pointer_data) ) if self.__number_clusters <= 0: raise ValueError( "Amount of cluster (current value: '%d') for allocation should be greater than 0." % self.__number_clusters ) if self.__numlocal < 0: raise ValueError( "Local minima (current value: '%d') should be greater or equal to 0." % self.__numlocal ) if self.__maxneighbor < 0: raise ValueError( "Maximum number of neighbors (current value: '%d') should be greater or " "equal to 0." % self.__maxneighbor ) def process(self, plotting=False):
def get_clusters(self): """! @brief Returns allocated clusters by the algorithm. @remark Allocated clusters can be returned only after data processing (use method process()), otherwise empty list is returned. @return (list) List of allocated clusters, each cluster contains indexes of objects in list of data. @see process() @see get_medoids() """ return self.__clusters def get_medoids(self): """! @brief Returns list of medoids of allocated clusters. @see process() @see get_clusters() """ return self.__optimal_medoids def get_cluster_encoding(self): """! @brief Returns clustering result representation type that indicate how clusters are encoded. @return (type_encoding) Clustering result representation. @see get_clusters() """ return type_encoding.CLUSTER_INDEX_LIST_SEPARATION def __update_clusters(self, medoids): """! @brief Forms cluster in line with specified medoids by calculation distance from each point to medoids. """ self.__belong = [0] * len(self.__pointer_data) self.__clusters = [[] for _ in range(len(medoids))] for index_point in range(len(self.__pointer_data)): index_optim = -1 dist_optim = 0.0 for index in range(len(medoids)): dist = euclidean_distance_square( self.__pointer_data[index_point], self.__pointer_data[medoids[index]], ) if (dist < dist_optim) or (index == 0): index_optim = index dist_optim = dist self.__clusters[index_optim].append(index_point) self.__belong[index_point] = index_optim # If cluster is not able to capture object it should be removed self.__clusters = [ cluster for cluster in self.__clusters if len(cluster) > 0 ] def __optimize_configuration(self): """! @brief Finds quasi-optimal medoids and updates in line with them clusters in line with algorithm's rules. 
""" index_neighbor = 0 counter = 0 while index_neighbor < self.__maxneighbor: # get random current medoid that is to be replaced current_medoid_index = self.__current[ random.randint(0, self.__number_clusters - 1) ] current_medoid_cluster_index = self.__belong[current_medoid_index] # get new candidate to be medoid candidate_medoid_index = random.randint( 0, len(self.__pointer_data) - 1 ) while candidate_medoid_index in self.__current: candidate_medoid_index = random.randint( 0, len(self.__pointer_data) - 1 ) candidate_cost = 0.0 for point_index in range(0, len(self.__pointer_data)): if point_index not in self.__current: # get non-medoid point and its medoid point_cluster_index = self.__belong[point_index] point_medoid_index = self.__current[point_cluster_index] # get other medoid that is nearest to the point (except current and candidate) other_medoid_index = self.__find_another_nearest_medoid( point_index, current_medoid_index ) other_medoid_cluster_index = self.__belong[ other_medoid_index ] # for optimization calculate all required distances # from the point to current medoid distance_current = euclidean_distance_square( self.__pointer_data[point_index], self.__pointer_data[current_medoid_index], ) # from the point to candidate median distance_candidate = euclidean_distance_square( self.__pointer_data[point_index], self.__pointer_data[candidate_medoid_index], ) # from the point to nearest (own) medoid distance_nearest = float("inf") if (point_medoid_index != candidate_medoid_index) and ( point_medoid_index != current_medoid_cluster_index ): distance_nearest = euclidean_distance_square( self.__pointer_data[point_index], self.__pointer_data[point_medoid_index], ) # apply rules for cost calculation if point_cluster_index == current_medoid_cluster_index: # case 1: if distance_candidate >= distance_nearest: candidate_cost += ( distance_nearest - distance_current ) # case 2: else: candidate_cost += ( distance_candidate - distance_current ) elif point_cluster_index == 
other_medoid_cluster_index: # case 3 ('nearest medoid' is the representative object of that cluster and object is more # similar to 'nearest' than to 'candidate'): if distance_candidate > distance_nearest: pass # case 4: else: candidate_cost += ( distance_candidate - distance_nearest ) if candidate_cost < 0: counter += 1 # set candidate that has won self.__current[ current_medoid_cluster_index ] = candidate_medoid_index # recalculate clusters self.__update_clusters(self.__current) # reset iterations and starts investigation from the begining index_neighbor = 0 else: index_neighbor += 1 print("Medoid set changed {0} times".format(counter)) def __find_another_nearest_medoid(self, point_index, current_medoid_index): """! @brief Finds the another nearest medoid for the specified point that is different from the specified medoid. @param[in] point_index: index of point in dataspace for that searching of medoid in current list of medoids is performed. @param[in] current_medoid_index: index of medoid that shouldn't be considered as a nearest. @return (uint) index of the another nearest medoid for the point. """ other_medoid_index = -1 other_distance_nearest = float("inf") for index_medoid in self.__current: if index_medoid != current_medoid_index: other_distance_candidate = euclidean_distance_square( self.__pointer_data[point_index], self.__pointer_data[current_medoid_index], ) if other_distance_candidate < other_distance_nearest: other_distance_nearest = other_distance_candidate other_medoid_index = index_medoid return other_medoid_index def __calculate_estimation(self): """! @brief Calculates estimation (cost) of the current clusters. The lower the estimation, the more optimally configuration of clusters. @return (double) estimation of current clusters. 
""" estimation = 0.0 for index_cluster in range(0, len(self.__clusters)): cluster = self.__clusters[index_cluster] index_medoid = self.__current[index_cluster] for index_point in cluster: estimation += euclidean_distance_square( self.__pointer_data[index_point], self.__pointer_data[index_medoid], ) return estimation def compute_cost_clarans(data, _cur_choice): # modified from that of CLARA """A function to compute the configuration cost. :param data: The input dataframe. :param _cur_choice: The current set of medoid choices. :return: The total configuration cost, the medoids. """ total_cost = 0.0 medoids = {} for idx in _cur_choice: medoids[idx] = [] for i in list(data.index): choice = -1 min_cost = np.inf for m in medoids: # fast_euclidean from CLARA tmp = np.linalg.norm(data.loc[m] - data.loc[i]) if tmp < min_cost: choice = m min_cost = tmp medoids[choice].append(i) total_cost += min_cost # print("total_cost: ", total_cost) return total_cost, medoids def plot_tree_clarans(data, k): """ Plots G_{k,n} as in the paper of CLARANS; only to use with small input data. :param data: input DataFrame. :param k: number of points in each combination (possible set of medoids). 
""" n = len(data) num_points = int(scipy.special.binom(n, k)) num_neigh = k * (n - k) if (num_points > 50) or (num_neigh > 10): print( "Either graph nodes are more than 50 or neighbors are more than 10, the graph would be too big" ) return # all possibile combinations of k elements from input data name_nodes = list(itertools.combinations(list(data.index), k)) dot = graphviz.Digraph(comment="Clustering") # draw nodes, also adding the configuration cost for i in range(num_points): tot_cost, meds = compute_cost_clarans(data, list(name_nodes[i])) tc = round(tot_cost, 3) dot.node(str(name_nodes[i]), str(name_nodes[i]) + ": " + str(tc)) # only connect nodes if they have k-1 common elements for i in range(num_points): for j in range(num_points): if i != j: if ( len(set(list(name_nodes[i])) & set(list(name_nodes[j]))) == k - 1 ): dot.edge(str(name_nodes[i]), str(name_nodes[j])) graph = graphviz.Source(dot) # .view() display(graph)
"""! @brief Performs cluster analysis in line with rules of CLARANS algorithm. @return (clarans) Returns itself (CLARANS instance). @see get_clusters() @see get_medoids() """ random.seed() # loop for a numlocal number of times for _ in range(0, self.__numlocal): print("numlocal: ", _) # set (current) random medoids self.__current = random.sample( range(0, len(self.__pointer_data)), self.__number_clusters ) # update clusters in line with random allocated medoids self.__update_clusters(self.__current) # optimize configuration self.__optimize_configuration() # obtain cost of current cluster configuration and compare it with the best obtained estimation = self.__calculate_estimation() if estimation < self.__optimal_estimation: print( "Better configuration found with medoids: {0} and cost: {1}".format( self.__current[:], estimation ) ) self.__optimal_medoids = self.__current[:] self.__optimal_estimation = estimation if plotting is True: self.__update_clusters(self.__optimal_medoids) plot_pam( self.__pointer_data, dict(zip(self.__optimal_medoids, self.__clusters)), ) else: print( "Configuration found does not improve current best one because its cost is {0}".format( estimation ) ) if plotting is True: self.__update_clusters(self.__current[:]) plot_pam( self.__pointer_data, dict(zip(self.__current[:], self.__clusters)), ) self.__update_clusters(self.__optimal_medoids) if plotting is True: print("FINAL RESULT:") plot_pam( self.__pointer_data, dict(zip(self.__optimal_medoids, self.__clusters)), ) return self
identifier_body
mod.rs
// Copyright (c) The XPeer Core Contributors // SPDX-License-Identifier: Apache-2.0 //! In a leader based consensus algorithm, each participant maintains a block tree that looks like //! the following: //! ```text //! Height 5 6 7 ... //! //! Committed -> B5 -> B6 -> B7 //! | //! └--> B5' -> B6' -> B7' //! | //! └----> B7" //! ``` //! This module implements `BlockTree` that is an in-memory representation of this tree. #[cfg(test)] mod block_tree_test; use crypto::HashValue; use failure::bail_err; use std::collections::{hash_map, HashMap, HashSet}; /// Each block has a unique identifier that is a `HashValue` computed by consensus. It has exactly /// one parent and zero or more children. pub trait Block: std::fmt::Debug { /// The output of executing this block. type Output; /// The signatures on this block. type Signature; /// Whether consensus has decided to commit this block. This kind of blocks are expected to be /// sent to storage very soon, unless execution is lagging behind. fn is_committed(&self) -> bool; /// Marks this block as committed. fn set_committed(&mut self); /// Whether this block has finished execution. fn is_executed(&self) -> bool; /// Sets the output of this block. fn set_output(&mut self, output: Self::Output); /// Sets the signatures for this block. fn set_signature(&mut self, signature: Self::Signature); /// The id of this block. fn id(&self) -> HashValue; /// The id of the parent block. fn parent_id(&self) -> HashValue; /// Adds a block as its child. fn add_child(&mut self, child_id: HashValue); /// The list of children of this block. fn children(&self) -> &HashSet<HashValue>; } /// The `BlockTree` implementation. #[derive(Debug)] pub struct BlockTree<B> { /// A map that keeps track of all existing blocks by their ids. id_to_block: HashMap<HashValue, B>, /// The blocks at the lowest height in the map. B5 and B5' in the following example. 
/// ```text /// Committed(B0..4) -> B5 -> B6 -> B7 /// | /// └--> B5' -> B6' -> B7' /// | /// └----> B7" /// ``` heads: HashSet<HashValue>, /// Id of the last committed block. B4 in the above example. last_committed_id: HashValue, } impl<B> BlockTree<B> where B: Block, { /// Constructs a new `BlockTree`. pub fn new(last_committed_id: HashValue) -> Self { BlockTree { id_to_block: HashMap::new(), heads: HashSet::new(), last_committed_id, } } /// Adds a new block to the tree. pub fn add_block(&mut self, block: B) -> Result<(), AddBlockError<B>> {
/ Returns a reference to a specific block, if it exists in the tree. pub fn get_block(&self, id: HashValue) -> Option<&B> { self.id_to_block.get(&id) } /// Returns a mutable reference to a specific block, if it exists in the tree. pub fn get_block_mut(&mut self, id: HashValue) -> Option<&mut B> { self.id_to_block.get_mut(&id) } /// Returns id of a block that is ready to be sent to VM for execution (its parent has finished /// execution), if such block exists in the tree. pub fn get_block_to_execute(&mut self) -> Option<HashValue> { let mut to_visit: Vec<HashValue> = self.heads.iter().cloned().collect(); while let Some(id) = to_visit.pop() { let block = self .id_to_block .get(&id) .expect("Missing block in id_to_block."); if !block.is_executed() { return Some(id); } to_visit.extend(block.children().iter().cloned()); } None } /// Marks given block and all its uncommitted ancestors as committed. This does not cause these /// blocks to be sent to storage immediately. pub fn mark_as_committed( &mut self, id: HashValue, signature: B::Signature, ) -> Result<(), CommitBlockError> { // First put the signatures in the block. Note that if this causes multiple blocks to be // marked as committed, only the last one will have the signatures. match self.id_to_block.get_mut(&id) { Some(block) => { if block.is_committed() { bail_err!(CommitBlockError::BlockAlreadyMarkedAsCommitted { id }); } else { block.set_signature(signature); } } None => bail_err!(CommitBlockError::BlockNotFound { id }), } // Mark the current block as committed. Go to parent block and repeat until a committed // block is found, or no more blocks. let mut current_id = id; while let Some(block) = self.id_to_block.get_mut(&current_id) { if block.is_committed() { break; } block.set_committed(); current_id = block.parent_id(); } Ok(()) } /// Removes all blocks in the tree that conflict with committed blocks. 
Returns a list of /// blocks that are ready to be sent to storage (all the committed blocks that have been /// executed). pub fn prune(&mut self) -> Vec<B> { let mut blocks_to_store = vec![]; // First find if there is a committed block in current heads. Since these blocks are at the // same height, at most one of them can be committed. If all of them are pending we have // nothing to do here. Otherwise, one of the branches is committed. Throw away the rest of // them and advance to the next height. let mut current_heads = self.heads.clone(); while let Some(committed_head) = self.get_committed_head(&current_heads) { assert!( current_heads.remove(&committed_head), "committed_head should exist.", ); for id in current_heads { self.remove_branch(id); } match self.id_to_block.entry(committed_head) { hash_map::Entry::Occupied(entry) => { current_heads = entry.get().children().clone(); let current_id = *entry.key(); let parent_id = entry.get().parent_id(); if entry.get().is_executed() { // If this block has been executed, all its proper ancestors must have // finished execution and present in `blocks_to_store`. self.heads = current_heads.clone(); self.last_committed_id = current_id; blocks_to_store.push(entry.remove()); } else { // The current block has not finished execution. If the parent block does // not exist in the map, that means parent block (also committed) has been // executed and removed. Otherwise self.heads does not need to be changed. if !self.id_to_block.contains_key(&parent_id) { self.heads = HashSet::new(); self.heads.insert(current_id); } } } hash_map::Entry::Vacant(_) => unreachable!("committed_head_id should exist."), } } blocks_to_store } /// Given a list of heads, returns the committed one if it exists. 
fn get_committed_head(&self, heads: &HashSet<HashValue>) -> Option<HashValue> { let mut committed_head = None; for head in heads { let block = self .id_to_block .get(head) .expect("Head should exist in id_to_block."); if block.is_committed() { assert!( committed_head.is_none(), "Conflicting blocks are both committed.", ); committed_head = Some(*head); } } committed_head } /// Removes a branch at block `head`. fn remove_branch(&mut self, head: HashValue) { let mut remaining = vec![head]; while let Some(current_block_id) = remaining.pop() { let block = self .id_to_block .remove(&current_block_id) .unwrap_or_else(|| { panic!( "Trying to remove a non-existing block {:x}.", current_block_id, ) }); assert!( !block.is_committed(), "Trying to remove a committed block {:x}.", current_block_id, ); remaining.extend(block.children().iter()); } } /// Removes the entire subtree at block `id`. pub fn remove_subtree(&mut self, id: HashValue) { self.heads.remove(&id); self.remove_branch(id); } /// Resets the block tree with a new `last_committed_id`. This removes all the in-memory /// blocks. pub fn reset(&mut self, last_committed_id: HashValue) { let mut new_block_tree = BlockTree::new(last_committed_id); std::mem::swap(self, &mut new_block_tree); } } /// An error returned by `add_block`. The error contains the block being added so the caller does /// not lose it. 
#[derive(Debug, Eq, PartialEq)] pub enum AddBlockError<B: Block> { ParentNotFound { block: B }, BlockAlreadyExists { block: B }, } impl<B> AddBlockError<B> where B: Block, { pub fn into_block(self) -> B { match self { AddBlockError::ParentNotFound { block } => block, AddBlockError::BlockAlreadyExists { block } => block, } } } impl<B> std::fmt::Display for AddBlockError<B> where B: Block, { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { AddBlockError::ParentNotFound { block } => { write!(f, "Parent block {:x} was not found.", block.parent_id()) } AddBlockError::BlockAlreadyExists { block } => { write!(f, "Block {:x} already exists.", block.id()) } } } } /// An error returned by `mark_as_committed`. The error contains id of the block the caller wants /// to commit. #[derive(Debug, Eq, PartialEq)] pub enum CommitBlockError { BlockNotFound { id: HashValue }, BlockAlreadyMarkedAsCommitted { id: HashValue }, } impl std::fmt::Display for CommitBlockError { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { CommitBlockError::BlockNotFound { id } => write!(f, "Block {:x} was not found.", id), CommitBlockError::BlockAlreadyMarkedAsCommitted { id } => { write!(f, "Block {:x} was already marked as committed.", id) } } } }
assert!(!self.id_to_block.contains_key(&self.last_committed_id)); let id = block.id(); if self.id_to_block.contains_key(&id) { bail_err!(AddBlockError::BlockAlreadyExists { block }); } let parent_id = block.parent_id(); if parent_id == self.last_committed_id { assert!(self.heads.insert(id), "Block already existed in heads."); self.id_to_block.insert(id, block); return Ok(()); } match self.id_to_block.entry(parent_id) { hash_map::Entry::Occupied(mut entry) => { entry.get_mut().add_child(id); assert!( self.id_to_block.insert(id, block).is_none(), "Block {:x} already existed.", id, ); } hash_map::Entry::Vacant(_) => bail_err!(AddBlockError::ParentNotFound { block }), } Ok(()) } //
identifier_body
mod.rs
// Copyright (c) The XPeer Core Contributors // SPDX-License-Identifier: Apache-2.0 //! In a leader based consensus algorithm, each participant maintains a block tree that looks like //! the following: //! ```text //! Height 5 6 7 ... //! //! Committed -> B5 -> B6 -> B7 //! | //! └--> B5' -> B6' -> B7' //! | //! └----> B7" //! ``` //! This module implements `BlockTree` that is an in-memory representation of this tree. #[cfg(test)] mod block_tree_test; use crypto::HashValue; use failure::bail_err; use std::collections::{hash_map, HashMap, HashSet}; /// Each block has a unique identifier that is a `HashValue` computed by consensus. It has exactly /// one parent and zero or more children. pub trait Block: std::fmt::Debug { /// The output of executing this block. type Output; /// The signatures on this block. type Signature; /// Whether consensus has decided to commit this block. This kind of blocks are expected to be /// sent to storage very soon, unless execution is lagging behind. fn is_committed(&self) -> bool; /// Marks this block as committed. fn set_committed(&mut self); /// Whether this block has finished execution. fn is_executed(&self) -> bool; /// Sets the output of this block. fn set_output(&mut self, output: Self::Output); /// Sets the signatures for this block. fn set_signature(&mut self, signature: Self::Signature); /// The id of this block. fn id(&self) -> HashValue; /// The id of the parent block. fn parent_id(&self) -> HashValue; /// Adds a block as its child. fn add_child(&mut self, child_id: HashValue); /// The list of children of this block. fn children(&self) -> &HashSet<HashValue>; } /// The `BlockTree` implementation. #[derive(Debug)] pub struct BlockTree<B> { /// A map that keeps track of all existing blocks by their ids. id_to_block: HashMap<HashValue, B>, /// The blocks at the lowest height in the map. B5 and B5' in the following example. 
/// ```text /// Committed(B0..4) -> B5 -> B6 -> B7 /// | /// └--> B5' -> B6' -> B7' /// | /// └----> B7" /// ``` heads: HashSet<HashValue>, /// Id of the last committed block. B4 in the above example. last_committed_id: HashValue, } impl<B> BlockTree<B> where B: Block, { /// Constructs a new `BlockTree`. pub fn new(last_committed_id: HashValue) -> Self { BlockTree { id_to_block: HashMap::new(), heads: HashSet::new(), last_committed_id, } } /// Adds a new block to the tree. pub fn add_block(&mut self, block: B) -> Result<(), AddBlockError<B>> { assert!(!self.id_to_block.contains_key(&self.last_committed_id)); let id = block.id(); if self.id_to_block.contains_key(&id) { bail_err!(AddBlockError::BlockAlreadyExists { block }); } let parent_id = block.parent_id(); if parent_id == self.last_committed_id { assert!(self.heads.insert(id), "Block already existed in heads."); self.id_to_block.insert(id, block); return Ok(()); } match self.id_to_block.entry(parent_id) { hash_map::Entry::Occupied(mut entry) => { entry.get_mut().add_child(id); assert!( self.id_to_block.insert(id, block).is_none(), "Block {:x} already existed.", id, ); } hash_map::Entry::Vacant(_) => bail_err!(AddBlockError::ParentNotFound { block }), } Ok(()) } /// Returns a reference to a specific block, if it exists in the tree. pub fn get_block(&self, id: HashValue) -> Option<&B> { self.id_to_block.get(&id) } /// Returns a mutable reference to a specific block, if it exists in the tree. pub fn get_block_mut(&mut self, id: HashValue) -> Option<&mut B> { self.id_to_block.get_mut(&id) } /// Returns id of a block that is ready to be sent to VM for execution (its parent has finished /// execution), if such block exists in the tree. 
pub fn get_block_to_execute(&mut self) -> Option<HashValue> { let mut to_visit: Vec<HashValue> = self.heads.iter().cloned().collect(); while let Some(id) = to_visit.pop() { let block = self .id_to_block .get(&id) .expect("Missing block in id_to_block."); if !block.is_executed() { return Some(id); } to_visit.extend(block.children().iter().cloned()); } None } /// Marks given block and all its uncommitted ancestors as committed. This does not cause these /// blocks to be sent to storage immediately. pub fn mark_as_committed( &mut self, id: HashValue, signature: B::Signature, ) -> Result<(), CommitBlockError> { // First put the signatures in the block. Note that if this causes multiple blocks to be // marked as committed, only the last one will have the signatures. match self.id_to_block.get_mut(&id) { Some(block) => { if block.is_committed() { bail_err!(CommitBlockError::BlockAlreadyMarkedAsCommitted { id }); } else { block.set_signature(signature); } } None => bail_err!(CommitBlockError::BlockNotFound { id }), } // Mark the current block as committed. Go to parent block and repeat until a committed // block is found, or no more blocks. let mut current_id = id; while let Some(block) = self.id_to_block.get_mut(&current_id) { if block.is_committed() { break; } block.set_committed(); current_id = block.parent_id(); } Ok(()) } /// Removes all blocks in the tree that conflict with committed blocks. Returns a list of /// blocks that are ready to be sent to storage (all the committed blocks that have been /// executed). pub fn prune(&mut self) -> Vec<B> { let mut blocks_to_store = vec![]; // First find if there is a committed block in current heads. Since these blocks are at the // same height, at most one of them can be committed. If all of them are pending we have // nothing to do here. Otherwise, one of the branches is committed. Throw away the rest of // them and advance to the next height. 
let mut current_heads = self.heads.clone(); while let Some(committed_head) = self.get_committed_head(&current_heads) { assert!( current_heads.remove(&committed_head), "committed_head should exist.", ); for id in current_heads { self.remove_branch(id); } match self.id_to_block.entry(committed_head) { hash_map::Entry::Occupied(entry) => { current_heads = entry.get().children().clone(); let current_id = *entry.key(); let parent_id = entry.get().parent_id(); if entry.get().is_executed() { // If this block has been executed, all its proper ancestors must have // finished execution and present in `blocks_to_store`. self.heads = current_heads.clone(); self.last_committed_id = current_id; blocks_to_store.push(entry.remove()); } else { // The current block has not finished execution. If the parent block does // not exist in the map, that means parent block (also committed) has been // executed and removed. Otherwise self.heads does not need to be changed. if !self.id_to_block.contains_key(&parent_id) { self.heads = HashSet::new(); self.heads.insert(current_id); } } } hash_map::Entry::Vacant(_) => unreachable!("committed_head_id should exist."), } } blocks_to_store } /// Given a list of heads, returns the committed one if it exists. fn get_committed_head(&self, heads: &HashSet<HashValue>) -> Option<HashValue> { let mut committed_head = None; for head in heads { let block = self .id_to_block .get(head) .expect("Head should exist in id_to_block."); if block.is_committed() { assert!( committed_head.is_none(), "Conflicting blocks are both committed.", ); committed_head = Some(*head); } } committed_head } /// Removes a branch at block `head`. 
fn remove_branch(&mut self, head: HashValue) { let mut remaining = vec![head]; while let Some(current_block_id) = remaining.pop() { let block = self .id_to_block .remove(&current_block_id) .unwrap_or_else(|| { panic!( "Trying to remove a non-existing block {:x}.", current_block_id, ) }); assert!( !block.is_committed(), "Trying to remove a committed block {:x}.", current_block_id, ); remaining.extend(block.children().iter()); } } /// Removes the entire subtree at block `id`. pub fn remove_subtree(&mut self, id: HashValue) { self.heads.remove(&id); self.remove_branch(id); } /// Resets the block tree with a new `last_committed_id`. This removes all the in-memory /// blocks. pub fn reset(&m
lf, last_committed_id: HashValue) { let mut new_block_tree = BlockTree::new(last_committed_id); std::mem::swap(self, &mut new_block_tree); } } /// An error returned by `add_block`. The error contains the block being added so the caller does /// not lose it. #[derive(Debug, Eq, PartialEq)] pub enum AddBlockError<B: Block> { ParentNotFound { block: B }, BlockAlreadyExists { block: B }, } impl<B> AddBlockError<B> where B: Block, { pub fn into_block(self) -> B { match self { AddBlockError::ParentNotFound { block } => block, AddBlockError::BlockAlreadyExists { block } => block, } } } impl<B> std::fmt::Display for AddBlockError<B> where B: Block, { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { AddBlockError::ParentNotFound { block } => { write!(f, "Parent block {:x} was not found.", block.parent_id()) } AddBlockError::BlockAlreadyExists { block } => { write!(f, "Block {:x} already exists.", block.id()) } } } } /// An error returned by `mark_as_committed`. The error contains id of the block the caller wants /// to commit. #[derive(Debug, Eq, PartialEq)] pub enum CommitBlockError { BlockNotFound { id: HashValue }, BlockAlreadyMarkedAsCommitted { id: HashValue }, } impl std::fmt::Display for CommitBlockError { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { CommitBlockError::BlockNotFound { id } => write!(f, "Block {:x} was not found.", id), CommitBlockError::BlockAlreadyMarkedAsCommitted { id } => { write!(f, "Block {:x} was already marked as committed.", id) } } } }
ut se
identifier_name
mod.rs
// Copyright (c) The XPeer Core Contributors // SPDX-License-Identifier: Apache-2.0 //! In a leader based consensus algorithm, each participant maintains a block tree that looks like //! the following: //! ```text //! Height 5 6 7 ... //! //! Committed -> B5 -> B6 -> B7 //! | //! └--> B5' -> B6' -> B7' //! | //! └----> B7" //! ``` //! This module implements `BlockTree` that is an in-memory representation of this tree. #[cfg(test)] mod block_tree_test; use crypto::HashValue; use failure::bail_err; use std::collections::{hash_map, HashMap, HashSet}; /// Each block has a unique identifier that is a `HashValue` computed by consensus. It has exactly /// one parent and zero or more children. pub trait Block: std::fmt::Debug { /// The output of executing this block. type Output; /// The signatures on this block. type Signature; /// Whether consensus has decided to commit this block. This kind of blocks are expected to be /// sent to storage very soon, unless execution is lagging behind. fn is_committed(&self) -> bool; /// Marks this block as committed. fn set_committed(&mut self); /// Whether this block has finished execution. fn is_executed(&self) -> bool; /// Sets the output of this block. fn set_output(&mut self, output: Self::Output); /// Sets the signatures for this block. fn set_signature(&mut self, signature: Self::Signature); /// The id of this block. fn id(&self) -> HashValue; /// The id of the parent block. fn parent_id(&self) -> HashValue; /// Adds a block as its child. fn add_child(&mut self, child_id: HashValue); /// The list of children of this block. fn children(&self) -> &HashSet<HashValue>; } /// The `BlockTree` implementation. #[derive(Debug)] pub struct BlockTree<B> { /// A map that keeps track of all existing blocks by their ids. id_to_block: HashMap<HashValue, B>, /// The blocks at the lowest height in the map. B5 and B5' in the following example. 
/// ```text /// Committed(B0..4) -> B5 -> B6 -> B7 /// | /// └--> B5' -> B6' -> B7' /// | /// └----> B7" /// ``` heads: HashSet<HashValue>, /// Id of the last committed block. B4 in the above example. last_committed_id: HashValue, } impl<B> BlockTree<B> where B: Block, { /// Constructs a new `BlockTree`. pub fn new(last_committed_id: HashValue) -> Self { BlockTree { id_to_block: HashMap::new(), heads: HashSet::new(), last_committed_id, } } /// Adds a new block to the tree. pub fn add_block(&mut self, block: B) -> Result<(), AddBlockError<B>> { assert!(!self.id_to_block.contains_key(&self.last_committed_id)); let id = block.id(); if self.id_to_block.contains_key(&id) { bail_err!(AddBlockError::BlockAlreadyExists { block }); } let parent_id = block.parent_id(); if parent_id == self.last_committed_id { assert!(self.heads.insert(id), "Block already existed in heads."); self.id_to_block.insert(id, block); return Ok(()); } match self.id_to_block.entry(parent_id) { hash_map::Entry::Occupied(mut entry) => { entry.get_mut().add_child(id); assert!( self.id_to_block.insert(id, block).is_none(), "Block {:x} already existed.", id, ); } hash_map::Entry::Vacant(_) => bail_err!(AddBlockError::ParentNotFound { block }), } Ok(()) } /// Returns a reference to a specific block, if it exists in the tree. pub fn get_block(&self, id: HashValue) -> Option<&B> { self.id_to_block.get(&id) } /// Returns a mutable reference to a specific block, if it exists in the tree. pub fn get_block_mut(&mut self, id: HashValue) -> Option<&mut B> { self.id_to_block.get_mut(&id) } /// Returns id of a block that is ready to be sent to VM for execution (its parent has finished /// execution), if such block exists in the tree. 
pub fn get_block_to_execute(&mut self) -> Option<HashValue> { let mut to_visit: Vec<HashValue> = self.heads.iter().cloned().collect(); while let Some(id) = to_visit.pop() { let block = self .id_to_block .get(&id) .expect("Missing block in id_to_block."); if !block.is_executed() { return Some(id); } to_visit.extend(block.children().iter().cloned()); } None } /// Marks given block and all its uncommitted ancestors as committed. This does not cause these /// blocks to be sent to storage immediately. pub fn mark_as_committed( &mut self, id: HashValue, signature: B::Signature, ) -> Result<(), CommitBlockError> { // First put the signatures in the block. Note that if this causes multiple blocks to be // marked as committed, only the last one will have the signatures. match self.id_to_block.get_mut(&id) { Some(block) => { if block.is_committed() { bail_err!(CommitBlockError::BlockAlreadyMarkedAsCommitted { id }); } else { block.set_signature(signature); } } None => bail_err!(CommitBlockError::BlockNotFound { id }), } // Mark the current block as committed. Go to parent block and repeat until a committed // block is found, or no more blocks. let mut current_id = id; while let Some(block) = self.id_to_block.get_mut(&current_id) { if block.is_committed() { break; } block.set_committed(); current_id = block.parent_id(); } Ok(()) } /// Removes all blocks in the tree that conflict with committed blocks. Returns a list of /// blocks that are ready to be sent to storage (all the committed blocks that have been /// executed). pub fn prune(&mut self) -> Vec<B> { let mut blocks_to_store = vec![];
let mut current_heads = self.heads.clone(); while let Some(committed_head) = self.get_committed_head(&current_heads) { assert!( current_heads.remove(&committed_head), "committed_head should exist.", ); for id in current_heads { self.remove_branch(id); } match self.id_to_block.entry(committed_head) { hash_map::Entry::Occupied(entry) => { current_heads = entry.get().children().clone(); let current_id = *entry.key(); let parent_id = entry.get().parent_id(); if entry.get().is_executed() { // If this block has been executed, all its proper ancestors must have // finished execution and present in `blocks_to_store`. self.heads = current_heads.clone(); self.last_committed_id = current_id; blocks_to_store.push(entry.remove()); } else { // The current block has not finished execution. If the parent block does // not exist in the map, that means parent block (also committed) has been // executed and removed. Otherwise self.heads does not need to be changed. if !self.id_to_block.contains_key(&parent_id) { self.heads = HashSet::new(); self.heads.insert(current_id); } } } hash_map::Entry::Vacant(_) => unreachable!("committed_head_id should exist."), } } blocks_to_store } /// Given a list of heads, returns the committed one if it exists. fn get_committed_head(&self, heads: &HashSet<HashValue>) -> Option<HashValue> { let mut committed_head = None; for head in heads { let block = self .id_to_block .get(head) .expect("Head should exist in id_to_block."); if block.is_committed() { assert!( committed_head.is_none(), "Conflicting blocks are both committed.", ); committed_head = Some(*head); } } committed_head } /// Removes a branch at block `head`. 
fn remove_branch(&mut self, head: HashValue) { let mut remaining = vec![head]; while let Some(current_block_id) = remaining.pop() { let block = self .id_to_block .remove(&current_block_id) .unwrap_or_else(|| { panic!( "Trying to remove a non-existing block {:x}.", current_block_id, ) }); assert!( !block.is_committed(), "Trying to remove a committed block {:x}.", current_block_id, ); remaining.extend(block.children().iter()); } } /// Removes the entire subtree at block `id`. pub fn remove_subtree(&mut self, id: HashValue) { self.heads.remove(&id); self.remove_branch(id); } /// Resets the block tree with a new `last_committed_id`. This removes all the in-memory /// blocks. pub fn reset(&mut self, last_committed_id: HashValue) { let mut new_block_tree = BlockTree::new(last_committed_id); std::mem::swap(self, &mut new_block_tree); } } /// An error returned by `add_block`. The error contains the block being added so the caller does /// not lose it. #[derive(Debug, Eq, PartialEq)] pub enum AddBlockError<B: Block> { ParentNotFound { block: B }, BlockAlreadyExists { block: B }, } impl<B> AddBlockError<B> where B: Block, { pub fn into_block(self) -> B { match self { AddBlockError::ParentNotFound { block } => block, AddBlockError::BlockAlreadyExists { block } => block, } } } impl<B> std::fmt::Display for AddBlockError<B> where B: Block, { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { AddBlockError::ParentNotFound { block } => { write!(f, "Parent block {:x} was not found.", block.parent_id()) } AddBlockError::BlockAlreadyExists { block } => { write!(f, "Block {:x} already exists.", block.id()) } } } } /// An error returned by `mark_as_committed`. The error contains id of the block the caller wants /// to commit. 
#[derive(Debug, Eq, PartialEq)] pub enum CommitBlockError { BlockNotFound { id: HashValue }, BlockAlreadyMarkedAsCommitted { id: HashValue }, } impl std::fmt::Display for CommitBlockError { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { CommitBlockError::BlockNotFound { id } => write!(f, "Block {:x} was not found.", id), CommitBlockError::BlockAlreadyMarkedAsCommitted { id } => { write!(f, "Block {:x} was already marked as committed.", id) } } } }
// First find if there is a committed block in current heads. Since these blocks are at the // same height, at most one of them can be committed. If all of them are pending we have // nothing to do here. Otherwise, one of the branches is committed. Throw away the rest of // them and advance to the next height.
random_line_split
mod.rs
use anyhow::{bail, format_err, Error}; use std::ffi::{CStr, CString}; mod tm_editor; pub use tm_editor::*; /// Safe bindings to libc timelocal /// /// We set tm_isdst to -1. /// This also normalizes the parameter pub fn timelocal(t: &mut libc::tm) -> Result<i64, Error> { t.tm_isdst = -1; let epoch = unsafe { libc::mktime(t) }; if epoch == -1 { bail!("libc::mktime failed for {:?}", t); } Ok(epoch) } /// Safe bindings to libc timegm /// /// We set tm_isdst to 0. /// This also normalizes the parameter pub fn timegm(t: &mut libc::tm) -> Result<i64, Error> { t.tm_isdst = 0; let epoch = unsafe { libc::timegm(t) }; if epoch == -1 { bail!("libc::timegm failed for {:?}", t); } Ok(epoch) } fn new_libc_tm() -> libc::tm { libc::tm { tm_sec: 0, tm_min: 0, tm_hour: 0, tm_mday: 0, tm_mon: 0, tm_year: 0, tm_wday: 0, tm_yday: 0, tm_isdst: 0, tm_gmtoff: 0, tm_zone: std::ptr::null(), } } /// Safe bindings to libc localtime pub fn localtime(epoch: i64) -> Result<libc::tm, Error> { let mut result = new_libc_tm(); unsafe { if libc::localtime_r(&epoch, &mut result).is_null() { bail!("libc::localtime failed for '{}'", epoch); } } Ok(result) } /// Safe bindings to libc gmtime pub fn gmtime(epoch: i64) -> Result<libc::tm, Error> { let mut result = new_libc_tm(); unsafe { if libc::gmtime_r(&epoch, &mut result).is_null() { bail!("libc::gmtime failed for '{}'", epoch); } } Ok(result) } /// Returns Unix Epoch (now) /// /// Note: This panics if the SystemTime::now() returns values not /// repesentable as i64 (should never happen). 
pub fn epoch_i64() -> i64 { use std::convert::TryFrom; use std::time::{SystemTime, UNIX_EPOCH}; let now = SystemTime::now(); if now > UNIX_EPOCH { i64::try_from(now.duration_since(UNIX_EPOCH).unwrap().as_secs()) .expect("epoch_i64: now is too large") } else { -i64::try_from(UNIX_EPOCH.duration_since(now).unwrap().as_secs()) .expect("epoch_i64: now is too small") } } /// Returns Unix Epoch (now) as f64 with subseconds resolution /// /// Note: This can be inacurrate for values greater the 2^53. But this /// should never happen. pub fn epoch_f64() -> f64 { use std::time::{SystemTime, UNIX_EPOCH}; let now = SystemTime::now(); if now > UNIX_EPOCH { now.duration_since(UNIX_EPOCH).unwrap().as_secs_f64() } else { -UNIX_EPOCH.duration_since(now).unwrap().as_secs_f64() } } // rust libc bindings do not include strftime #[link(name = "c")] extern "C" { #[link_name = "strftime"] fn libc_strftime( s: *mut libc::c_char, max: libc::size_t, format: *const libc::c_char, time: *const libc::tm, ) -> libc::size_t; } /// Safe bindings to libc strftime pub fn strftime(format: &str, t: &libc::tm) -> Result<String, Error> { let format = CString::new(format)?; let mut buf = vec![0u8; 8192]; let res = unsafe { libc_strftime( buf.as_mut_ptr() as *mut libc::c_char, buf.len() as libc::size_t, format.as_ptr(), t as *const libc::tm, ) }; let len = nix::errno::Errno::result(res).map(|r| r as usize)?; if len == 0 { bail!("strftime: result len is 0 (string too large)"); }; let c_str = CStr::from_bytes_with_nul(&buf[..len + 1])?; let str_slice: &str = c_str.to_str().unwrap(); Ok(str_slice.to_owned()) } /// Format epoch as local time pub fn strftime_local(format: &str, epoch: i64) -> Result<String, Error> { let localtime = localtime(epoch)?; strftime(format, &localtime) } /// Format epoch as utc time pub fn strftime_utc(format: &str, epoch: i64) -> Result<String, Error> { let gmtime = gmtime(epoch)?; strftime(format, &gmtime) } /// Convert Unix epoch into RFC3339 UTC string pub fn 
epoch_to_rfc3339_utc(epoch: i64) -> Result<String, Error> { let gmtime = gmtime(epoch)?; let year = gmtime.tm_year + 1900; if year < 0 || year > 9999 { bail!("epoch_to_rfc3339_utc: wrong year '{}'", year); } strftime("%010FT%TZ", &gmtime) } /// Convert Unix epoch into RFC3339 local time with TZ pub fn epoch_to_rfc3339(epoch: i64) -> Result<String, Error> { let localtime = localtime(epoch)?; let year = localtime.tm_year + 1900; if year < 0 || year > 9999 { bail!("epoch_to_rfc3339: wrong year '{}'", year); } // Note: We cannot use strftime %z because of missing collon let mut offset = localtime.tm_gmtoff; let prefix = if offset < 0 { offset = -offset; '-' } else { '+' }; let mins = offset / 60; let hours = mins / 60; let mins = mins % 60; let mut s = strftime("%10FT%T", &localtime)?; s.push(prefix); s.push_str(&format!("{:02}:{:02}", hours, mins)); Ok(s) } /// Parse RFC3339 into Unix epoch pub fn parse_rfc3339(input_str: &str) -> Result<i64, Error> { let input = input_str.as_bytes(); let expect = |pos: usize, c: u8| { if input[pos] != c { bail!("unexpected char at pos {}", pos); } Ok(()) }; let digit = |pos: usize| -> Result<i32, Error> { let digit = input[pos] as i32; if digit < 48 || digit > 57
Ok(digit - 48) }; let check_max = |i: i32, max: i32| { if i > max { bail!("value too large ({} > {})", i, max); } Ok(i) }; crate::try_block!({ if input.len() < 20 || input.len() > 25 { bail!("timestamp of unexpected length"); } let tz = input[19]; match tz { b'Z' => { if input.len() != 20 { bail!("unexpected length in UTC timestamp"); } } b'+' | b'-' => { if input.len() != 25 { bail!("unexpected length in timestamp"); } } _ => bail!("unexpected timezone indicator"), } let mut tm = TmEditor::new(true); tm.set_year(digit(0)? * 1000 + digit(1)? * 100 + digit(2)? * 10 + digit(3)?)?; expect(4, b'-')?; tm.set_mon(check_max(digit(5)? * 10 + digit(6)?, 12)?)?; expect(7, b'-')?; tm.set_mday(check_max(digit(8)? * 10 + digit(9)?, 31)?)?; expect(10, b'T')?; tm.set_hour(check_max(digit(11)? * 10 + digit(12)?, 23)?)?; expect(13, b':')?; tm.set_min(check_max(digit(14)? * 10 + digit(15)?, 59)?)?; expect(16, b':')?; tm.set_sec(check_max(digit(17)? * 10 + digit(18)?, 60)?)?; let epoch = tm.into_epoch()?; if tz == b'Z' { return Ok(epoch); } let hours = check_max(digit(20)? * 10 + digit(21)?, 23)?; expect(22, b':')?; let mins = check_max(digit(23)? 
* 10 + digit(24)?, 59)?; let offset = (hours * 3600 + mins * 60) as i64; let epoch = match tz { b'+' => epoch - offset, b'-' => epoch + offset, _ => unreachable!(), // already checked above }; Ok(epoch) }) .map_err(|err| { format_err!( "failed to parse rfc3339 timestamp ({:?}) - {}", input_str, err ) }) } #[test] fn test_leap_seconds() { let convert_reconvert = |epoch| { let rfc3339 = epoch_to_rfc3339_utc(epoch).expect("leap second epoch to rfc3339 should work"); let parsed = parse_rfc3339(&rfc3339).expect("parsing converted leap second epoch should work"); assert_eq!(epoch, parsed); }; // 2005-12-31T23:59:59Z was followed by a leap second let epoch = 1136073599; convert_reconvert(epoch); convert_reconvert(epoch + 1); convert_reconvert(epoch + 2); let parsed = parse_rfc3339("2005-12-31T23:59:60Z").expect("parsing leap second should work"); assert_eq!(parsed, epoch + 1); } #[test] fn test_rfc3339_range() { // also tests single-digit years/first decade values let lower = -62167219200; let lower_str = "0000-01-01T00:00:00Z"; let upper = 253402300799; let upper_str = "9999-12-31T23:59:59Z"; let converted = epoch_to_rfc3339_utc(lower).expect("converting lower bound of RFC3339 range should work"); assert_eq!(converted, lower_str); let converted = epoch_to_rfc3339_utc(upper).expect("converting upper bound of RFC3339 range should work"); assert_eq!(converted, upper_str); let parsed = parse_rfc3339(lower_str).expect("parsing lower bound of RFC3339 range should work"); assert_eq!(parsed, lower); let parsed = parse_rfc3339(upper_str).expect("parsing upper bound of RFC3339 range should work"); assert_eq!(parsed, upper); epoch_to_rfc3339_utc(lower - 1) .expect_err("converting below lower bound of RFC3339 range should fail"); epoch_to_rfc3339_utc(upper + 1) .expect_err("converting above upper bound of RFC3339 range should fail"); let first_century = -59011459201; let first_century_str = "0099-12-31T23:59:59Z"; let converted = epoch_to_rfc3339_utc(first_century) 
.expect("converting epoch representing first century year should work"); assert_eq!(converted, first_century_str); let parsed = parse_rfc3339(first_century_str).expect("parsing first century string should work"); assert_eq!(parsed, first_century); let first_millenium = -59011459200; let first_millenium_str = "0100-01-01T00:00:00Z"; let converted = epoch_to_rfc3339_utc(first_millenium) .expect("converting epoch representing first millenium year should work"); assert_eq!(converted, first_millenium_str); let parsed = parse_rfc3339(first_millenium_str).expect("parsing first millenium string should work"); assert_eq!(parsed, first_millenium); } #[test] fn test_gmtime_range() { // year must fit into i32 let lower = -67768040609740800; let upper = 67768036191676799; let mut lower_tm = gmtime(lower).expect("gmtime should work as long as years fit into i32"); let res = timegm(&mut lower_tm).expect("converting back to epoch should work"); assert_eq!(lower, res); gmtime(lower - 1).expect_err("gmtime should fail for years not fitting into i32"); let mut upper_tm = gmtime(upper).expect("gmtime should work as long as years fit into i32"); let res = timegm(&mut upper_tm).expect("converting back to epoch should work"); assert_eq!(upper, res); gmtime(upper + 1).expect_err("gmtime should fail for years not fitting into i32"); } #[test] fn test_timezones() { let input = "2020-12-30T00:00:00+06:30"; let epoch = 1609263000; let expected_utc = "2020-12-29T17:30:00Z"; let parsed = parse_rfc3339(input).expect("parsing failed"); assert_eq!(parsed, epoch); let res = epoch_to_rfc3339_utc(parsed).expect("converting to RFC failed"); assert_eq!(expected_utc, res); }
{ bail!("unexpected char at pos {}", pos); }
conditional_block
mod.rs
use anyhow::{bail, format_err, Error}; use std::ffi::{CStr, CString}; mod tm_editor; pub use tm_editor::*; /// Safe bindings to libc timelocal /// /// We set tm_isdst to -1. /// This also normalizes the parameter pub fn timelocal(t: &mut libc::tm) -> Result<i64, Error> { t.tm_isdst = -1; let epoch = unsafe { libc::mktime(t) }; if epoch == -1 { bail!("libc::mktime failed for {:?}", t); } Ok(epoch) } /// Safe bindings to libc timegm /// /// We set tm_isdst to 0. /// This also normalizes the parameter pub fn timegm(t: &mut libc::tm) -> Result<i64, Error> { t.tm_isdst = 0; let epoch = unsafe { libc::timegm(t) }; if epoch == -1 { bail!("libc::timegm failed for {:?}", t); } Ok(epoch) } fn new_libc_tm() -> libc::tm { libc::tm { tm_sec: 0, tm_min: 0, tm_hour: 0, tm_mday: 0, tm_mon: 0, tm_year: 0, tm_wday: 0, tm_yday: 0, tm_isdst: 0, tm_gmtoff: 0, tm_zone: std::ptr::null(), } } /// Safe bindings to libc localtime pub fn localtime(epoch: i64) -> Result<libc::tm, Error> { let mut result = new_libc_tm(); unsafe { if libc::localtime_r(&epoch, &mut result).is_null() { bail!("libc::localtime failed for '{}'", epoch); } } Ok(result) } /// Safe bindings to libc gmtime pub fn gmtime(epoch: i64) -> Result<libc::tm, Error> { let mut result = new_libc_tm(); unsafe { if libc::gmtime_r(&epoch, &mut result).is_null() { bail!("libc::gmtime failed for '{}'", epoch); } } Ok(result) } /// Returns Unix Epoch (now) /// /// Note: This panics if the SystemTime::now() returns values not /// repesentable as i64 (should never happen). 
pub fn epoch_i64() -> i64 { use std::convert::TryFrom; use std::time::{SystemTime, UNIX_EPOCH}; let now = SystemTime::now(); if now > UNIX_EPOCH { i64::try_from(now.duration_since(UNIX_EPOCH).unwrap().as_secs()) .expect("epoch_i64: now is too large") } else { -i64::try_from(UNIX_EPOCH.duration_since(now).unwrap().as_secs()) .expect("epoch_i64: now is too small") } } /// Returns Unix Epoch (now) as f64 with subseconds resolution /// /// Note: This can be inacurrate for values greater the 2^53. But this /// should never happen. pub fn epoch_f64() -> f64 { use std::time::{SystemTime, UNIX_EPOCH}; let now = SystemTime::now(); if now > UNIX_EPOCH { now.duration_since(UNIX_EPOCH).unwrap().as_secs_f64() } else { -UNIX_EPOCH.duration_since(now).unwrap().as_secs_f64() } } // rust libc bindings do not include strftime #[link(name = "c")] extern "C" { #[link_name = "strftime"] fn libc_strftime( s: *mut libc::c_char, max: libc::size_t, format: *const libc::c_char, time: *const libc::tm, ) -> libc::size_t; } /// Safe bindings to libc strftime pub fn strftime(format: &str, t: &libc::tm) -> Result<String, Error> { let format = CString::new(format)?; let mut buf = vec![0u8; 8192]; let res = unsafe { libc_strftime( buf.as_mut_ptr() as *mut libc::c_char, buf.len() as libc::size_t, format.as_ptr(), t as *const libc::tm, ) }; let len = nix::errno::Errno::result(res).map(|r| r as usize)?; if len == 0 { bail!("strftime: result len is 0 (string too large)"); }; let c_str = CStr::from_bytes_with_nul(&buf[..len + 1])?; let str_slice: &str = c_str.to_str().unwrap(); Ok(str_slice.to_owned()) } /// Format epoch as local time pub fn
(format: &str, epoch: i64) -> Result<String, Error> { let localtime = localtime(epoch)?; strftime(format, &localtime) } /// Format epoch as utc time pub fn strftime_utc(format: &str, epoch: i64) -> Result<String, Error> { let gmtime = gmtime(epoch)?; strftime(format, &gmtime) } /// Convert Unix epoch into RFC3339 UTC string pub fn epoch_to_rfc3339_utc(epoch: i64) -> Result<String, Error> { let gmtime = gmtime(epoch)?; let year = gmtime.tm_year + 1900; if year < 0 || year > 9999 { bail!("epoch_to_rfc3339_utc: wrong year '{}'", year); } strftime("%010FT%TZ", &gmtime) } /// Convert Unix epoch into RFC3339 local time with TZ pub fn epoch_to_rfc3339(epoch: i64) -> Result<String, Error> { let localtime = localtime(epoch)?; let year = localtime.tm_year + 1900; if year < 0 || year > 9999 { bail!("epoch_to_rfc3339: wrong year '{}'", year); } // Note: We cannot use strftime %z because of missing collon let mut offset = localtime.tm_gmtoff; let prefix = if offset < 0 { offset = -offset; '-' } else { '+' }; let mins = offset / 60; let hours = mins / 60; let mins = mins % 60; let mut s = strftime("%10FT%T", &localtime)?; s.push(prefix); s.push_str(&format!("{:02}:{:02}", hours, mins)); Ok(s) } /// Parse RFC3339 into Unix epoch pub fn parse_rfc3339(input_str: &str) -> Result<i64, Error> { let input = input_str.as_bytes(); let expect = |pos: usize, c: u8| { if input[pos] != c { bail!("unexpected char at pos {}", pos); } Ok(()) }; let digit = |pos: usize| -> Result<i32, Error> { let digit = input[pos] as i32; if digit < 48 || digit > 57 { bail!("unexpected char at pos {}", pos); } Ok(digit - 48) }; let check_max = |i: i32, max: i32| { if i > max { bail!("value too large ({} > {})", i, max); } Ok(i) }; crate::try_block!({ if input.len() < 20 || input.len() > 25 { bail!("timestamp of unexpected length"); } let tz = input[19]; match tz { b'Z' => { if input.len() != 20 { bail!("unexpected length in UTC timestamp"); } } b'+' | b'-' => { if input.len() != 25 { bail!("unexpected length 
in timestamp"); } } _ => bail!("unexpected timezone indicator"), } let mut tm = TmEditor::new(true); tm.set_year(digit(0)? * 1000 + digit(1)? * 100 + digit(2)? * 10 + digit(3)?)?; expect(4, b'-')?; tm.set_mon(check_max(digit(5)? * 10 + digit(6)?, 12)?)?; expect(7, b'-')?; tm.set_mday(check_max(digit(8)? * 10 + digit(9)?, 31)?)?; expect(10, b'T')?; tm.set_hour(check_max(digit(11)? * 10 + digit(12)?, 23)?)?; expect(13, b':')?; tm.set_min(check_max(digit(14)? * 10 + digit(15)?, 59)?)?; expect(16, b':')?; tm.set_sec(check_max(digit(17)? * 10 + digit(18)?, 60)?)?; let epoch = tm.into_epoch()?; if tz == b'Z' { return Ok(epoch); } let hours = check_max(digit(20)? * 10 + digit(21)?, 23)?; expect(22, b':')?; let mins = check_max(digit(23)? * 10 + digit(24)?, 59)?; let offset = (hours * 3600 + mins * 60) as i64; let epoch = match tz { b'+' => epoch - offset, b'-' => epoch + offset, _ => unreachable!(), // already checked above }; Ok(epoch) }) .map_err(|err| { format_err!( "failed to parse rfc3339 timestamp ({:?}) - {}", input_str, err ) }) } #[test] fn test_leap_seconds() { let convert_reconvert = |epoch| { let rfc3339 = epoch_to_rfc3339_utc(epoch).expect("leap second epoch to rfc3339 should work"); let parsed = parse_rfc3339(&rfc3339).expect("parsing converted leap second epoch should work"); assert_eq!(epoch, parsed); }; // 2005-12-31T23:59:59Z was followed by a leap second let epoch = 1136073599; convert_reconvert(epoch); convert_reconvert(epoch + 1); convert_reconvert(epoch + 2); let parsed = parse_rfc3339("2005-12-31T23:59:60Z").expect("parsing leap second should work"); assert_eq!(parsed, epoch + 1); } #[test] fn test_rfc3339_range() { // also tests single-digit years/first decade values let lower = -62167219200; let lower_str = "0000-01-01T00:00:00Z"; let upper = 253402300799; let upper_str = "9999-12-31T23:59:59Z"; let converted = epoch_to_rfc3339_utc(lower).expect("converting lower bound of RFC3339 range should work"); assert_eq!(converted, lower_str); let converted 
= epoch_to_rfc3339_utc(upper).expect("converting upper bound of RFC3339 range should work"); assert_eq!(converted, upper_str); let parsed = parse_rfc3339(lower_str).expect("parsing lower bound of RFC3339 range should work"); assert_eq!(parsed, lower); let parsed = parse_rfc3339(upper_str).expect("parsing upper bound of RFC3339 range should work"); assert_eq!(parsed, upper); epoch_to_rfc3339_utc(lower - 1) .expect_err("converting below lower bound of RFC3339 range should fail"); epoch_to_rfc3339_utc(upper + 1) .expect_err("converting above upper bound of RFC3339 range should fail"); let first_century = -59011459201; let first_century_str = "0099-12-31T23:59:59Z"; let converted = epoch_to_rfc3339_utc(first_century) .expect("converting epoch representing first century year should work"); assert_eq!(converted, first_century_str); let parsed = parse_rfc3339(first_century_str).expect("parsing first century string should work"); assert_eq!(parsed, first_century); let first_millenium = -59011459200; let first_millenium_str = "0100-01-01T00:00:00Z"; let converted = epoch_to_rfc3339_utc(first_millenium) .expect("converting epoch representing first millenium year should work"); assert_eq!(converted, first_millenium_str); let parsed = parse_rfc3339(first_millenium_str).expect("parsing first millenium string should work"); assert_eq!(parsed, first_millenium); } #[test] fn test_gmtime_range() { // year must fit into i32 let lower = -67768040609740800; let upper = 67768036191676799; let mut lower_tm = gmtime(lower).expect("gmtime should work as long as years fit into i32"); let res = timegm(&mut lower_tm).expect("converting back to epoch should work"); assert_eq!(lower, res); gmtime(lower - 1).expect_err("gmtime should fail for years not fitting into i32"); let mut upper_tm = gmtime(upper).expect("gmtime should work as long as years fit into i32"); let res = timegm(&mut upper_tm).expect("converting back to epoch should work"); assert_eq!(upper, res); gmtime(upper + 
1).expect_err("gmtime should fail for years not fitting into i32"); } #[test] fn test_timezones() { let input = "2020-12-30T00:00:00+06:30"; let epoch = 1609263000; let expected_utc = "2020-12-29T17:30:00Z"; let parsed = parse_rfc3339(input).expect("parsing failed"); assert_eq!(parsed, epoch); let res = epoch_to_rfc3339_utc(parsed).expect("converting to RFC failed"); assert_eq!(expected_utc, res); }
strftime_local
identifier_name
mod.rs
use anyhow::{bail, format_err, Error}; use std::ffi::{CStr, CString}; mod tm_editor; pub use tm_editor::*; /// Safe bindings to libc timelocal /// /// We set tm_isdst to -1. /// This also normalizes the parameter pub fn timelocal(t: &mut libc::tm) -> Result<i64, Error> { t.tm_isdst = -1; let epoch = unsafe { libc::mktime(t) }; if epoch == -1 { bail!("libc::mktime failed for {:?}", t); } Ok(epoch) } /// Safe bindings to libc timegm /// /// We set tm_isdst to 0. /// This also normalizes the parameter pub fn timegm(t: &mut libc::tm) -> Result<i64, Error> { t.tm_isdst = 0; let epoch = unsafe { libc::timegm(t) }; if epoch == -1 { bail!("libc::timegm failed for {:?}", t); } Ok(epoch) } fn new_libc_tm() -> libc::tm { libc::tm { tm_sec: 0, tm_min: 0, tm_hour: 0, tm_mday: 0, tm_mon: 0, tm_year: 0, tm_wday: 0, tm_yday: 0, tm_isdst: 0, tm_gmtoff: 0, tm_zone: std::ptr::null(), } } /// Safe bindings to libc localtime pub fn localtime(epoch: i64) -> Result<libc::tm, Error> { let mut result = new_libc_tm(); unsafe { if libc::localtime_r(&epoch, &mut result).is_null() { bail!("libc::localtime failed for '{}'", epoch); } } Ok(result) } /// Safe bindings to libc gmtime pub fn gmtime(epoch: i64) -> Result<libc::tm, Error> { let mut result = new_libc_tm(); unsafe { if libc::gmtime_r(&epoch, &mut result).is_null() { bail!("libc::gmtime failed for '{}'", epoch); } } Ok(result) } /// Returns Unix Epoch (now) /// /// Note: This panics if the SystemTime::now() returns values not /// repesentable as i64 (should never happen). 
pub fn epoch_i64() -> i64 { use std::convert::TryFrom; use std::time::{SystemTime, UNIX_EPOCH}; let now = SystemTime::now(); if now > UNIX_EPOCH { i64::try_from(now.duration_since(UNIX_EPOCH).unwrap().as_secs()) .expect("epoch_i64: now is too large") } else { -i64::try_from(UNIX_EPOCH.duration_since(now).unwrap().as_secs()) .expect("epoch_i64: now is too small") } } /// Returns Unix Epoch (now) as f64 with subseconds resolution /// /// Note: This can be inacurrate for values greater the 2^53. But this /// should never happen. pub fn epoch_f64() -> f64 { use std::time::{SystemTime, UNIX_EPOCH}; let now = SystemTime::now(); if now > UNIX_EPOCH { now.duration_since(UNIX_EPOCH).unwrap().as_secs_f64() } else { -UNIX_EPOCH.duration_since(now).unwrap().as_secs_f64() } } // rust libc bindings do not include strftime #[link(name = "c")] extern "C" { #[link_name = "strftime"] fn libc_strftime( s: *mut libc::c_char, max: libc::size_t, format: *const libc::c_char, time: *const libc::tm, ) -> libc::size_t; } /// Safe bindings to libc strftime pub fn strftime(format: &str, t: &libc::tm) -> Result<String, Error> { let format = CString::new(format)?; let mut buf = vec![0u8; 8192]; let res = unsafe { libc_strftime( buf.as_mut_ptr() as *mut libc::c_char, buf.len() as libc::size_t, format.as_ptr(), t as *const libc::tm, ) }; let len = nix::errno::Errno::result(res).map(|r| r as usize)?; if len == 0 { bail!("strftime: result len is 0 (string too large)"); }; let c_str = CStr::from_bytes_with_nul(&buf[..len + 1])?; let str_slice: &str = c_str.to_str().unwrap(); Ok(str_slice.to_owned()) } /// Format epoch as local time pub fn strftime_local(format: &str, epoch: i64) -> Result<String, Error> { let localtime = localtime(epoch)?; strftime(format, &localtime) } /// Format epoch as utc time pub fn strftime_utc(format: &str, epoch: i64) -> Result<String, Error>
/// Convert Unix epoch into RFC3339 UTC string pub fn epoch_to_rfc3339_utc(epoch: i64) -> Result<String, Error> { let gmtime = gmtime(epoch)?; let year = gmtime.tm_year + 1900; if year < 0 || year > 9999 { bail!("epoch_to_rfc3339_utc: wrong year '{}'", year); } strftime("%010FT%TZ", &gmtime) } /// Convert Unix epoch into RFC3339 local time with TZ pub fn epoch_to_rfc3339(epoch: i64) -> Result<String, Error> { let localtime = localtime(epoch)?; let year = localtime.tm_year + 1900; if year < 0 || year > 9999 { bail!("epoch_to_rfc3339: wrong year '{}'", year); } // Note: We cannot use strftime %z because of missing collon let mut offset = localtime.tm_gmtoff; let prefix = if offset < 0 { offset = -offset; '-' } else { '+' }; let mins = offset / 60; let hours = mins / 60; let mins = mins % 60; let mut s = strftime("%10FT%T", &localtime)?; s.push(prefix); s.push_str(&format!("{:02}:{:02}", hours, mins)); Ok(s) } /// Parse RFC3339 into Unix epoch pub fn parse_rfc3339(input_str: &str) -> Result<i64, Error> { let input = input_str.as_bytes(); let expect = |pos: usize, c: u8| { if input[pos] != c { bail!("unexpected char at pos {}", pos); } Ok(()) }; let digit = |pos: usize| -> Result<i32, Error> { let digit = input[pos] as i32; if digit < 48 || digit > 57 { bail!("unexpected char at pos {}", pos); } Ok(digit - 48) }; let check_max = |i: i32, max: i32| { if i > max { bail!("value too large ({} > {})", i, max); } Ok(i) }; crate::try_block!({ if input.len() < 20 || input.len() > 25 { bail!("timestamp of unexpected length"); } let tz = input[19]; match tz { b'Z' => { if input.len() != 20 { bail!("unexpected length in UTC timestamp"); } } b'+' | b'-' => { if input.len() != 25 { bail!("unexpected length in timestamp"); } } _ => bail!("unexpected timezone indicator"), } let mut tm = TmEditor::new(true); tm.set_year(digit(0)? * 1000 + digit(1)? * 100 + digit(2)? * 10 + digit(3)?)?; expect(4, b'-')?; tm.set_mon(check_max(digit(5)? 
* 10 + digit(6)?, 12)?)?; expect(7, b'-')?; tm.set_mday(check_max(digit(8)? * 10 + digit(9)?, 31)?)?; expect(10, b'T')?; tm.set_hour(check_max(digit(11)? * 10 + digit(12)?, 23)?)?; expect(13, b':')?; tm.set_min(check_max(digit(14)? * 10 + digit(15)?, 59)?)?; expect(16, b':')?; tm.set_sec(check_max(digit(17)? * 10 + digit(18)?, 60)?)?; let epoch = tm.into_epoch()?; if tz == b'Z' { return Ok(epoch); } let hours = check_max(digit(20)? * 10 + digit(21)?, 23)?; expect(22, b':')?; let mins = check_max(digit(23)? * 10 + digit(24)?, 59)?; let offset = (hours * 3600 + mins * 60) as i64; let epoch = match tz { b'+' => epoch - offset, b'-' => epoch + offset, _ => unreachable!(), // already checked above }; Ok(epoch) }) .map_err(|err| { format_err!( "failed to parse rfc3339 timestamp ({:?}) - {}", input_str, err ) }) } #[test] fn test_leap_seconds() { let convert_reconvert = |epoch| { let rfc3339 = epoch_to_rfc3339_utc(epoch).expect("leap second epoch to rfc3339 should work"); let parsed = parse_rfc3339(&rfc3339).expect("parsing converted leap second epoch should work"); assert_eq!(epoch, parsed); }; // 2005-12-31T23:59:59Z was followed by a leap second let epoch = 1136073599; convert_reconvert(epoch); convert_reconvert(epoch + 1); convert_reconvert(epoch + 2); let parsed = parse_rfc3339("2005-12-31T23:59:60Z").expect("parsing leap second should work"); assert_eq!(parsed, epoch + 1); } #[test] fn test_rfc3339_range() { // also tests single-digit years/first decade values let lower = -62167219200; let lower_str = "0000-01-01T00:00:00Z"; let upper = 253402300799; let upper_str = "9999-12-31T23:59:59Z"; let converted = epoch_to_rfc3339_utc(lower).expect("converting lower bound of RFC3339 range should work"); assert_eq!(converted, lower_str); let converted = epoch_to_rfc3339_utc(upper).expect("converting upper bound of RFC3339 range should work"); assert_eq!(converted, upper_str); let parsed = parse_rfc3339(lower_str).expect("parsing lower bound of RFC3339 range should work"); 
assert_eq!(parsed, lower); let parsed = parse_rfc3339(upper_str).expect("parsing upper bound of RFC3339 range should work"); assert_eq!(parsed, upper); epoch_to_rfc3339_utc(lower - 1) .expect_err("converting below lower bound of RFC3339 range should fail"); epoch_to_rfc3339_utc(upper + 1) .expect_err("converting above upper bound of RFC3339 range should fail"); let first_century = -59011459201; let first_century_str = "0099-12-31T23:59:59Z"; let converted = epoch_to_rfc3339_utc(first_century) .expect("converting epoch representing first century year should work"); assert_eq!(converted, first_century_str); let parsed = parse_rfc3339(first_century_str).expect("parsing first century string should work"); assert_eq!(parsed, first_century); let first_millenium = -59011459200; let first_millenium_str = "0100-01-01T00:00:00Z"; let converted = epoch_to_rfc3339_utc(first_millenium) .expect("converting epoch representing first millenium year should work"); assert_eq!(converted, first_millenium_str); let parsed = parse_rfc3339(first_millenium_str).expect("parsing first millenium string should work"); assert_eq!(parsed, first_millenium); } #[test] fn test_gmtime_range() { // year must fit into i32 let lower = -67768040609740800; let upper = 67768036191676799; let mut lower_tm = gmtime(lower).expect("gmtime should work as long as years fit into i32"); let res = timegm(&mut lower_tm).expect("converting back to epoch should work"); assert_eq!(lower, res); gmtime(lower - 1).expect_err("gmtime should fail for years not fitting into i32"); let mut upper_tm = gmtime(upper).expect("gmtime should work as long as years fit into i32"); let res = timegm(&mut upper_tm).expect("converting back to epoch should work"); assert_eq!(upper, res); gmtime(upper + 1).expect_err("gmtime should fail for years not fitting into i32"); } #[test] fn test_timezones() { let input = "2020-12-30T00:00:00+06:30"; let epoch = 1609263000; let expected_utc = "2020-12-29T17:30:00Z"; let parsed = 
parse_rfc3339(input).expect("parsing failed"); assert_eq!(parsed, epoch); let res = epoch_to_rfc3339_utc(parsed).expect("converting to RFC failed"); assert_eq!(expected_utc, res); }
{ let gmtime = gmtime(epoch)?; strftime(format, &gmtime) }
identifier_body
mod.rs
use anyhow::{bail, format_err, Error}; use std::ffi::{CStr, CString}; mod tm_editor; pub use tm_editor::*; /// Safe bindings to libc timelocal /// /// We set tm_isdst to -1. /// This also normalizes the parameter pub fn timelocal(t: &mut libc::tm) -> Result<i64, Error> { t.tm_isdst = -1; let epoch = unsafe { libc::mktime(t) }; if epoch == -1 { bail!("libc::mktime failed for {:?}", t); } Ok(epoch) } /// Safe bindings to libc timegm /// /// We set tm_isdst to 0. /// This also normalizes the parameter pub fn timegm(t: &mut libc::tm) -> Result<i64, Error> { t.tm_isdst = 0; let epoch = unsafe { libc::timegm(t) }; if epoch == -1 { bail!("libc::timegm failed for {:?}", t); } Ok(epoch) } fn new_libc_tm() -> libc::tm { libc::tm { tm_sec: 0, tm_min: 0, tm_hour: 0, tm_mday: 0, tm_mon: 0, tm_year: 0, tm_wday: 0, tm_yday: 0, tm_isdst: 0, tm_gmtoff: 0, tm_zone: std::ptr::null(), } } /// Safe bindings to libc localtime pub fn localtime(epoch: i64) -> Result<libc::tm, Error> { let mut result = new_libc_tm(); unsafe { if libc::localtime_r(&epoch, &mut result).is_null() { bail!("libc::localtime failed for '{}'", epoch); } } Ok(result) } /// Safe bindings to libc gmtime pub fn gmtime(epoch: i64) -> Result<libc::tm, Error> { let mut result = new_libc_tm(); unsafe { if libc::gmtime_r(&epoch, &mut result).is_null() { bail!("libc::gmtime failed for '{}'", epoch); } } Ok(result) } /// Returns Unix Epoch (now) /// /// Note: This panics if the SystemTime::now() returns values not /// repesentable as i64 (should never happen). 
pub fn epoch_i64() -> i64 { use std::convert::TryFrom; use std::time::{SystemTime, UNIX_EPOCH}; let now = SystemTime::now(); if now > UNIX_EPOCH { i64::try_from(now.duration_since(UNIX_EPOCH).unwrap().as_secs()) .expect("epoch_i64: now is too large") } else { -i64::try_from(UNIX_EPOCH.duration_since(now).unwrap().as_secs()) .expect("epoch_i64: now is too small") } } /// Returns Unix Epoch (now) as f64 with subseconds resolution /// /// Note: This can be inacurrate for values greater the 2^53. But this /// should never happen. pub fn epoch_f64() -> f64 { use std::time::{SystemTime, UNIX_EPOCH}; let now = SystemTime::now(); if now > UNIX_EPOCH { now.duration_since(UNIX_EPOCH).unwrap().as_secs_f64() } else { -UNIX_EPOCH.duration_since(now).unwrap().as_secs_f64() } } // rust libc bindings do not include strftime #[link(name = "c")] extern "C" { #[link_name = "strftime"] fn libc_strftime( s: *mut libc::c_char, max: libc::size_t, format: *const libc::c_char, time: *const libc::tm, ) -> libc::size_t; } /// Safe bindings to libc strftime pub fn strftime(format: &str, t: &libc::tm) -> Result<String, Error> { let format = CString::new(format)?; let mut buf = vec![0u8; 8192]; let res = unsafe { libc_strftime( buf.as_mut_ptr() as *mut libc::c_char, buf.len() as libc::size_t, format.as_ptr(), t as *const libc::tm, ) }; let len = nix::errno::Errno::result(res).map(|r| r as usize)?; if len == 0 { bail!("strftime: result len is 0 (string too large)"); }; let c_str = CStr::from_bytes_with_nul(&buf[..len + 1])?; let str_slice: &str = c_str.to_str().unwrap(); Ok(str_slice.to_owned()) } /// Format epoch as local time pub fn strftime_local(format: &str, epoch: i64) -> Result<String, Error> { let localtime = localtime(epoch)?; strftime(format, &localtime) } /// Format epoch as utc time pub fn strftime_utc(format: &str, epoch: i64) -> Result<String, Error> { let gmtime = gmtime(epoch)?; strftime(format, &gmtime) } /// Convert Unix epoch into RFC3339 UTC string pub fn 
epoch_to_rfc3339_utc(epoch: i64) -> Result<String, Error> { let gmtime = gmtime(epoch)?; let year = gmtime.tm_year + 1900; if year < 0 || year > 9999 { bail!("epoch_to_rfc3339_utc: wrong year '{}'", year); } strftime("%010FT%TZ", &gmtime) } /// Convert Unix epoch into RFC3339 local time with TZ pub fn epoch_to_rfc3339(epoch: i64) -> Result<String, Error> { let localtime = localtime(epoch)?; let year = localtime.tm_year + 1900; if year < 0 || year > 9999 { bail!("epoch_to_rfc3339: wrong year '{}'", year); } // Note: We cannot use strftime %z because of missing collon let mut offset = localtime.tm_gmtoff; let prefix = if offset < 0 { offset = -offset; '-' } else { '+' }; let mins = offset / 60; let hours = mins / 60; let mins = mins % 60; let mut s = strftime("%10FT%T", &localtime)?; s.push(prefix); s.push_str(&format!("{:02}:{:02}", hours, mins)); Ok(s) } /// Parse RFC3339 into Unix epoch pub fn parse_rfc3339(input_str: &str) -> Result<i64, Error> { let input = input_str.as_bytes(); let expect = |pos: usize, c: u8| { if input[pos] != c { bail!("unexpected char at pos {}", pos); } Ok(()) }; let digit = |pos: usize| -> Result<i32, Error> { let digit = input[pos] as i32; if digit < 48 || digit > 57 { bail!("unexpected char at pos {}", pos); } Ok(digit - 48) }; let check_max = |i: i32, max: i32| { if i > max { bail!("value too large ({} > {})", i, max); } Ok(i) }; crate::try_block!({ if input.len() < 20 || input.len() > 25 { bail!("timestamp of unexpected length"); } let tz = input[19]; match tz { b'Z' => { if input.len() != 20 { bail!("unexpected length in UTC timestamp"); } } b'+' | b'-' => { if input.len() != 25 { bail!("unexpected length in timestamp"); } } _ => bail!("unexpected timezone indicator"), } let mut tm = TmEditor::new(true); tm.set_year(digit(0)? * 1000 + digit(1)? * 100 + digit(2)? * 10 + digit(3)?)?; expect(4, b'-')?; tm.set_mon(check_max(digit(5)? * 10 + digit(6)?, 12)?)?; expect(7, b'-')?; tm.set_mday(check_max(digit(8)? 
* 10 + digit(9)?, 31)?)?; expect(10, b'T')?; tm.set_hour(check_max(digit(11)? * 10 + digit(12)?, 23)?)?; expect(13, b':')?; tm.set_min(check_max(digit(14)? * 10 + digit(15)?, 59)?)?; expect(16, b':')?; tm.set_sec(check_max(digit(17)? * 10 + digit(18)?, 60)?)?; let epoch = tm.into_epoch()?; if tz == b'Z' { return Ok(epoch); } let hours = check_max(digit(20)? * 10 + digit(21)?, 23)?; expect(22, b':')?; let mins = check_max(digit(23)? * 10 + digit(24)?, 59)?; let offset = (hours * 3600 + mins * 60) as i64; let epoch = match tz { b'+' => epoch - offset, b'-' => epoch + offset, _ => unreachable!(), // already checked above }; Ok(epoch) }) .map_err(|err| { format_err!( "failed to parse rfc3339 timestamp ({:?}) - {}", input_str, err ) }) } #[test] fn test_leap_seconds() { let convert_reconvert = |epoch| { let rfc3339 = epoch_to_rfc3339_utc(epoch).expect("leap second epoch to rfc3339 should work"); let parsed = parse_rfc3339(&rfc3339).expect("parsing converted leap second epoch should work"); assert_eq!(epoch, parsed); }; // 2005-12-31T23:59:59Z was followed by a leap second let epoch = 1136073599; convert_reconvert(epoch); convert_reconvert(epoch + 1); convert_reconvert(epoch + 2); let parsed = parse_rfc3339("2005-12-31T23:59:60Z").expect("parsing leap second should work"); assert_eq!(parsed, epoch + 1); } #[test] fn test_rfc3339_range() { // also tests single-digit years/first decade values let lower = -62167219200; let lower_str = "0000-01-01T00:00:00Z"; let upper = 253402300799; let upper_str = "9999-12-31T23:59:59Z"; let converted = epoch_to_rfc3339_utc(lower).expect("converting lower bound of RFC3339 range should work"); assert_eq!(converted, lower_str);
let parsed = parse_rfc3339(lower_str).expect("parsing lower bound of RFC3339 range should work"); assert_eq!(parsed, lower); let parsed = parse_rfc3339(upper_str).expect("parsing upper bound of RFC3339 range should work"); assert_eq!(parsed, upper); epoch_to_rfc3339_utc(lower - 1) .expect_err("converting below lower bound of RFC3339 range should fail"); epoch_to_rfc3339_utc(upper + 1) .expect_err("converting above upper bound of RFC3339 range should fail"); let first_century = -59011459201; let first_century_str = "0099-12-31T23:59:59Z"; let converted = epoch_to_rfc3339_utc(first_century) .expect("converting epoch representing first century year should work"); assert_eq!(converted, first_century_str); let parsed = parse_rfc3339(first_century_str).expect("parsing first century string should work"); assert_eq!(parsed, first_century); let first_millenium = -59011459200; let first_millenium_str = "0100-01-01T00:00:00Z"; let converted = epoch_to_rfc3339_utc(first_millenium) .expect("converting epoch representing first millenium year should work"); assert_eq!(converted, first_millenium_str); let parsed = parse_rfc3339(first_millenium_str).expect("parsing first millenium string should work"); assert_eq!(parsed, first_millenium); } #[test] fn test_gmtime_range() { // year must fit into i32 let lower = -67768040609740800; let upper = 67768036191676799; let mut lower_tm = gmtime(lower).expect("gmtime should work as long as years fit into i32"); let res = timegm(&mut lower_tm).expect("converting back to epoch should work"); assert_eq!(lower, res); gmtime(lower - 1).expect_err("gmtime should fail for years not fitting into i32"); let mut upper_tm = gmtime(upper).expect("gmtime should work as long as years fit into i32"); let res = timegm(&mut upper_tm).expect("converting back to epoch should work"); assert_eq!(upper, res); gmtime(upper + 1).expect_err("gmtime should fail for years not fitting into i32"); } #[test] fn test_timezones() { let input = "2020-12-30T00:00:00+06:30"; 
let epoch = 1609263000; let expected_utc = "2020-12-29T17:30:00Z"; let parsed = parse_rfc3339(input).expect("parsing failed"); assert_eq!(parsed, epoch); let res = epoch_to_rfc3339_utc(parsed).expect("converting to RFC failed"); assert_eq!(expected_utc, res); }
let converted = epoch_to_rfc3339_utc(upper).expect("converting upper bound of RFC3339 range should work"); assert_eq!(converted, upper_str);
random_line_split
GP.py
# Copyright (c) 2012, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np import pylab as pb from .. import kern from ..core import model from ..util.linalg import pdinv,mdot from ..util.plot import gpplot,x_frame1D,x_frame2D, Tango from ..likelihoods import EP class GP(model): """ Gaussian Process model for regression and EP :param X: input observations :param kernel: a GPy kernel, defaults to rbf+white :parm likelihood: a GPy likelihood :param normalize_X: whether to normalize the input data before computing (predictions will be in original scales) :type normalize_X: False|True :param normalize_Y: whether to normalize the input data before computing (predictions will be in original scales) :type normalize_Y: False|True :param Xslices: how the X,Y data co-vary in the kernel (i.e. which "outputs" they correspond to). See (link:slicing) :rtype: model object :param epsilon_ep: convergence criterion for the Expectation Propagation algorithm, defaults to 0.1 :param powerep: power-EP parameters [$\eta$,$\delta$], defaults to [1.,1.] :type powerep: list .. 
Note:: Multiple independent outputs are allowed using columns of Y """ def __init__(self, X, likelihood, kernel, normalize_X=False, Xslices=None): # parse arguments self.Xslices = Xslices self.X = X assert len(self.X.shape)==2 self.N, self.Q = self.X.shape assert isinstance(kernel, kern.kern) self.kern = kernel #here's some simple normalization for the inputs if normalize_X: self._Xmean = X.mean(0)[None,:] self._Xstd = X.std(0)[None,:] self.X = (X.copy() - self._Xmean) / self._Xstd if hasattr(self,'Z'): self.Z = (self.Z - self._Xmean) / self._Xstd else: self._Xmean = np.zeros((1,self.X.shape[1])) self._Xstd = np.ones((1,self.X.shape[1])) self.likelihood = likelihood #assert self.X.shape[0] == self.likelihood.Y.shape[0] #self.N, self.D = self.likelihood.Y.shape assert self.X.shape[0] == self.likelihood.data.shape[0] self.N, self.D = self.likelihood.data.shape model.__init__(self) def dL_dZ(self): """ TODO: one day we might like to learn Z by gradient methods? """ return np.zeros_like(self.Z) def _set_params(self,p): self.kern._set_params_transformed(p[:self.kern.Nparam]) #self.likelihood._set_params(p[self.kern.Nparam:]) # test by Nicolas self.likelihood._set_params(p[self.kern.Nparam_transformed():]) # test by Nicolas self.K = self.kern.K(self.X,slices1=self.Xslices,slices2=self.Xslices) self.K += self.likelihood.covariance_matrix self.Ki, self.L, self.Li, self.K_logdet = pdinv(self.K) #the gradient of the likelihood wrt the covariance matrix if self.likelihood.YYT is None: alpha = np.dot(self.Ki,self.likelihood.Y) self.dL_dK = 0.5*(np.dot(alpha,alpha.T)-self.D*self.Ki) else: tmp = mdot(self.Ki, self.likelihood.YYT, self.Ki) self.dL_dK = 0.5*(tmp - self.D*self.Ki) def
(self): return np.hstack((self.kern._get_params_transformed(), self.likelihood._get_params())) def _get_param_names(self): return self.kern._get_param_names_transformed() + self.likelihood._get_param_names() def update_likelihood_approximation(self): """ Approximates a non-gaussian likelihood using Expectation Propagation For a Gaussian (or direct: TODO) likelihood, no iteration is required: this function does nothing """ self.likelihood.fit_full(self.kern.K(self.X)) self._set_params(self._get_params()) # update the GP def _model_fit_term(self): """ Computes the model fit using YYT if it's available """ if self.likelihood.YYT is None: return -0.5*np.sum(np.square(np.dot(self.Li,self.likelihood.Y))) else: return -0.5*np.sum(np.multiply(self.Ki, self.likelihood.YYT)) def log_likelihood(self): """ The log marginal likelihood of the GP. For an EP model, can be written as the log likelihood of a regression model for a new variable Y* = v_tilde/tau_tilde, with a covariance matrix K* = K + diag(1./tau_tilde) plus a normalization term. """ return -0.5*self.D*self.K_logdet + self._model_fit_term() + self.likelihood.Z def _log_likelihood_gradients(self): """ The gradient of all parameters. 
For the kernel parameters, use the chain rule via dL_dK For the likelihood parameters, pass in alpha = K^-1 y """ return np.hstack((self.kern.dK_dtheta(dL_dK=self.dL_dK,X=self.X,slices1=self.Xslices,slices2=self.Xslices), self.likelihood._gradients(partial=np.diag(self.dL_dK)))) def _raw_predict(self,_Xnew,slices=None, full_cov=False): """ Internal helper function for making predictions, does not account for normalization or likelihood """ Kx = self.kern.K(self.X,_Xnew, slices1=self.Xslices,slices2=slices) mu = np.dot(np.dot(Kx.T,self.Ki),self.likelihood.Y) KiKx = np.dot(self.Ki,Kx) if full_cov: Kxx = self.kern.K(_Xnew, slices1=slices,slices2=slices) var = Kxx - np.dot(KiKx.T,Kx) else: Kxx = self.kern.Kdiag(_Xnew, slices=slices) var = Kxx - np.sum(np.multiply(KiKx,Kx),0) var = var[:,None] return mu, var def predict(self,Xnew, slices=None, full_cov=False): """ Predict the function(s) at the new point(s) Xnew. Arguments --------- :param Xnew: The points at which to make a prediction :type Xnew: np.ndarray, Nnew x self.Q :param slices: specifies which outputs kernel(s) the Xnew correspond to (see below) :type slices: (None, list of slice objects, list of ints) :param full_cov: whether to return the folll covariance matrix, or just the diagonal :type full_cov: bool :rtype: posterior mean, a Numpy array, Nnew x self.D :rtype: posterior variance, a Numpy array, Nnew x 1 if full_cov=False, Nnew x Nnew otherwise :rtype: lower and upper boundaries of the 95% confidence intervals, Numpy arrays, Nnew x self.D .. Note:: "slices" specifies how the the points X_new co-vary wich the training points. - If None, the new points covary throigh every kernel part (default) - If a list of slices, the i^th slice specifies which data are affected by the i^th kernel part - If a list of booleans, specifying which kernel parts are active If full_cov and self.D > 1, the return shape of var is Nnew x Nnew x self.D. If self.D == 1, the return shape is Nnew x Nnew. 
This is to allow for different normalizations of the output dimensions. """ #normalize X values Xnew = (Xnew.copy() - self._Xmean) / self._Xstd mu, var = self._raw_predict(Xnew, slices, full_cov) #now push through likelihood TODO mean, var, _025pm, _975pm = self.likelihood.predictive_values(mu, var, full_cov) return mean, var, _025pm, _975pm def plot_f(self, samples=0, plot_limits=None, which_data='all', which_functions='all', resolution=None, full_cov=False): """ Plot the GP's view of the world, where the data is normalized and the likelihood is Gaussian :param samples: the number of a posteriori samples to plot :param which_data: which if the training data to plot (default all) :type which_data: 'all' or a slice object to slice self.X, self.Y :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaluts to data limits :param which_functions: which of the kernel functions to plot (additively) :type which_functions: list of bools :param resolution: the number of intervals to sample the GP on. Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D Plot the posterior of the GP. - In one dimension, the function is plotted with a shaded region identifying two standard deviations. - In two dimsensions, a contour-plot shows the mean predicted function - In higher dimensions, we've no implemented this yet !TODO! 
Can plot only part of the data and part of the posterior functions using which_data and which_functions Plot the data's view of the world, with non-normalized values and GP predictions passed through the likelihood """ if which_functions=='all': which_functions = [True]*self.kern.Nparts if which_data=='all': which_data = slice(None) if self.X.shape[1] == 1: Xnew, xmin, xmax = x_frame1D(self.X, plot_limits=plot_limits) if samples == 0: m,v = self._raw_predict(Xnew, slices=which_functions) gpplot(Xnew,m,m-2*np.sqrt(v),m+2*np.sqrt(v)) pb.plot(self.X[which_data],self.likelihood.Y[which_data],'kx',mew=1.5) else: m,v = self._raw_predict(Xnew, slices=which_functions,full_cov=True) Ysim = np.random.multivariate_normal(m.flatten(),v,samples) gpplot(Xnew,m,m-2*np.sqrt(np.diag(v)[:,None]),m+2*np.sqrt(np.diag(v))[:,None]) for i in range(samples): pb.plot(Xnew,Ysim[i,:],Tango.colorsHex['darkBlue'],linewidth=0.25) pb.plot(self.X[which_data],self.likelihood.Y[which_data],'kx',mew=1.5) pb.xlim(xmin,xmax) ymin,ymax = min(np.append(self.likelihood.Y,m-2*np.sqrt(np.diag(v)[:,None]))), max(np.append(self.likelihood.Y,m+2*np.sqrt(np.diag(v)[:,None]))) ymin, ymax = ymin - 0.1*(ymax - ymin), ymax + 0.1*(ymax - ymin) pb.ylim(ymin,ymax) if hasattr(self,'Z'): pb.plot(self.Z,self.Z*0+pb.ylim()[0],'r|',mew=1.5,markersize=12) elif self.X.shape[1] == 2: resolution = resolution or 50 Xnew, xmin, xmax, xx, yy = x_frame2D(self.X, plot_limits,resolution) m,v = self._raw_predict(Xnew, slices=which_functions) m = m.reshape(resolution,resolution).T pb.contour(xx,yy,m,vmin=m.min(),vmax=m.max(),cmap=pb.cm.jet) pb.scatter(Xorig[:,0],Xorig[:,1],40,Yorig,linewidth=0,cmap=pb.cm.jet,vmin=m.min(), vmax=m.max()) pb.xlim(xmin[0],xmax[0]) pb.ylim(xmin[1],xmax[1]) else: raise NotImplementedError, "Cannot define a frame with more than two input dimensions" def plot(self,samples=0,plot_limits=None,which_data='all',which_functions='all',resolution=None,levels=20): """ TODO: Docstrings! 
:param levels: for 2D plotting, the number of contour levels to use """ # TODO include samples if which_functions=='all': which_functions = [True]*self.kern.Nparts if which_data=='all': which_data = slice(None) if self.X.shape[1] == 1: Xu = self.X * self._Xstd + self._Xmean #NOTE self.X are the normalized values now Xnew, xmin, xmax = x_frame1D(Xu, plot_limits=plot_limits) m, var, lower, upper = self.predict(Xnew, slices=which_functions) gpplot(Xnew,m, lower, upper) pb.plot(Xu[which_data],self.likelihood.data[which_data],'kx',mew=1.5) ymin,ymax = min(np.append(self.likelihood.data,lower)), max(np.append(self.likelihood.data,upper)) ymin, ymax = ymin - 0.1*(ymax - ymin), ymax + 0.1*(ymax - ymin) pb.xlim(xmin,xmax) pb.ylim(ymin,ymax) if hasattr(self,'Z'): Zu = self.Z*self._Xstd + self._Xmean pb.plot(Zu,Zu*0+pb.ylim()[0],'r|',mew=1.5,markersize=12) if self.has_uncertain_inputs: pb.errorbar(self.X[:,0], pb.ylim()[0]+np.zeros(self.N), xerr=2*np.sqrt(self.X_variance.flatten())) elif self.X.shape[1]==2: #FIXME resolution = resolution or 50 Xnew, xx, yy, xmin, xmax = x_frame2D(self.X, plot_limits,resolution) x, y = np.linspace(xmin[0],xmax[0],resolution), np.linspace(xmin[1],xmax[1],resolution) m, var, lower, upper = self.predict(Xnew, slices=which_functions) m = m.reshape(resolution,resolution).T pb.contour(x,y,m,levels,vmin=m.min(),vmax=m.max(),cmap=pb.cm.jet) Yf = self.likelihood.Y.flatten() pb.scatter(self.X[:,0], self.X[:,1], 40, Yf, cmap=pb.cm.jet,vmin=m.min(),vmax=m.max(), linewidth=0.) pb.xlim(xmin[0],xmax[0]) pb.ylim(xmin[1],xmax[1]) if hasattr(self,'Z'): pb.plot(self.Z[:,0],self.Z[:,1],'wo') else: raise NotImplementedError, "Cannot define a frame with more than two input dimensions"
_get_params
identifier_name
GP.py
# Copyright (c) 2012, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np import pylab as pb from .. import kern from ..core import model from ..util.linalg import pdinv,mdot from ..util.plot import gpplot,x_frame1D,x_frame2D, Tango from ..likelihoods import EP class GP(model):
""" Gaussian Process model for regression and EP :param X: input observations :param kernel: a GPy kernel, defaults to rbf+white :parm likelihood: a GPy likelihood :param normalize_X: whether to normalize the input data before computing (predictions will be in original scales) :type normalize_X: False|True :param normalize_Y: whether to normalize the input data before computing (predictions will be in original scales) :type normalize_Y: False|True :param Xslices: how the X,Y data co-vary in the kernel (i.e. which "outputs" they correspond to). See (link:slicing) :rtype: model object :param epsilon_ep: convergence criterion for the Expectation Propagation algorithm, defaults to 0.1 :param powerep: power-EP parameters [$\eta$,$\delta$], defaults to [1.,1.] :type powerep: list .. Note:: Multiple independent outputs are allowed using columns of Y """ def __init__(self, X, likelihood, kernel, normalize_X=False, Xslices=None): # parse arguments self.Xslices = Xslices self.X = X assert len(self.X.shape)==2 self.N, self.Q = self.X.shape assert isinstance(kernel, kern.kern) self.kern = kernel #here's some simple normalization for the inputs if normalize_X: self._Xmean = X.mean(0)[None,:] self._Xstd = X.std(0)[None,:] self.X = (X.copy() - self._Xmean) / self._Xstd if hasattr(self,'Z'): self.Z = (self.Z - self._Xmean) / self._Xstd else: self._Xmean = np.zeros((1,self.X.shape[1])) self._Xstd = np.ones((1,self.X.shape[1])) self.likelihood = likelihood #assert self.X.shape[0] == self.likelihood.Y.shape[0] #self.N, self.D = self.likelihood.Y.shape assert self.X.shape[0] == self.likelihood.data.shape[0] self.N, self.D = self.likelihood.data.shape model.__init__(self) def dL_dZ(self): """ TODO: one day we might like to learn Z by gradient methods? 
""" return np.zeros_like(self.Z) def _set_params(self,p): self.kern._set_params_transformed(p[:self.kern.Nparam]) #self.likelihood._set_params(p[self.kern.Nparam:]) # test by Nicolas self.likelihood._set_params(p[self.kern.Nparam_transformed():]) # test by Nicolas self.K = self.kern.K(self.X,slices1=self.Xslices,slices2=self.Xslices) self.K += self.likelihood.covariance_matrix self.Ki, self.L, self.Li, self.K_logdet = pdinv(self.K) #the gradient of the likelihood wrt the covariance matrix if self.likelihood.YYT is None: alpha = np.dot(self.Ki,self.likelihood.Y) self.dL_dK = 0.5*(np.dot(alpha,alpha.T)-self.D*self.Ki) else: tmp = mdot(self.Ki, self.likelihood.YYT, self.Ki) self.dL_dK = 0.5*(tmp - self.D*self.Ki) def _get_params(self): return np.hstack((self.kern._get_params_transformed(), self.likelihood._get_params())) def _get_param_names(self): return self.kern._get_param_names_transformed() + self.likelihood._get_param_names() def update_likelihood_approximation(self): """ Approximates a non-gaussian likelihood using Expectation Propagation For a Gaussian (or direct: TODO) likelihood, no iteration is required: this function does nothing """ self.likelihood.fit_full(self.kern.K(self.X)) self._set_params(self._get_params()) # update the GP def _model_fit_term(self): """ Computes the model fit using YYT if it's available """ if self.likelihood.YYT is None: return -0.5*np.sum(np.square(np.dot(self.Li,self.likelihood.Y))) else: return -0.5*np.sum(np.multiply(self.Ki, self.likelihood.YYT)) def log_likelihood(self): """ The log marginal likelihood of the GP. For an EP model, can be written as the log likelihood of a regression model for a new variable Y* = v_tilde/tau_tilde, with a covariance matrix K* = K + diag(1./tau_tilde) plus a normalization term. """ return -0.5*self.D*self.K_logdet + self._model_fit_term() + self.likelihood.Z def _log_likelihood_gradients(self): """ The gradient of all parameters. 
For the kernel parameters, use the chain rule via dL_dK For the likelihood parameters, pass in alpha = K^-1 y """ return np.hstack((self.kern.dK_dtheta(dL_dK=self.dL_dK,X=self.X,slices1=self.Xslices,slices2=self.Xslices), self.likelihood._gradients(partial=np.diag(self.dL_dK)))) def _raw_predict(self,_Xnew,slices=None, full_cov=False): """ Internal helper function for making predictions, does not account for normalization or likelihood """ Kx = self.kern.K(self.X,_Xnew, slices1=self.Xslices,slices2=slices) mu = np.dot(np.dot(Kx.T,self.Ki),self.likelihood.Y) KiKx = np.dot(self.Ki,Kx) if full_cov: Kxx = self.kern.K(_Xnew, slices1=slices,slices2=slices) var = Kxx - np.dot(KiKx.T,Kx) else: Kxx = self.kern.Kdiag(_Xnew, slices=slices) var = Kxx - np.sum(np.multiply(KiKx,Kx),0) var = var[:,None] return mu, var def predict(self,Xnew, slices=None, full_cov=False): """ Predict the function(s) at the new point(s) Xnew. Arguments --------- :param Xnew: The points at which to make a prediction :type Xnew: np.ndarray, Nnew x self.Q :param slices: specifies which outputs kernel(s) the Xnew correspond to (see below) :type slices: (None, list of slice objects, list of ints) :param full_cov: whether to return the folll covariance matrix, or just the diagonal :type full_cov: bool :rtype: posterior mean, a Numpy array, Nnew x self.D :rtype: posterior variance, a Numpy array, Nnew x 1 if full_cov=False, Nnew x Nnew otherwise :rtype: lower and upper boundaries of the 95% confidence intervals, Numpy arrays, Nnew x self.D .. Note:: "slices" specifies how the the points X_new co-vary wich the training points. - If None, the new points covary throigh every kernel part (default) - If a list of slices, the i^th slice specifies which data are affected by the i^th kernel part - If a list of booleans, specifying which kernel parts are active If full_cov and self.D > 1, the return shape of var is Nnew x Nnew x self.D. If self.D == 1, the return shape is Nnew x Nnew. 
This is to allow for different normalizations of the output dimensions. """ #normalize X values Xnew = (Xnew.copy() - self._Xmean) / self._Xstd mu, var = self._raw_predict(Xnew, slices, full_cov) #now push through likelihood TODO mean, var, _025pm, _975pm = self.likelihood.predictive_values(mu, var, full_cov) return mean, var, _025pm, _975pm def plot_f(self, samples=0, plot_limits=None, which_data='all', which_functions='all', resolution=None, full_cov=False): """ Plot the GP's view of the world, where the data is normalized and the likelihood is Gaussian :param samples: the number of a posteriori samples to plot :param which_data: which if the training data to plot (default all) :type which_data: 'all' or a slice object to slice self.X, self.Y :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaluts to data limits :param which_functions: which of the kernel functions to plot (additively) :type which_functions: list of bools :param resolution: the number of intervals to sample the GP on. Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D Plot the posterior of the GP. - In one dimension, the function is plotted with a shaded region identifying two standard deviations. - In two dimsensions, a contour-plot shows the mean predicted function - In higher dimensions, we've no implemented this yet !TODO! 
Can plot only part of the data and part of the posterior functions using which_data and which_functions Plot the data's view of the world, with non-normalized values and GP predictions passed through the likelihood """ if which_functions=='all': which_functions = [True]*self.kern.Nparts if which_data=='all': which_data = slice(None) if self.X.shape[1] == 1: Xnew, xmin, xmax = x_frame1D(self.X, plot_limits=plot_limits) if samples == 0: m,v = self._raw_predict(Xnew, slices=which_functions) gpplot(Xnew,m,m-2*np.sqrt(v),m+2*np.sqrt(v)) pb.plot(self.X[which_data],self.likelihood.Y[which_data],'kx',mew=1.5) else: m,v = self._raw_predict(Xnew, slices=which_functions,full_cov=True) Ysim = np.random.multivariate_normal(m.flatten(),v,samples) gpplot(Xnew,m,m-2*np.sqrt(np.diag(v)[:,None]),m+2*np.sqrt(np.diag(v))[:,None]) for i in range(samples): pb.plot(Xnew,Ysim[i,:],Tango.colorsHex['darkBlue'],linewidth=0.25) pb.plot(self.X[which_data],self.likelihood.Y[which_data],'kx',mew=1.5) pb.xlim(xmin,xmax) ymin,ymax = min(np.append(self.likelihood.Y,m-2*np.sqrt(np.diag(v)[:,None]))), max(np.append(self.likelihood.Y,m+2*np.sqrt(np.diag(v)[:,None]))) ymin, ymax = ymin - 0.1*(ymax - ymin), ymax + 0.1*(ymax - ymin) pb.ylim(ymin,ymax) if hasattr(self,'Z'): pb.plot(self.Z,self.Z*0+pb.ylim()[0],'r|',mew=1.5,markersize=12) elif self.X.shape[1] == 2: resolution = resolution or 50 Xnew, xmin, xmax, xx, yy = x_frame2D(self.X, plot_limits,resolution) m,v = self._raw_predict(Xnew, slices=which_functions) m = m.reshape(resolution,resolution).T pb.contour(xx,yy,m,vmin=m.min(),vmax=m.max(),cmap=pb.cm.jet) pb.scatter(Xorig[:,0],Xorig[:,1],40,Yorig,linewidth=0,cmap=pb.cm.jet,vmin=m.min(), vmax=m.max()) pb.xlim(xmin[0],xmax[0]) pb.ylim(xmin[1],xmax[1]) else: raise NotImplementedError, "Cannot define a frame with more than two input dimensions" def plot(self,samples=0,plot_limits=None,which_data='all',which_functions='all',resolution=None,levels=20): """ TODO: Docstrings! 
:param levels: for 2D plotting, the number of contour levels to use """ # TODO include samples if which_functions=='all': which_functions = [True]*self.kern.Nparts if which_data=='all': which_data = slice(None) if self.X.shape[1] == 1: Xu = self.X * self._Xstd + self._Xmean #NOTE self.X are the normalized values now Xnew, xmin, xmax = x_frame1D(Xu, plot_limits=plot_limits) m, var, lower, upper = self.predict(Xnew, slices=which_functions) gpplot(Xnew,m, lower, upper) pb.plot(Xu[which_data],self.likelihood.data[which_data],'kx',mew=1.5) ymin,ymax = min(np.append(self.likelihood.data,lower)), max(np.append(self.likelihood.data,upper)) ymin, ymax = ymin - 0.1*(ymax - ymin), ymax + 0.1*(ymax - ymin) pb.xlim(xmin,xmax) pb.ylim(ymin,ymax) if hasattr(self,'Z'): Zu = self.Z*self._Xstd + self._Xmean pb.plot(Zu,Zu*0+pb.ylim()[0],'r|',mew=1.5,markersize=12) if self.has_uncertain_inputs: pb.errorbar(self.X[:,0], pb.ylim()[0]+np.zeros(self.N), xerr=2*np.sqrt(self.X_variance.flatten())) elif self.X.shape[1]==2: #FIXME resolution = resolution or 50 Xnew, xx, yy, xmin, xmax = x_frame2D(self.X, plot_limits,resolution) x, y = np.linspace(xmin[0],xmax[0],resolution), np.linspace(xmin[1],xmax[1],resolution) m, var, lower, upper = self.predict(Xnew, slices=which_functions) m = m.reshape(resolution,resolution).T pb.contour(x,y,m,levels,vmin=m.min(),vmax=m.max(),cmap=pb.cm.jet) Yf = self.likelihood.Y.flatten() pb.scatter(self.X[:,0], self.X[:,1], 40, Yf, cmap=pb.cm.jet,vmin=m.min(),vmax=m.max(), linewidth=0.) pb.xlim(xmin[0],xmax[0]) pb.ylim(xmin[1],xmax[1]) if hasattr(self,'Z'): pb.plot(self.Z[:,0],self.Z[:,1],'wo') else: raise NotImplementedError, "Cannot define a frame with more than two input dimensions"
identifier_body
GP.py
# Copyright (c) 2012, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np import pylab as pb from .. import kern from ..core import model from ..util.linalg import pdinv,mdot from ..util.plot import gpplot,x_frame1D,x_frame2D, Tango from ..likelihoods import EP class GP(model): """ Gaussian Process model for regression and EP :param X: input observations :param kernel: a GPy kernel, defaults to rbf+white :parm likelihood: a GPy likelihood :param normalize_X: whether to normalize the input data before computing (predictions will be in original scales) :type normalize_X: False|True :param normalize_Y: whether to normalize the input data before computing (predictions will be in original scales) :type normalize_Y: False|True :param Xslices: how the X,Y data co-vary in the kernel (i.e. which "outputs" they correspond to). See (link:slicing) :rtype: model object :param epsilon_ep: convergence criterion for the Expectation Propagation algorithm, defaults to 0.1 :param powerep: power-EP parameters [$\eta$,$\delta$], defaults to [1.,1.] :type powerep: list .. 
Note:: Multiple independent outputs are allowed using columns of Y """ def __init__(self, X, likelihood, kernel, normalize_X=False, Xslices=None): # parse arguments self.Xslices = Xslices self.X = X assert len(self.X.shape)==2 self.N, self.Q = self.X.shape assert isinstance(kernel, kern.kern) self.kern = kernel #here's some simple normalization for the inputs if normalize_X: self._Xmean = X.mean(0)[None,:] self._Xstd = X.std(0)[None,:] self.X = (X.copy() - self._Xmean) / self._Xstd if hasattr(self,'Z'): self.Z = (self.Z - self._Xmean) / self._Xstd else: self._Xmean = np.zeros((1,self.X.shape[1])) self._Xstd = np.ones((1,self.X.shape[1])) self.likelihood = likelihood #assert self.X.shape[0] == self.likelihood.Y.shape[0] #self.N, self.D = self.likelihood.Y.shape assert self.X.shape[0] == self.likelihood.data.shape[0] self.N, self.D = self.likelihood.data.shape model.__init__(self) def dL_dZ(self): """ TODO: one day we might like to learn Z by gradient methods? """ return np.zeros_like(self.Z) def _set_params(self,p): self.kern._set_params_transformed(p[:self.kern.Nparam]) #self.likelihood._set_params(p[self.kern.Nparam:]) # test by Nicolas self.likelihood._set_params(p[self.kern.Nparam_transformed():]) # test by Nicolas self.K = self.kern.K(self.X,slices1=self.Xslices,slices2=self.Xslices) self.K += self.likelihood.covariance_matrix self.Ki, self.L, self.Li, self.K_logdet = pdinv(self.K) #the gradient of the likelihood wrt the covariance matrix if self.likelihood.YYT is None: alpha = np.dot(self.Ki,self.likelihood.Y) self.dL_dK = 0.5*(np.dot(alpha,alpha.T)-self.D*self.Ki) else: tmp = mdot(self.Ki, self.likelihood.YYT, self.Ki) self.dL_dK = 0.5*(tmp - self.D*self.Ki) def _get_params(self): return np.hstack((self.kern._get_params_transformed(), self.likelihood._get_params())) def _get_param_names(self): return self.kern._get_param_names_transformed() + self.likelihood._get_param_names() def update_likelihood_approximation(self): """ Approximates a non-gaussian 
likelihood using Expectation Propagation For a Gaussian (or direct: TODO) likelihood, no iteration is required: this function does nothing """ self.likelihood.fit_full(self.kern.K(self.X)) self._set_params(self._get_params()) # update the GP def _model_fit_term(self): """ Computes the model fit using YYT if it's available """ if self.likelihood.YYT is None: return -0.5*np.sum(np.square(np.dot(self.Li,self.likelihood.Y))) else: return -0.5*np.sum(np.multiply(self.Ki, self.likelihood.YYT)) def log_likelihood(self): """ The log marginal likelihood of the GP. For an EP model, can be written as the log likelihood of a regression model for a new variable Y* = v_tilde/tau_tilde, with a covariance matrix K* = K + diag(1./tau_tilde) plus a normalization term. """ return -0.5*self.D*self.K_logdet + self._model_fit_term() + self.likelihood.Z def _log_likelihood_gradients(self): """ The gradient of all parameters. For the kernel parameters, use the chain rule via dL_dK For the likelihood parameters, pass in alpha = K^-1 y """ return np.hstack((self.kern.dK_dtheta(dL_dK=self.dL_dK,X=self.X,slices1=self.Xslices,slices2=self.Xslices), self.likelihood._gradients(partial=np.diag(self.dL_dK)))) def _raw_predict(self,_Xnew,slices=None, full_cov=False): """ Internal helper function for making predictions, does not account for normalization or likelihood """ Kx = self.kern.K(self.X,_Xnew, slices1=self.Xslices,slices2=slices) mu = np.dot(np.dot(Kx.T,self.Ki),self.likelihood.Y) KiKx = np.dot(self.Ki,Kx) if full_cov: Kxx = self.kern.K(_Xnew, slices1=slices,slices2=slices) var = Kxx - np.dot(KiKx.T,Kx) else: Kxx = self.kern.Kdiag(_Xnew, slices=slices) var = Kxx - np.sum(np.multiply(KiKx,Kx),0) var = var[:,None] return mu, var def predict(self,Xnew, slices=None, full_cov=False): """ Predict the function(s) at the new point(s) Xnew. 
Arguments --------- :param Xnew: The points at which to make a prediction :type Xnew: np.ndarray, Nnew x self.Q :param slices: specifies which outputs kernel(s) the Xnew correspond to (see below) :type slices: (None, list of slice objects, list of ints) :param full_cov: whether to return the folll covariance matrix, or just the diagonal :type full_cov: bool :rtype: posterior mean, a Numpy array, Nnew x self.D :rtype: posterior variance, a Numpy array, Nnew x 1 if full_cov=False, Nnew x Nnew otherwise :rtype: lower and upper boundaries of the 95% confidence intervals, Numpy arrays, Nnew x self.D .. Note:: "slices" specifies how the the points X_new co-vary wich the training points. - If None, the new points covary throigh every kernel part (default) - If a list of slices, the i^th slice specifies which data are affected by the i^th kernel part - If a list of booleans, specifying which kernel parts are active If full_cov and self.D > 1, the return shape of var is Nnew x Nnew x self.D. If self.D == 1, the return shape is Nnew x Nnew. This is to allow for different normalizations of the output dimensions. """ #normalize X values Xnew = (Xnew.copy() - self._Xmean) / self._Xstd mu, var = self._raw_predict(Xnew, slices, full_cov) #now push through likelihood TODO mean, var, _025pm, _975pm = self.likelihood.predictive_values(mu, var, full_cov) return mean, var, _025pm, _975pm def plot_f(self, samples=0, plot_limits=None, which_data='all', which_functions='all', resolution=None, full_cov=False): """
:param samples: the number of a posteriori samples to plot :param which_data: which if the training data to plot (default all) :type which_data: 'all' or a slice object to slice self.X, self.Y :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaluts to data limits :param which_functions: which of the kernel functions to plot (additively) :type which_functions: list of bools :param resolution: the number of intervals to sample the GP on. Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D Plot the posterior of the GP. - In one dimension, the function is plotted with a shaded region identifying two standard deviations. - In two dimsensions, a contour-plot shows the mean predicted function - In higher dimensions, we've no implemented this yet !TODO! Can plot only part of the data and part of the posterior functions using which_data and which_functions Plot the data's view of the world, with non-normalized values and GP predictions passed through the likelihood """ if which_functions=='all': which_functions = [True]*self.kern.Nparts if which_data=='all': which_data = slice(None) if self.X.shape[1] == 1: Xnew, xmin, xmax = x_frame1D(self.X, plot_limits=plot_limits) if samples == 0: m,v = self._raw_predict(Xnew, slices=which_functions) gpplot(Xnew,m,m-2*np.sqrt(v),m+2*np.sqrt(v)) pb.plot(self.X[which_data],self.likelihood.Y[which_data],'kx',mew=1.5) else: m,v = self._raw_predict(Xnew, slices=which_functions,full_cov=True) Ysim = np.random.multivariate_normal(m.flatten(),v,samples) gpplot(Xnew,m,m-2*np.sqrt(np.diag(v)[:,None]),m+2*np.sqrt(np.diag(v))[:,None]) for i in range(samples): pb.plot(Xnew,Ysim[i,:],Tango.colorsHex['darkBlue'],linewidth=0.25) pb.plot(self.X[which_data],self.likelihood.Y[which_data],'kx',mew=1.5) pb.xlim(xmin,xmax) ymin,ymax = min(np.append(self.likelihood.Y,m-2*np.sqrt(np.diag(v)[:,None]))), max(np.append(self.likelihood.Y,m+2*np.sqrt(np.diag(v)[:,None]))) ymin, ymax = ymin - 0.1*(ymax - ymin), ymax + 
0.1*(ymax - ymin) pb.ylim(ymin,ymax) if hasattr(self,'Z'): pb.plot(self.Z,self.Z*0+pb.ylim()[0],'r|',mew=1.5,markersize=12) elif self.X.shape[1] == 2: resolution = resolution or 50 Xnew, xmin, xmax, xx, yy = x_frame2D(self.X, plot_limits,resolution) m,v = self._raw_predict(Xnew, slices=which_functions) m = m.reshape(resolution,resolution).T pb.contour(xx,yy,m,vmin=m.min(),vmax=m.max(),cmap=pb.cm.jet) pb.scatter(Xorig[:,0],Xorig[:,1],40,Yorig,linewidth=0,cmap=pb.cm.jet,vmin=m.min(), vmax=m.max()) pb.xlim(xmin[0],xmax[0]) pb.ylim(xmin[1],xmax[1]) else: raise NotImplementedError, "Cannot define a frame with more than two input dimensions" def plot(self,samples=0,plot_limits=None,which_data='all',which_functions='all',resolution=None,levels=20): """ TODO: Docstrings! :param levels: for 2D plotting, the number of contour levels to use """ # TODO include samples if which_functions=='all': which_functions = [True]*self.kern.Nparts if which_data=='all': which_data = slice(None) if self.X.shape[1] == 1: Xu = self.X * self._Xstd + self._Xmean #NOTE self.X are the normalized values now Xnew, xmin, xmax = x_frame1D(Xu, plot_limits=plot_limits) m, var, lower, upper = self.predict(Xnew, slices=which_functions) gpplot(Xnew,m, lower, upper) pb.plot(Xu[which_data],self.likelihood.data[which_data],'kx',mew=1.5) ymin,ymax = min(np.append(self.likelihood.data,lower)), max(np.append(self.likelihood.data,upper)) ymin, ymax = ymin - 0.1*(ymax - ymin), ymax + 0.1*(ymax - ymin) pb.xlim(xmin,xmax) pb.ylim(ymin,ymax) if hasattr(self,'Z'): Zu = self.Z*self._Xstd + self._Xmean pb.plot(Zu,Zu*0+pb.ylim()[0],'r|',mew=1.5,markersize=12) if self.has_uncertain_inputs: pb.errorbar(self.X[:,0], pb.ylim()[0]+np.zeros(self.N), xerr=2*np.sqrt(self.X_variance.flatten())) elif self.X.shape[1]==2: #FIXME resolution = resolution or 50 Xnew, xx, yy, xmin, xmax = x_frame2D(self.X, plot_limits,resolution) x, y = np.linspace(xmin[0],xmax[0],resolution), np.linspace(xmin[1],xmax[1],resolution) m, var, lower, 
upper = self.predict(Xnew, slices=which_functions) m = m.reshape(resolution,resolution).T pb.contour(x,y,m,levels,vmin=m.min(),vmax=m.max(),cmap=pb.cm.jet) Yf = self.likelihood.Y.flatten() pb.scatter(self.X[:,0], self.X[:,1], 40, Yf, cmap=pb.cm.jet,vmin=m.min(),vmax=m.max(), linewidth=0.) pb.xlim(xmin[0],xmax[0]) pb.ylim(xmin[1],xmax[1]) if hasattr(self,'Z'): pb.plot(self.Z[:,0],self.Z[:,1],'wo') else: raise NotImplementedError, "Cannot define a frame with more than two input dimensions"
Plot the GP's view of the world, where the data is normalized and the likelihood is Gaussian
random_line_split
GP.py
# Copyright (c) 2012, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np import pylab as pb from .. import kern from ..core import model from ..util.linalg import pdinv,mdot from ..util.plot import gpplot,x_frame1D,x_frame2D, Tango from ..likelihoods import EP class GP(model): """ Gaussian Process model for regression and EP :param X: input observations :param kernel: a GPy kernel, defaults to rbf+white :parm likelihood: a GPy likelihood :param normalize_X: whether to normalize the input data before computing (predictions will be in original scales) :type normalize_X: False|True :param normalize_Y: whether to normalize the input data before computing (predictions will be in original scales) :type normalize_Y: False|True :param Xslices: how the X,Y data co-vary in the kernel (i.e. which "outputs" they correspond to). See (link:slicing) :rtype: model object :param epsilon_ep: convergence criterion for the Expectation Propagation algorithm, defaults to 0.1 :param powerep: power-EP parameters [$\eta$,$\delta$], defaults to [1.,1.] :type powerep: list .. 
Note:: Multiple independent outputs are allowed using columns of Y """ def __init__(self, X, likelihood, kernel, normalize_X=False, Xslices=None): # parse arguments self.Xslices = Xslices self.X = X assert len(self.X.shape)==2 self.N, self.Q = self.X.shape assert isinstance(kernel, kern.kern) self.kern = kernel #here's some simple normalization for the inputs if normalize_X: self._Xmean = X.mean(0)[None,:] self._Xstd = X.std(0)[None,:] self.X = (X.copy() - self._Xmean) / self._Xstd if hasattr(self,'Z'): self.Z = (self.Z - self._Xmean) / self._Xstd else: self._Xmean = np.zeros((1,self.X.shape[1])) self._Xstd = np.ones((1,self.X.shape[1])) self.likelihood = likelihood #assert self.X.shape[0] == self.likelihood.Y.shape[0] #self.N, self.D = self.likelihood.Y.shape assert self.X.shape[0] == self.likelihood.data.shape[0] self.N, self.D = self.likelihood.data.shape model.__init__(self) def dL_dZ(self): """ TODO: one day we might like to learn Z by gradient methods? """ return np.zeros_like(self.Z) def _set_params(self,p): self.kern._set_params_transformed(p[:self.kern.Nparam]) #self.likelihood._set_params(p[self.kern.Nparam:]) # test by Nicolas self.likelihood._set_params(p[self.kern.Nparam_transformed():]) # test by Nicolas self.K = self.kern.K(self.X,slices1=self.Xslices,slices2=self.Xslices) self.K += self.likelihood.covariance_matrix self.Ki, self.L, self.Li, self.K_logdet = pdinv(self.K) #the gradient of the likelihood wrt the covariance matrix if self.likelihood.YYT is None: alpha = np.dot(self.Ki,self.likelihood.Y) self.dL_dK = 0.5*(np.dot(alpha,alpha.T)-self.D*self.Ki) else: tmp = mdot(self.Ki, self.likelihood.YYT, self.Ki) self.dL_dK = 0.5*(tmp - self.D*self.Ki) def _get_params(self): return np.hstack((self.kern._get_params_transformed(), self.likelihood._get_params())) def _get_param_names(self): return self.kern._get_param_names_transformed() + self.likelihood._get_param_names() def update_likelihood_approximation(self): """ Approximates a non-gaussian 
likelihood using Expectation Propagation For a Gaussian (or direct: TODO) likelihood, no iteration is required: this function does nothing """ self.likelihood.fit_full(self.kern.K(self.X)) self._set_params(self._get_params()) # update the GP def _model_fit_term(self): """ Computes the model fit using YYT if it's available """ if self.likelihood.YYT is None: return -0.5*np.sum(np.square(np.dot(self.Li,self.likelihood.Y))) else: return -0.5*np.sum(np.multiply(self.Ki, self.likelihood.YYT)) def log_likelihood(self): """ The log marginal likelihood of the GP. For an EP model, can be written as the log likelihood of a regression model for a new variable Y* = v_tilde/tau_tilde, with a covariance matrix K* = K + diag(1./tau_tilde) plus a normalization term. """ return -0.5*self.D*self.K_logdet + self._model_fit_term() + self.likelihood.Z def _log_likelihood_gradients(self): """ The gradient of all parameters. For the kernel parameters, use the chain rule via dL_dK For the likelihood parameters, pass in alpha = K^-1 y """ return np.hstack((self.kern.dK_dtheta(dL_dK=self.dL_dK,X=self.X,slices1=self.Xslices,slices2=self.Xslices), self.likelihood._gradients(partial=np.diag(self.dL_dK)))) def _raw_predict(self,_Xnew,slices=None, full_cov=False): """ Internal helper function for making predictions, does not account for normalization or likelihood """ Kx = self.kern.K(self.X,_Xnew, slices1=self.Xslices,slices2=slices) mu = np.dot(np.dot(Kx.T,self.Ki),self.likelihood.Y) KiKx = np.dot(self.Ki,Kx) if full_cov: Kxx = self.kern.K(_Xnew, slices1=slices,slices2=slices) var = Kxx - np.dot(KiKx.T,Kx) else: Kxx = self.kern.Kdiag(_Xnew, slices=slices) var = Kxx - np.sum(np.multiply(KiKx,Kx),0) var = var[:,None] return mu, var def predict(self,Xnew, slices=None, full_cov=False): """ Predict the function(s) at the new point(s) Xnew. 
Arguments --------- :param Xnew: The points at which to make a prediction :type Xnew: np.ndarray, Nnew x self.Q :param slices: specifies which outputs kernel(s) the Xnew correspond to (see below) :type slices: (None, list of slice objects, list of ints) :param full_cov: whether to return the folll covariance matrix, or just the diagonal :type full_cov: bool :rtype: posterior mean, a Numpy array, Nnew x self.D :rtype: posterior variance, a Numpy array, Nnew x 1 if full_cov=False, Nnew x Nnew otherwise :rtype: lower and upper boundaries of the 95% confidence intervals, Numpy arrays, Nnew x self.D .. Note:: "slices" specifies how the the points X_new co-vary wich the training points. - If None, the new points covary throigh every kernel part (default) - If a list of slices, the i^th slice specifies which data are affected by the i^th kernel part - If a list of booleans, specifying which kernel parts are active If full_cov and self.D > 1, the return shape of var is Nnew x Nnew x self.D. If self.D == 1, the return shape is Nnew x Nnew. This is to allow for different normalizations of the output dimensions. """ #normalize X values Xnew = (Xnew.copy() - self._Xmean) / self._Xstd mu, var = self._raw_predict(Xnew, slices, full_cov) #now push through likelihood TODO mean, var, _025pm, _975pm = self.likelihood.predictive_values(mu, var, full_cov) return mean, var, _025pm, _975pm def plot_f(self, samples=0, plot_limits=None, which_data='all', which_functions='all', resolution=None, full_cov=False): """ Plot the GP's view of the world, where the data is normalized and the likelihood is Gaussian :param samples: the number of a posteriori samples to plot :param which_data: which if the training data to plot (default all) :type which_data: 'all' or a slice object to slice self.X, self.Y :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. 
Defaluts to data limits :param which_functions: which of the kernel functions to plot (additively) :type which_functions: list of bools :param resolution: the number of intervals to sample the GP on. Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D Plot the posterior of the GP. - In one dimension, the function is plotted with a shaded region identifying two standard deviations. - In two dimsensions, a contour-plot shows the mean predicted function - In higher dimensions, we've no implemented this yet !TODO! Can plot only part of the data and part of the posterior functions using which_data and which_functions Plot the data's view of the world, with non-normalized values and GP predictions passed through the likelihood """ if which_functions=='all': which_functions = [True]*self.kern.Nparts if which_data=='all': which_data = slice(None) if self.X.shape[1] == 1: Xnew, xmin, xmax = x_frame1D(self.X, plot_limits=plot_limits) if samples == 0:
else: m,v = self._raw_predict(Xnew, slices=which_functions,full_cov=True) Ysim = np.random.multivariate_normal(m.flatten(),v,samples) gpplot(Xnew,m,m-2*np.sqrt(np.diag(v)[:,None]),m+2*np.sqrt(np.diag(v))[:,None]) for i in range(samples): pb.plot(Xnew,Ysim[i,:],Tango.colorsHex['darkBlue'],linewidth=0.25) pb.plot(self.X[which_data],self.likelihood.Y[which_data],'kx',mew=1.5) pb.xlim(xmin,xmax) ymin,ymax = min(np.append(self.likelihood.Y,m-2*np.sqrt(np.diag(v)[:,None]))), max(np.append(self.likelihood.Y,m+2*np.sqrt(np.diag(v)[:,None]))) ymin, ymax = ymin - 0.1*(ymax - ymin), ymax + 0.1*(ymax - ymin) pb.ylim(ymin,ymax) if hasattr(self,'Z'): pb.plot(self.Z,self.Z*0+pb.ylim()[0],'r|',mew=1.5,markersize=12) elif self.X.shape[1] == 2: resolution = resolution or 50 Xnew, xmin, xmax, xx, yy = x_frame2D(self.X, plot_limits,resolution) m,v = self._raw_predict(Xnew, slices=which_functions) m = m.reshape(resolution,resolution).T pb.contour(xx,yy,m,vmin=m.min(),vmax=m.max(),cmap=pb.cm.jet) pb.scatter(Xorig[:,0],Xorig[:,1],40,Yorig,linewidth=0,cmap=pb.cm.jet,vmin=m.min(), vmax=m.max()) pb.xlim(xmin[0],xmax[0]) pb.ylim(xmin[1],xmax[1]) else: raise NotImplementedError, "Cannot define a frame with more than two input dimensions" def plot(self,samples=0,plot_limits=None,which_data='all',which_functions='all',resolution=None,levels=20): """ TODO: Docstrings! 
:param levels: for 2D plotting, the number of contour levels to use """ # TODO include samples if which_functions=='all': which_functions = [True]*self.kern.Nparts if which_data=='all': which_data = slice(None) if self.X.shape[1] == 1: Xu = self.X * self._Xstd + self._Xmean #NOTE self.X are the normalized values now Xnew, xmin, xmax = x_frame1D(Xu, plot_limits=plot_limits) m, var, lower, upper = self.predict(Xnew, slices=which_functions) gpplot(Xnew,m, lower, upper) pb.plot(Xu[which_data],self.likelihood.data[which_data],'kx',mew=1.5) ymin,ymax = min(np.append(self.likelihood.data,lower)), max(np.append(self.likelihood.data,upper)) ymin, ymax = ymin - 0.1*(ymax - ymin), ymax + 0.1*(ymax - ymin) pb.xlim(xmin,xmax) pb.ylim(ymin,ymax) if hasattr(self,'Z'): Zu = self.Z*self._Xstd + self._Xmean pb.plot(Zu,Zu*0+pb.ylim()[0],'r|',mew=1.5,markersize=12) if self.has_uncertain_inputs: pb.errorbar(self.X[:,0], pb.ylim()[0]+np.zeros(self.N), xerr=2*np.sqrt(self.X_variance.flatten())) elif self.X.shape[1]==2: #FIXME resolution = resolution or 50 Xnew, xx, yy, xmin, xmax = x_frame2D(self.X, plot_limits,resolution) x, y = np.linspace(xmin[0],xmax[0],resolution), np.linspace(xmin[1],xmax[1],resolution) m, var, lower, upper = self.predict(Xnew, slices=which_functions) m = m.reshape(resolution,resolution).T pb.contour(x,y,m,levels,vmin=m.min(),vmax=m.max(),cmap=pb.cm.jet) Yf = self.likelihood.Y.flatten() pb.scatter(self.X[:,0], self.X[:,1], 40, Yf, cmap=pb.cm.jet,vmin=m.min(),vmax=m.max(), linewidth=0.) pb.xlim(xmin[0],xmax[0]) pb.ylim(xmin[1],xmax[1]) if hasattr(self,'Z'): pb.plot(self.Z[:,0],self.Z[:,1],'wo') else: raise NotImplementedError, "Cannot define a frame with more than two input dimensions"
m,v = self._raw_predict(Xnew, slices=which_functions) gpplot(Xnew,m,m-2*np.sqrt(v),m+2*np.sqrt(v)) pb.plot(self.X[which_data],self.likelihood.Y[which_data],'kx',mew=1.5)
conditional_block
vision.py
# Python 2.7 Doritobot Vision System # EECS 498 Purple Team, 2014 # Written by Cody Hyman (hymanc@umich.edu) # Written against OpenCV 3.0.0-alpha import sys import os import cv2 import numpy as np from uvcinterface import UVCInterface as uvc from visionUtil import VisionUtil as vu from collections import deque from math import * # Calibration state 'Enumeration' class CalState(object): UNCAL = 1 CAL_PROG = 2 CALIBRATED = 3 ### Vision System Class ### class VisionSystem(object): # Window names CAM_FEED_NAME = 'Camera Feed' CAL_NAME = 'Calibrated Image' PROC_NAME = 'Vision Processing' CTL_NAME = 'Filter Controls' # Constants G_CENTER = 52 R_CENTER = 0 SMIN = 50 VMIN = 80 #HISTORY_LENGTH = 15 EMPTY_KERNEL = [0, 0, 0, 0, 0, 0, 0] RAW_KERNEL = np.array([1, 2, 3, 6, 10, 18, 20], dtype = np.float32) FIR_KERNEL = np.multiply(RAW_KERNEL,1/np.linalg.norm(RAW_KERNEL,1)) # Normalized kernel def __init__(self, camera): ### Instance Value initialization ### self.camera = camera self.calstate = CalState.UNCAL self.calpts = [] self.XSIZE = 1000 self.YSIZE = 1000 self.x_est = -1 self.y_est = -1 self.theta_est = -1 # Drawing storage self.waypointEst = [(300,300)] # Waypoint estimates for UI self.tagLoc = (10,10) # Tag location estimate self.fVectorStart = (0,0) self.fVectorEnd = (0,0) #self.worldpts = np.float32([ # [0,self.YSIZE/2], # [0,0], # [self.XSIZE,0], # [self.XSIZE,self.YSIZE/2] # ]) # ===== ***** Calibration points from world *****===== # '''self.worldpts = np.float32([ [-5, -1. * -105], #22 [90, -1. * -100], #27 [90, -1. * 110], #26 [0, -1. * 107] #25 ])#*self.IMG_SCALE + self.IMG_OFFSET''' # Swap x-y coordinates (WTF!) 
'''self.worldpts = np.float32([ [-105,-5], #22 [-100, 90], #27 [110, 90], #26 [107, 0] #25 ])#*self.IMG_SCALE + self.IMG_OFFSET''' self.worldpts = np.float32([ [-104,-2], #22 [-104,85], #27 [115,84], #26 [115,3] #25 ]) self.worldpts = vu.toImageCoordinates(self.worldpts) testPts = vu.toWaypointCoordinates(self.worldpts) print 'TestWorldPts', str(testPts) # ===== *************** ===== # ### Camera initialization ### print 'Opening Camera ' + str(camera) self.vidcap = cv2.VideoCapture(camera)# Open up specified camera # Check if camera is opened and exit if not if self.vidcap.isOpened(): print 'Camera ' + str(camera) + ' opened successfully' else: print 'ERROR: Camera ' + str(camera) + ' not opened' return False # Set camera autoexposure uvc.set(self.camera, uvc.EXPOSURE_AUTO, 1) uvc.set(self.camera, uvc.EXPOSURE_AUTO_PRIORITY, 0) ### Initialize UI elements ### # Filter Controls Window ctlWindow = cv2.namedWindow(self.CTL_NAME) cv2.createTrackbar('Blue', self.CTL_NAME, 88, 180, self.trackbarChangeHandler) cv2.createTrackbar('Green', self.CTL_NAME, 41, 180, self.trackbarChangeHandler) cv2.createTrackbar('Red', self.CTL_NAME, 172, 180, self.trackbarChangeHandler) cv2.createTrackbar('B Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler) cv2.createTrackbar('G Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler) cv2.createTrackbar('R Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler) cv2.createTrackbar('Sat Cutoff', self.CTL_NAME, 100, 255, self.trackbarChangeHandler) cv2.createTrackbar('Show Background', self.CTL_NAME, 1, 1, self.trackbarChangeHandler) # Camera input window camWindow = cv2.namedWindow(self.CAM_FEED_NAME) cv2.createTrackbar('Gain', self.CAM_FEED_NAME, 128, 255, self.gainChanged) cv2.createTrackbar('Exposure', self.CAM_FEED_NAME, 1600, 2000, self.exposureChanged) cv2.createTrackbar('Saturation', self.CAM_FEED_NAME, 128, 255, self.saturationChanged) cv2.setMouseCallback(self.CAM_FEED_NAME, self.mouseClickHandler) # Set mouse 
callbacks for calibration # Rectified/Calibrated Image window #calWindow = cv2.namedWindow(self.CAL_NAME) #cv2.setMouseCallback(self.CAL_NAME, self.colorClickHandler) # Image processing Window 2 procWindow = cv2.namedWindow(self.PROC_NAME) # History for filter bank self.xHistory = deque(self.EMPTY_KERNEL) self.yHistory = deque(self.EMPTY_KERNEL) self.thetaHistory = deque(self.EMPTY_KERNEL) # Run vision on a frame def processFrame(self): ### Main processing loop ### #while(True): frameRet, self.camImg = self.vidcap.read() #Img = self.drawCalMarkers() cv2.imshow(self.CAM_FEED_NAME, self.drawCalMarkers()) if(self.calstate == CalState.CALIBRATED): self.remapImage() # Apply perspective warp bl = cv2.getTrackbarPos('Blue', self.CTL_NAME) gr = cv2.getTrackbarPos('Green', self.CTL_NAME) rd = cv2.getTrackbarPos('Red', self.CTL_NAME) bvmin = cv2.getTrackbarPos('B Cutoff', self.CTL_NAME) gvmin = cv2.getTrackbarPos('G Cutoff', self.CTL_NAME)
rCentroid, self.rTagImg = self.findMarker(self.warpImg, rd, 10, smin, rvmin) #vu.printCentroids(gCentroid, rCentroid) if(bgroundFlag): self.rgbImg = vu.comboImage(self.bTagImg, self.gTagImg, self.rTagImg, self.warpImg) else: self.rgbImg = vu.comboImage(self.bTagImg, self.gTagImg, self.rTagImg) ctr, theta, bCtr, gCtr, rCtr = vu.localizeRobot(bCentroid, gCentroid, rCentroid) if((ctr != None) and (theta != None)): fctr, ftheta = self.filterPoints(ctr, theta) self.x_est = ctr[0] self.y_est = ctr[1] # print 'Theta IN:', theta self.theta_est = theta#ftheta self.tagLoc = vu.computeTagLocation(ctr, bCtr) # Compute tag location vu.drawSquareMarker(self.rgbImg, int(fctr[0]), int(fctr[1]), 5, (255,0,255)) if(gCentroid != None): vu.drawSquareMarker(self.rgbImg, int(gCentroid[0]), int(gCentroid[1]), 5, (0,0,255)) if(rCentroid != None): vu.drawSquareMarker(self.rgbImg, int(rCentroid[0]), int(rCentroid[1]), 5, (255,0,0)) if(bCentroid != None): vu.drawSquareMarker(self.rgbImg, int(bCentroid[0]), int(bCentroid[1]), 5, (255,255,0)) wpIndex = 0 for wp in self.waypointEst: wpIndex = wpIndex + 1 if(wpIndex == 1): wpcolor = (0,0,255) else: wpcolor = (0,255,255) vu.drawFilledCircleMarker(self.rgbImg, wp[0], wp[1], 10, wpcolor) # vu.drawTextIndex(self.rgbImg, wp[0], wp[1], str(wpIndex)) # Draw waypoint index if(self.tagLoc[0] != None): vu.drawFilledCircleMarker(self.rgbImg, self.tagLoc[0], self.tagLoc[1], 5, (0,0,160)) #vu.drawVector(self.rgbImg, self.fVectorStart, self.fVectorEnd, (255,128,255)) #cv2.imshow(self.CAL_NAME, self.warpImg) cv2.imshow(self.PROC_NAME, self.rgbImg) #if cv2.waitKey(20) & 0xFF == ord('q'): # break # Use current perspective transform to remap image def remapImage(self): if(self.calstate == CalState.CALIBRATED): self.warpImg = cv2.warpPerspective(self.camImg, self.warp,(int(300*vu.IMG_SCALE),int(300*vu.IMG_SCALE))) self.warpImg = cv2.GaussianBlur(self.warpImg, (9,9), 1) self.warpImg = cv2.medianBlur(self.warpImg, 5) else: print 'Transform not calibrated' # Draws 
calibration markers on the camera image def drawCalMarkers(self): markedImg = self.camImg.copy() for pt in self.calpts: vu.drawSquareMarker(markedImg, pt[0], pt[1], 5, (255,0,255)) return markedImg # Finds a marker's central moment def findMarker(self, image, hueCenter, hueWidth, satMin, valMin): hsvImg = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) markerImg = cv2.inRange(hsvImg, np.array([hueCenter-hueWidth/2, satMin, valMin]), np.array([hueCenter+hueWidth/2, 255, 255])) cleanElement = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3)) markerImg = cv2.erode(markerImg, cleanElement) # Clean up marker image w/ erode-dilate-median markerImg = cv2.dilate(markerImg, cleanElement) markerImg = cv2.medianBlur(markerImg, 3) mMoments = cv2.moments(markerImg) # Compute moments m00 = mMoments['m00'] if(m00 > 0.1): return (mMoments['m10']/m00, mMoments['m01']/m00), markerImg return None, markerImg # FIR on centers and angles def filterPoints(self, ctr, theta): if((ctr != None) and (theta != None)): if(len(self.xHistory) == len(self.FIR_KERNEL)): self.xHistory.popleft() if(len(self.yHistory) == len(self.FIR_KERNEL)): self.yHistory.popleft() if(len(self.thetaHistory) == len(self.FIR_KERNEL)): self.thetaHistory.popleft() self.xHistory.append(ctr[0]) self.yHistory.append(ctr[1]) self.thetaHistory.append(theta) xFilter = np.linalg.norm(np.multiply(self.FIR_KERNEL, np.array(self.xHistory)),1) yFilter = np.linalg.norm(np.multiply(self.FIR_KERNEL, np.array(self.yHistory)),1) thetaFilter = np.linalg.norm(np.multiply(self.FIR_KERNEL, np.array(self.thetaHistory)),1) #print 'Filtered Phi:', phiFilter, ' Raw Theta:', theta return (xFilter, yFilter), thetaFilter # Interface to get current state estimates def getState(self): # Give estimated [x,y,theta] if(self.tagLoc != None): tx = self.tagLoc[0] ty = self.tagLoc[1] else: tx = None ty = None return [self.x_est, self.y_est, self.theta_est, tx, ty] ### Event Handlers ### # Camera input mouseclick handler def mouseClickHandler(self, event, x, y, 
flags, param): if event == cv2.EVENT_RBUTTONDOWN: print 'Recalibration requested' self.calstate = CalState.CAL_PROG self.calpts = [] # Reset calibration points if event == cv2.EVENT_LBUTTONDOWN: print 'Mouse left click event at ' + str(x) + ',' + str(y) if(self.calstate == CalState.UNCAL): self.calstate = CalState.CAL_PROG print 'Adding calibration point at (' + str(x) + ',' + str(y) + ')' self.calpts.append([x,y]) elif(self.calstate == CalState.CAL_PROG): if(len(self.calpts) < 4): print 'Adding calibration point at (' + str(x) + ',' + str(y) + ')' self.calpts.append([x,y]) # Finish if(len(self.calpts) == 4): print 'Calibrated' self.warp = cv2.getPerspectiveTransform(np.float32(self.calpts), self.worldpts) print str(self.calpts) self.calstate = CalState.CALIBRATED elif(self.calstate == CalState.CALIBRATED): print 'Already calibrated' # Color click handler for cal window def colorClickHandler(self, event, x, y, flags, param): if event == cv2.EVENT_LBUTTONDOWN: print 'Checking marker 1 color at ', str(x), ',', str(y) pass # Get color at point if event == cv2.EVENT_RBUTTONDOWN: print 'Checking marker 2 color at ', str(x), ',', str(y) pass # Get color at point # Generic do-nothing slider handler (for ) def trackbarChangeHandler(self, x): pass # Gain slider handler def gainChanged(self, gain): uvc.set(self.camera, uvc.GAIN, gain) # Saturation slider handler def saturationChanged(self, sat): uvc.set(self.camera, uvc.SATURATION, sat) # Exposure slider handler def exposureChanged(self, exp): uvc.set(self.camera, uvc.EXPOSURE_ABS, exp) # Sets the waypoint list for rendering on overlay def setWaypoints(self, waypointEst): self.waypointEst = vu.toImageCoordinates(waypointEst) # Sets the estimated tag location for rendering on the overlay def setTagLocation(self, tagEst): self.tagLoc = (int(tagEst[0]),int(tagEst[1])) # Stops the vision process def stop(self): self.vidcap.release() cv2.release() cv2.destroyAllWindows() # Main function to run vision system as standalone def 
main(): print 'Args:' , str(sys.argv) for x in range(len(sys.argv)): if(sys.argv[x] == '-c'): ncam = int(sys.argv[x+1]) vs = VisionSystem(ncam) self.vidcap.release() cv2.release() cv2.destroyAllWindows() if __name__ == '__main__': main()
rvmin = cv2.getTrackbarPos('R Cutoff', self.CTL_NAME) smin = cv2.getTrackbarPos('Sat Cutoff', self.CTL_NAME) bgroundFlag = cv2.getTrackbarPos('Show Background', self.CTL_NAME) bCentroid, self.bTagImg = self.findMarker(self.warpImg, bl, 10, smin, bvmin) gCentroid, self.gTagImg = self.findMarker(self.warpImg, gr, 10, smin, gvmin)
random_line_split
vision.py
# Python 2.7 Doritobot Vision System # EECS 498 Purple Team, 2014 # Written by Cody Hyman (hymanc@umich.edu) # Written against OpenCV 3.0.0-alpha import sys import os import cv2 import numpy as np from uvcinterface import UVCInterface as uvc from visionUtil import VisionUtil as vu from collections import deque from math import * # Calibration state 'Enumeration' class CalState(object): UNCAL = 1 CAL_PROG = 2 CALIBRATED = 3 ### Vision System Class ### class VisionSystem(object): # Window names CAM_FEED_NAME = 'Camera Feed' CAL_NAME = 'Calibrated Image' PROC_NAME = 'Vision Processing' CTL_NAME = 'Filter Controls' # Constants G_CENTER = 52 R_CENTER = 0 SMIN = 50 VMIN = 80 #HISTORY_LENGTH = 15 EMPTY_KERNEL = [0, 0, 0, 0, 0, 0, 0] RAW_KERNEL = np.array([1, 2, 3, 6, 10, 18, 20], dtype = np.float32) FIR_KERNEL = np.multiply(RAW_KERNEL,1/np.linalg.norm(RAW_KERNEL,1)) # Normalized kernel def __init__(self, camera): ### Instance Value initialization ### self.camera = camera self.calstate = CalState.UNCAL self.calpts = [] self.XSIZE = 1000 self.YSIZE = 1000 self.x_est = -1 self.y_est = -1 self.theta_est = -1 # Drawing storage self.waypointEst = [(300,300)] # Waypoint estimates for UI self.tagLoc = (10,10) # Tag location estimate self.fVectorStart = (0,0) self.fVectorEnd = (0,0) #self.worldpts = np.float32([ # [0,self.YSIZE/2], # [0,0], # [self.XSIZE,0], # [self.XSIZE,self.YSIZE/2] # ]) # ===== ***** Calibration points from world *****===== # '''self.worldpts = np.float32([ [-5, -1. * -105], #22 [90, -1. * -100], #27 [90, -1. * 110], #26 [0, -1. * 107] #25 ])#*self.IMG_SCALE + self.IMG_OFFSET''' # Swap x-y coordinates (WTF!) 
'''self.worldpts = np.float32([ [-105,-5], #22 [-100, 90], #27 [110, 90], #26 [107, 0] #25 ])#*self.IMG_SCALE + self.IMG_OFFSET''' self.worldpts = np.float32([ [-104,-2], #22 [-104,85], #27 [115,84], #26 [115,3] #25 ]) self.worldpts = vu.toImageCoordinates(self.worldpts) testPts = vu.toWaypointCoordinates(self.worldpts) print 'TestWorldPts', str(testPts) # ===== *************** ===== # ### Camera initialization ### print 'Opening Camera ' + str(camera) self.vidcap = cv2.VideoCapture(camera)# Open up specified camera # Check if camera is opened and exit if not if self.vidcap.isOpened(): print 'Camera ' + str(camera) + ' opened successfully' else: print 'ERROR: Camera ' + str(camera) + ' not opened' return False # Set camera autoexposure uvc.set(self.camera, uvc.EXPOSURE_AUTO, 1) uvc.set(self.camera, uvc.EXPOSURE_AUTO_PRIORITY, 0) ### Initialize UI elements ### # Filter Controls Window ctlWindow = cv2.namedWindow(self.CTL_NAME) cv2.createTrackbar('Blue', self.CTL_NAME, 88, 180, self.trackbarChangeHandler) cv2.createTrackbar('Green', self.CTL_NAME, 41, 180, self.trackbarChangeHandler) cv2.createTrackbar('Red', self.CTL_NAME, 172, 180, self.trackbarChangeHandler) cv2.createTrackbar('B Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler) cv2.createTrackbar('G Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler) cv2.createTrackbar('R Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler) cv2.createTrackbar('Sat Cutoff', self.CTL_NAME, 100, 255, self.trackbarChangeHandler) cv2.createTrackbar('Show Background', self.CTL_NAME, 1, 1, self.trackbarChangeHandler) # Camera input window camWindow = cv2.namedWindow(self.CAM_FEED_NAME) cv2.createTrackbar('Gain', self.CAM_FEED_NAME, 128, 255, self.gainChanged) cv2.createTrackbar('Exposure', self.CAM_FEED_NAME, 1600, 2000, self.exposureChanged) cv2.createTrackbar('Saturation', self.CAM_FEED_NAME, 128, 255, self.saturationChanged) cv2.setMouseCallback(self.CAM_FEED_NAME, self.mouseClickHandler) # Set mouse 
callbacks for calibration # Rectified/Calibrated Image window #calWindow = cv2.namedWindow(self.CAL_NAME) #cv2.setMouseCallback(self.CAL_NAME, self.colorClickHandler) # Image processing Window 2 procWindow = cv2.namedWindow(self.PROC_NAME) # History for filter bank self.xHistory = deque(self.EMPTY_KERNEL) self.yHistory = deque(self.EMPTY_KERNEL) self.thetaHistory = deque(self.EMPTY_KERNEL) # Run vision on a frame def processFrame(self): ### Main processing loop ### #while(True): frameRet, self.camImg = self.vidcap.read() #Img = self.drawCalMarkers() cv2.imshow(self.CAM_FEED_NAME, self.drawCalMarkers()) if(self.calstate == CalState.CALIBRATED): self.remapImage() # Apply perspective warp bl = cv2.getTrackbarPos('Blue', self.CTL_NAME) gr = cv2.getTrackbarPos('Green', self.CTL_NAME) rd = cv2.getTrackbarPos('Red', self.CTL_NAME) bvmin = cv2.getTrackbarPos('B Cutoff', self.CTL_NAME) gvmin = cv2.getTrackbarPos('G Cutoff', self.CTL_NAME) rvmin = cv2.getTrackbarPos('R Cutoff', self.CTL_NAME) smin = cv2.getTrackbarPos('Sat Cutoff', self.CTL_NAME) bgroundFlag = cv2.getTrackbarPos('Show Background', self.CTL_NAME) bCentroid, self.bTagImg = self.findMarker(self.warpImg, bl, 10, smin, bvmin) gCentroid, self.gTagImg = self.findMarker(self.warpImg, gr, 10, smin, gvmin) rCentroid, self.rTagImg = self.findMarker(self.warpImg, rd, 10, smin, rvmin) #vu.printCentroids(gCentroid, rCentroid) if(bgroundFlag): self.rgbImg = vu.comboImage(self.bTagImg, self.gTagImg, self.rTagImg, self.warpImg) else: self.rgbImg = vu.comboImage(self.bTagImg, self.gTagImg, self.rTagImg) ctr, theta, bCtr, gCtr, rCtr = vu.localizeRobot(bCentroid, gCentroid, rCentroid) if((ctr != None) and (theta != None)): fctr, ftheta = self.filterPoints(ctr, theta) self.x_est = ctr[0] self.y_est = ctr[1] # print 'Theta IN:', theta self.theta_est = theta#ftheta self.tagLoc = vu.computeTagLocation(ctr, bCtr) # Compute tag location vu.drawSquareMarker(self.rgbImg, int(fctr[0]), int(fctr[1]), 5, (255,0,255)) if(gCentroid != 
None): vu.drawSquareMarker(self.rgbImg, int(gCentroid[0]), int(gCentroid[1]), 5, (0,0,255)) if(rCentroid != None): vu.drawSquareMarker(self.rgbImg, int(rCentroid[0]), int(rCentroid[1]), 5, (255,0,0)) if(bCentroid != None): vu.drawSquareMarker(self.rgbImg, int(bCentroid[0]), int(bCentroid[1]), 5, (255,255,0)) wpIndex = 0 for wp in self.waypointEst: wpIndex = wpIndex + 1 if(wpIndex == 1): wpcolor = (0,0,255) else: wpcolor = (0,255,255) vu.drawFilledCircleMarker(self.rgbImg, wp[0], wp[1], 10, wpcolor) # vu.drawTextIndex(self.rgbImg, wp[0], wp[1], str(wpIndex)) # Draw waypoint index if(self.tagLoc[0] != None): vu.drawFilledCircleMarker(self.rgbImg, self.tagLoc[0], self.tagLoc[1], 5, (0,0,160)) #vu.drawVector(self.rgbImg, self.fVectorStart, self.fVectorEnd, (255,128,255)) #cv2.imshow(self.CAL_NAME, self.warpImg) cv2.imshow(self.PROC_NAME, self.rgbImg) #if cv2.waitKey(20) & 0xFF == ord('q'): # break # Use current perspective transform to remap image def remapImage(self): if(self.calstate == CalState.CALIBRATED): self.warpImg = cv2.warpPerspective(self.camImg, self.warp,(int(300*vu.IMG_SCALE),int(300*vu.IMG_SCALE))) self.warpImg = cv2.GaussianBlur(self.warpImg, (9,9), 1) self.warpImg = cv2.medianBlur(self.warpImg, 5) else: print 'Transform not calibrated' # Draws calibration markers on the camera image def drawCalMarkers(self): markedImg = self.camImg.copy() for pt in self.calpts: vu.drawSquareMarker(markedImg, pt[0], pt[1], 5, (255,0,255)) return markedImg # Finds a marker's central moment def findMarker(self, image, hueCenter, hueWidth, satMin, valMin): hsvImg = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) markerImg = cv2.inRange(hsvImg, np.array([hueCenter-hueWidth/2, satMin, valMin]), np.array([hueCenter+hueWidth/2, 255, 255])) cleanElement = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3)) markerImg = cv2.erode(markerImg, cleanElement) # Clean up marker image w/ erode-dilate-median markerImg = cv2.dilate(markerImg, cleanElement) markerImg = cv2.medianBlur(markerImg, 3) 
mMoments = cv2.moments(markerImg) # Compute moments m00 = mMoments['m00'] if(m00 > 0.1): return (mMoments['m10']/m00, mMoments['m01']/m00), markerImg return None, markerImg # FIR on centers and angles def filterPoints(self, ctr, theta): if((ctr != None) and (theta != None)): if(len(self.xHistory) == len(self.FIR_KERNEL)): self.xHistory.popleft() if(len(self.yHistory) == len(self.FIR_KERNEL)): self.yHistory.popleft() if(len(self.thetaHistory) == len(self.FIR_KERNEL)): self.thetaHistory.popleft() self.xHistory.append(ctr[0]) self.yHistory.append(ctr[1]) self.thetaHistory.append(theta) xFilter = np.linalg.norm(np.multiply(self.FIR_KERNEL, np.array(self.xHistory)),1) yFilter = np.linalg.norm(np.multiply(self.FIR_KERNEL, np.array(self.yHistory)),1) thetaFilter = np.linalg.norm(np.multiply(self.FIR_KERNEL, np.array(self.thetaHistory)),1) #print 'Filtered Phi:', phiFilter, ' Raw Theta:', theta return (xFilter, yFilter), thetaFilter # Interface to get current state estimates def getState(self): # Give estimated [x,y,theta] if(self.tagLoc != None): tx = self.tagLoc[0] ty = self.tagLoc[1] else: tx = None ty = None return [self.x_est, self.y_est, self.theta_est, tx, ty] ### Event Handlers ### # Camera input mouseclick handler def mouseClickHandler(self, event, x, y, flags, param): if event == cv2.EVENT_RBUTTONDOWN: print 'Recalibration requested' self.calstate = CalState.CAL_PROG self.calpts = [] # Reset calibration points if event == cv2.EVENT_LBUTTONDOWN: print 'Mouse left click event at ' + str(x) + ',' + str(y) if(self.calstate == CalState.UNCAL): self.calstate = CalState.CAL_PROG print 'Adding calibration point at (' + str(x) + ',' + str(y) + ')' self.calpts.append([x,y]) elif(self.calstate == CalState.CAL_PROG): if(len(self.calpts) < 4): print 'Adding calibration point at (' + str(x) + ',' + str(y) + ')' self.calpts.append([x,y]) # Finish if(len(self.calpts) == 4): print 'Calibrated' self.warp = cv2.getPerspectiveTransform(np.float32(self.calpts), self.worldpts) print 
str(self.calpts) self.calstate = CalState.CALIBRATED elif(self.calstate == CalState.CALIBRATED): print 'Already calibrated' # Color click handler for cal window def colorClickHandler(self, event, x, y, flags, param): if event == cv2.EVENT_LBUTTONDOWN: print 'Checking marker 1 color at ', str(x), ',', str(y) pass # Get color at point if event == cv2.EVENT_RBUTTONDOWN: print 'Checking marker 2 color at ', str(x), ',', str(y) pass # Get color at point # Generic do-nothing slider handler (for ) def
(self, x): pass # Gain slider handler def gainChanged(self, gain): uvc.set(self.camera, uvc.GAIN, gain) # Saturation slider handler def saturationChanged(self, sat): uvc.set(self.camera, uvc.SATURATION, sat) # Exposure slider handler def exposureChanged(self, exp): uvc.set(self.camera, uvc.EXPOSURE_ABS, exp) # Sets the waypoint list for rendering on overlay def setWaypoints(self, waypointEst): self.waypointEst = vu.toImageCoordinates(waypointEst) # Sets the estimated tag location for rendering on the overlay def setTagLocation(self, tagEst): self.tagLoc = (int(tagEst[0]),int(tagEst[1])) # Stops the vision process def stop(self): self.vidcap.release() cv2.release() cv2.destroyAllWindows() # Main function to run vision system as standalone def main(): print 'Args:' , str(sys.argv) for x in range(len(sys.argv)): if(sys.argv[x] == '-c'): ncam = int(sys.argv[x+1]) vs = VisionSystem(ncam) self.vidcap.release() cv2.release() cv2.destroyAllWindows() if __name__ == '__main__': main()
trackbarChangeHandler
identifier_name
vision.py
# Python 2.7 Doritobot Vision System # EECS 498 Purple Team, 2014 # Written by Cody Hyman (hymanc@umich.edu) # Written against OpenCV 3.0.0-alpha import sys import os import cv2 import numpy as np from uvcinterface import UVCInterface as uvc from visionUtil import VisionUtil as vu from collections import deque from math import * # Calibration state 'Enumeration' class CalState(object): UNCAL = 1 CAL_PROG = 2 CALIBRATED = 3 ### Vision System Class ### class VisionSystem(object): # Window names CAM_FEED_NAME = 'Camera Feed' CAL_NAME = 'Calibrated Image' PROC_NAME = 'Vision Processing' CTL_NAME = 'Filter Controls' # Constants G_CENTER = 52 R_CENTER = 0 SMIN = 50 VMIN = 80 #HISTORY_LENGTH = 15 EMPTY_KERNEL = [0, 0, 0, 0, 0, 0, 0] RAW_KERNEL = np.array([1, 2, 3, 6, 10, 18, 20], dtype = np.float32) FIR_KERNEL = np.multiply(RAW_KERNEL,1/np.linalg.norm(RAW_KERNEL,1)) # Normalized kernel def __init__(self, camera): ### Instance Value initialization ### self.camera = camera self.calstate = CalState.UNCAL self.calpts = [] self.XSIZE = 1000 self.YSIZE = 1000 self.x_est = -1 self.y_est = -1 self.theta_est = -1 # Drawing storage self.waypointEst = [(300,300)] # Waypoint estimates for UI self.tagLoc = (10,10) # Tag location estimate self.fVectorStart = (0,0) self.fVectorEnd = (0,0) #self.worldpts = np.float32([ # [0,self.YSIZE/2], # [0,0], # [self.XSIZE,0], # [self.XSIZE,self.YSIZE/2] # ]) # ===== ***** Calibration points from world *****===== # '''self.worldpts = np.float32([ [-5, -1. * -105], #22 [90, -1. * -100], #27 [90, -1. * 110], #26 [0, -1. * 107] #25 ])#*self.IMG_SCALE + self.IMG_OFFSET''' # Swap x-y coordinates (WTF!) 
'''self.worldpts = np.float32([ [-105,-5], #22 [-100, 90], #27 [110, 90], #26 [107, 0] #25 ])#*self.IMG_SCALE + self.IMG_OFFSET''' self.worldpts = np.float32([ [-104,-2], #22 [-104,85], #27 [115,84], #26 [115,3] #25 ]) self.worldpts = vu.toImageCoordinates(self.worldpts) testPts = vu.toWaypointCoordinates(self.worldpts) print 'TestWorldPts', str(testPts) # ===== *************** ===== # ### Camera initialization ### print 'Opening Camera ' + str(camera) self.vidcap = cv2.VideoCapture(camera)# Open up specified camera # Check if camera is opened and exit if not if self.vidcap.isOpened(): print 'Camera ' + str(camera) + ' opened successfully' else: print 'ERROR: Camera ' + str(camera) + ' not opened' return False # Set camera autoexposure uvc.set(self.camera, uvc.EXPOSURE_AUTO, 1) uvc.set(self.camera, uvc.EXPOSURE_AUTO_PRIORITY, 0) ### Initialize UI elements ### # Filter Controls Window ctlWindow = cv2.namedWindow(self.CTL_NAME) cv2.createTrackbar('Blue', self.CTL_NAME, 88, 180, self.trackbarChangeHandler) cv2.createTrackbar('Green', self.CTL_NAME, 41, 180, self.trackbarChangeHandler) cv2.createTrackbar('Red', self.CTL_NAME, 172, 180, self.trackbarChangeHandler) cv2.createTrackbar('B Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler) cv2.createTrackbar('G Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler) cv2.createTrackbar('R Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler) cv2.createTrackbar('Sat Cutoff', self.CTL_NAME, 100, 255, self.trackbarChangeHandler) cv2.createTrackbar('Show Background', self.CTL_NAME, 1, 1, self.trackbarChangeHandler) # Camera input window camWindow = cv2.namedWindow(self.CAM_FEED_NAME) cv2.createTrackbar('Gain', self.CAM_FEED_NAME, 128, 255, self.gainChanged) cv2.createTrackbar('Exposure', self.CAM_FEED_NAME, 1600, 2000, self.exposureChanged) cv2.createTrackbar('Saturation', self.CAM_FEED_NAME, 128, 255, self.saturationChanged) cv2.setMouseCallback(self.CAM_FEED_NAME, self.mouseClickHandler) # Set mouse 
callbacks for calibration # Rectified/Calibrated Image window #calWindow = cv2.namedWindow(self.CAL_NAME) #cv2.setMouseCallback(self.CAL_NAME, self.colorClickHandler) # Image processing Window 2 procWindow = cv2.namedWindow(self.PROC_NAME) # History for filter bank self.xHistory = deque(self.EMPTY_KERNEL) self.yHistory = deque(self.EMPTY_KERNEL) self.thetaHistory = deque(self.EMPTY_KERNEL) # Run vision on a frame def processFrame(self): ### Main processing loop ### #while(True): frameRet, self.camImg = self.vidcap.read() #Img = self.drawCalMarkers() cv2.imshow(self.CAM_FEED_NAME, self.drawCalMarkers()) if(self.calstate == CalState.CALIBRATED): self.remapImage() # Apply perspective warp bl = cv2.getTrackbarPos('Blue', self.CTL_NAME) gr = cv2.getTrackbarPos('Green', self.CTL_NAME) rd = cv2.getTrackbarPos('Red', self.CTL_NAME) bvmin = cv2.getTrackbarPos('B Cutoff', self.CTL_NAME) gvmin = cv2.getTrackbarPos('G Cutoff', self.CTL_NAME) rvmin = cv2.getTrackbarPos('R Cutoff', self.CTL_NAME) smin = cv2.getTrackbarPos('Sat Cutoff', self.CTL_NAME) bgroundFlag = cv2.getTrackbarPos('Show Background', self.CTL_NAME) bCentroid, self.bTagImg = self.findMarker(self.warpImg, bl, 10, smin, bvmin) gCentroid, self.gTagImg = self.findMarker(self.warpImg, gr, 10, smin, gvmin) rCentroid, self.rTagImg = self.findMarker(self.warpImg, rd, 10, smin, rvmin) #vu.printCentroids(gCentroid, rCentroid) if(bgroundFlag): self.rgbImg = vu.comboImage(self.bTagImg, self.gTagImg, self.rTagImg, self.warpImg) else: self.rgbImg = vu.comboImage(self.bTagImg, self.gTagImg, self.rTagImg) ctr, theta, bCtr, gCtr, rCtr = vu.localizeRobot(bCentroid, gCentroid, rCentroid) if((ctr != None) and (theta != None)): fctr, ftheta = self.filterPoints(ctr, theta) self.x_est = ctr[0] self.y_est = ctr[1] # print 'Theta IN:', theta self.theta_est = theta#ftheta self.tagLoc = vu.computeTagLocation(ctr, bCtr) # Compute tag location vu.drawSquareMarker(self.rgbImg, int(fctr[0]), int(fctr[1]), 5, (255,0,255)) if(gCentroid != 
None): vu.drawSquareMarker(self.rgbImg, int(gCentroid[0]), int(gCentroid[1]), 5, (0,0,255)) if(rCentroid != None): vu.drawSquareMarker(self.rgbImg, int(rCentroid[0]), int(rCentroid[1]), 5, (255,0,0)) if(bCentroid != None): vu.drawSquareMarker(self.rgbImg, int(bCentroid[0]), int(bCentroid[1]), 5, (255,255,0)) wpIndex = 0 for wp in self.waypointEst: wpIndex = wpIndex + 1 if(wpIndex == 1): wpcolor = (0,0,255) else: wpcolor = (0,255,255) vu.drawFilledCircleMarker(self.rgbImg, wp[0], wp[1], 10, wpcolor) # vu.drawTextIndex(self.rgbImg, wp[0], wp[1], str(wpIndex)) # Draw waypoint index if(self.tagLoc[0] != None): vu.drawFilledCircleMarker(self.rgbImg, self.tagLoc[0], self.tagLoc[1], 5, (0,0,160)) #vu.drawVector(self.rgbImg, self.fVectorStart, self.fVectorEnd, (255,128,255)) #cv2.imshow(self.CAL_NAME, self.warpImg) cv2.imshow(self.PROC_NAME, self.rgbImg) #if cv2.waitKey(20) & 0xFF == ord('q'): # break # Use current perspective transform to remap image def remapImage(self): if(self.calstate == CalState.CALIBRATED): self.warpImg = cv2.warpPerspective(self.camImg, self.warp,(int(300*vu.IMG_SCALE),int(300*vu.IMG_SCALE))) self.warpImg = cv2.GaussianBlur(self.warpImg, (9,9), 1) self.warpImg = cv2.medianBlur(self.warpImg, 5) else: print 'Transform not calibrated' # Draws calibration markers on the camera image def drawCalMarkers(self): markedImg = self.camImg.copy() for pt in self.calpts:
return markedImg # Finds a marker's central moment def findMarker(self, image, hueCenter, hueWidth, satMin, valMin): hsvImg = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) markerImg = cv2.inRange(hsvImg, np.array([hueCenter-hueWidth/2, satMin, valMin]), np.array([hueCenter+hueWidth/2, 255, 255])) cleanElement = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3)) markerImg = cv2.erode(markerImg, cleanElement) # Clean up marker image w/ erode-dilate-median markerImg = cv2.dilate(markerImg, cleanElement) markerImg = cv2.medianBlur(markerImg, 3) mMoments = cv2.moments(markerImg) # Compute moments m00 = mMoments['m00'] if(m00 > 0.1): return (mMoments['m10']/m00, mMoments['m01']/m00), markerImg return None, markerImg # FIR on centers and angles def filterPoints(self, ctr, theta): if((ctr != None) and (theta != None)): if(len(self.xHistory) == len(self.FIR_KERNEL)): self.xHistory.popleft() if(len(self.yHistory) == len(self.FIR_KERNEL)): self.yHistory.popleft() if(len(self.thetaHistory) == len(self.FIR_KERNEL)): self.thetaHistory.popleft() self.xHistory.append(ctr[0]) self.yHistory.append(ctr[1]) self.thetaHistory.append(theta) xFilter = np.linalg.norm(np.multiply(self.FIR_KERNEL, np.array(self.xHistory)),1) yFilter = np.linalg.norm(np.multiply(self.FIR_KERNEL, np.array(self.yHistory)),1) thetaFilter = np.linalg.norm(np.multiply(self.FIR_KERNEL, np.array(self.thetaHistory)),1) #print 'Filtered Phi:', phiFilter, ' Raw Theta:', theta return (xFilter, yFilter), thetaFilter # Interface to get current state estimates def getState(self): # Give estimated [x,y,theta] if(self.tagLoc != None): tx = self.tagLoc[0] ty = self.tagLoc[1] else: tx = None ty = None return [self.x_est, self.y_est, self.theta_est, tx, ty] ### Event Handlers ### # Camera input mouseclick handler def mouseClickHandler(self, event, x, y, flags, param): if event == cv2.EVENT_RBUTTONDOWN: print 'Recalibration requested' self.calstate = CalState.CAL_PROG self.calpts = [] # Reset calibration points if event == 
cv2.EVENT_LBUTTONDOWN: print 'Mouse left click event at ' + str(x) + ',' + str(y) if(self.calstate == CalState.UNCAL): self.calstate = CalState.CAL_PROG print 'Adding calibration point at (' + str(x) + ',' + str(y) + ')' self.calpts.append([x,y]) elif(self.calstate == CalState.CAL_PROG): if(len(self.calpts) < 4): print 'Adding calibration point at (' + str(x) + ',' + str(y) + ')' self.calpts.append([x,y]) # Finish if(len(self.calpts) == 4): print 'Calibrated' self.warp = cv2.getPerspectiveTransform(np.float32(self.calpts), self.worldpts) print str(self.calpts) self.calstate = CalState.CALIBRATED elif(self.calstate == CalState.CALIBRATED): print 'Already calibrated' # Color click handler for cal window def colorClickHandler(self, event, x, y, flags, param): if event == cv2.EVENT_LBUTTONDOWN: print 'Checking marker 1 color at ', str(x), ',', str(y) pass # Get color at point if event == cv2.EVENT_RBUTTONDOWN: print 'Checking marker 2 color at ', str(x), ',', str(y) pass # Get color at point # Generic do-nothing slider handler (for ) def trackbarChangeHandler(self, x): pass # Gain slider handler def gainChanged(self, gain): uvc.set(self.camera, uvc.GAIN, gain) # Saturation slider handler def saturationChanged(self, sat): uvc.set(self.camera, uvc.SATURATION, sat) # Exposure slider handler def exposureChanged(self, exp): uvc.set(self.camera, uvc.EXPOSURE_ABS, exp) # Sets the waypoint list for rendering on overlay def setWaypoints(self, waypointEst): self.waypointEst = vu.toImageCoordinates(waypointEst) # Sets the estimated tag location for rendering on the overlay def setTagLocation(self, tagEst): self.tagLoc = (int(tagEst[0]),int(tagEst[1])) # Stops the vision process def stop(self): self.vidcap.release() cv2.release() cv2.destroyAllWindows() # Main function to run vision system as standalone def main(): print 'Args:' , str(sys.argv) for x in range(len(sys.argv)): if(sys.argv[x] == '-c'): ncam = int(sys.argv[x+1]) vs = VisionSystem(ncam) self.vidcap.release() 
cv2.release() cv2.destroyAllWindows() if __name__ == '__main__': main()
vu.drawSquareMarker(markedImg, pt[0], pt[1], 5, (255,0,255))
conditional_block
vision.py
# Python 2.7 Doritobot Vision System # EECS 498 Purple Team, 2014 # Written by Cody Hyman (hymanc@umich.edu) # Written against OpenCV 3.0.0-alpha import sys import os import cv2 import numpy as np from uvcinterface import UVCInterface as uvc from visionUtil import VisionUtil as vu from collections import deque from math import * # Calibration state 'Enumeration' class CalState(object): UNCAL = 1 CAL_PROG = 2 CALIBRATED = 3 ### Vision System Class ### class VisionSystem(object): # Window names CAM_FEED_NAME = 'Camera Feed' CAL_NAME = 'Calibrated Image' PROC_NAME = 'Vision Processing' CTL_NAME = 'Filter Controls' # Constants G_CENTER = 52 R_CENTER = 0 SMIN = 50 VMIN = 80 #HISTORY_LENGTH = 15 EMPTY_KERNEL = [0, 0, 0, 0, 0, 0, 0] RAW_KERNEL = np.array([1, 2, 3, 6, 10, 18, 20], dtype = np.float32) FIR_KERNEL = np.multiply(RAW_KERNEL,1/np.linalg.norm(RAW_KERNEL,1)) # Normalized kernel def __init__(self, camera): ### Instance Value initialization ###
# Run vision on a frame def processFrame(self): ### Main processing loop ### #while(True): frameRet, self.camImg = self.vidcap.read() #Img = self.drawCalMarkers() cv2.imshow(self.CAM_FEED_NAME, self.drawCalMarkers()) if(self.calstate == CalState.CALIBRATED): self.remapImage() # Apply perspective warp bl = cv2.getTrackbarPos('Blue', self.CTL_NAME) gr = cv2.getTrackbarPos('Green', self.CTL_NAME) rd = cv2.getTrackbarPos('Red', self.CTL_NAME) bvmin = cv2.getTrackbarPos('B Cutoff', self.CTL_NAME) gvmin = cv2.getTrackbarPos('G Cutoff', self.CTL_NAME) rvmin = cv2.getTrackbarPos('R Cutoff', self.CTL_NAME) smin = cv2.getTrackbarPos('Sat Cutoff', self.CTL_NAME) bgroundFlag = cv2.getTrackbarPos('Show Background', self.CTL_NAME) bCentroid, self.bTagImg = self.findMarker(self.warpImg, bl, 10, smin, bvmin) gCentroid, self.gTagImg = self.findMarker(self.warpImg, gr, 10, smin, gvmin) rCentroid, self.rTagImg = self.findMarker(self.warpImg, rd, 10, smin, rvmin) #vu.printCentroids(gCentroid, rCentroid) if(bgroundFlag): self.rgbImg = vu.comboImage(self.bTagImg, self.gTagImg, self.rTagImg, self.warpImg) else: self.rgbImg = vu.comboImage(self.bTagImg, self.gTagImg, self.rTagImg) ctr, theta, bCtr, gCtr, rCtr = vu.localizeRobot(bCentroid, gCentroid, rCentroid) if((ctr != None) and (theta != None)): fctr, ftheta = self.filterPoints(ctr, theta) self.x_est = ctr[0] self.y_est = ctr[1] # print 'Theta IN:', theta self.theta_est = theta#ftheta self.tagLoc = vu.computeTagLocation(ctr, bCtr) # Compute tag location vu.drawSquareMarker(self.rgbImg, int(fctr[0]), int(fctr[1]), 5, (255,0,255)) if(gCentroid != None): vu.drawSquareMarker(self.rgbImg, int(gCentroid[0]), int(gCentroid[1]), 5, (0,0,255)) if(rCentroid != None): vu.drawSquareMarker(self.rgbImg, int(rCentroid[0]), int(rCentroid[1]), 5, (255,0,0)) if(bCentroid != None): vu.drawSquareMarker(self.rgbImg, int(bCentroid[0]), int(bCentroid[1]), 5, (255,255,0)) wpIndex = 0 for wp in self.waypointEst: wpIndex = wpIndex + 1 if(wpIndex == 1): wpcolor 
= (0,0,255) else: wpcolor = (0,255,255) vu.drawFilledCircleMarker(self.rgbImg, wp[0], wp[1], 10, wpcolor) # vu.drawTextIndex(self.rgbImg, wp[0], wp[1], str(wpIndex)) # Draw waypoint index if(self.tagLoc[0] != None): vu.drawFilledCircleMarker(self.rgbImg, self.tagLoc[0], self.tagLoc[1], 5, (0,0,160)) #vu.drawVector(self.rgbImg, self.fVectorStart, self.fVectorEnd, (255,128,255)) #cv2.imshow(self.CAL_NAME, self.warpImg) cv2.imshow(self.PROC_NAME, self.rgbImg) #if cv2.waitKey(20) & 0xFF == ord('q'): # break # Use current perspective transform to remap image def remapImage(self): if(self.calstate == CalState.CALIBRATED): self.warpImg = cv2.warpPerspective(self.camImg, self.warp,(int(300*vu.IMG_SCALE),int(300*vu.IMG_SCALE))) self.warpImg = cv2.GaussianBlur(self.warpImg, (9,9), 1) self.warpImg = cv2.medianBlur(self.warpImg, 5) else: print 'Transform not calibrated' # Draws calibration markers on the camera image def drawCalMarkers(self): markedImg = self.camImg.copy() for pt in self.calpts: vu.drawSquareMarker(markedImg, pt[0], pt[1], 5, (255,0,255)) return markedImg # Finds a marker's central moment def findMarker(self, image, hueCenter, hueWidth, satMin, valMin): hsvImg = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) markerImg = cv2.inRange(hsvImg, np.array([hueCenter-hueWidth/2, satMin, valMin]), np.array([hueCenter+hueWidth/2, 255, 255])) cleanElement = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3)) markerImg = cv2.erode(markerImg, cleanElement) # Clean up marker image w/ erode-dilate-median markerImg = cv2.dilate(markerImg, cleanElement) markerImg = cv2.medianBlur(markerImg, 3) mMoments = cv2.moments(markerImg) # Compute moments m00 = mMoments['m00'] if(m00 > 0.1): return (mMoments['m10']/m00, mMoments['m01']/m00), markerImg return None, markerImg # FIR on centers and angles def filterPoints(self, ctr, theta): if((ctr != None) and (theta != None)): if(len(self.xHistory) == len(self.FIR_KERNEL)): self.xHistory.popleft() if(len(self.yHistory) == len(self.FIR_KERNEL)): 
self.yHistory.popleft() if(len(self.thetaHistory) == len(self.FIR_KERNEL)): self.thetaHistory.popleft() self.xHistory.append(ctr[0]) self.yHistory.append(ctr[1]) self.thetaHistory.append(theta) xFilter = np.linalg.norm(np.multiply(self.FIR_KERNEL, np.array(self.xHistory)),1) yFilter = np.linalg.norm(np.multiply(self.FIR_KERNEL, np.array(self.yHistory)),1) thetaFilter = np.linalg.norm(np.multiply(self.FIR_KERNEL, np.array(self.thetaHistory)),1) #print 'Filtered Phi:', phiFilter, ' Raw Theta:', theta return (xFilter, yFilter), thetaFilter # Interface to get current state estimates def getState(self): # Give estimated [x,y,theta] if(self.tagLoc != None): tx = self.tagLoc[0] ty = self.tagLoc[1] else: tx = None ty = None return [self.x_est, self.y_est, self.theta_est, tx, ty] ### Event Handlers ### # Camera input mouseclick handler def mouseClickHandler(self, event, x, y, flags, param): if event == cv2.EVENT_RBUTTONDOWN: print 'Recalibration requested' self.calstate = CalState.CAL_PROG self.calpts = [] # Reset calibration points if event == cv2.EVENT_LBUTTONDOWN: print 'Mouse left click event at ' + str(x) + ',' + str(y) if(self.calstate == CalState.UNCAL): self.calstate = CalState.CAL_PROG print 'Adding calibration point at (' + str(x) + ',' + str(y) + ')' self.calpts.append([x,y]) elif(self.calstate == CalState.CAL_PROG): if(len(self.calpts) < 4): print 'Adding calibration point at (' + str(x) + ',' + str(y) + ')' self.calpts.append([x,y]) # Finish if(len(self.calpts) == 4): print 'Calibrated' self.warp = cv2.getPerspectiveTransform(np.float32(self.calpts), self.worldpts) print str(self.calpts) self.calstate = CalState.CALIBRATED elif(self.calstate == CalState.CALIBRATED): print 'Already calibrated' # Color click handler for cal window def colorClickHandler(self, event, x, y, flags, param): if event == cv2.EVENT_LBUTTONDOWN: print 'Checking marker 1 color at ', str(x), ',', str(y) pass # Get color at point if event == cv2.EVENT_RBUTTONDOWN: print 'Checking marker 2 
color at ', str(x), ',', str(y) pass # Get color at point # Generic do-nothing slider handler (for ) def trackbarChangeHandler(self, x): pass # Gain slider handler def gainChanged(self, gain): uvc.set(self.camera, uvc.GAIN, gain) # Saturation slider handler def saturationChanged(self, sat): uvc.set(self.camera, uvc.SATURATION, sat) # Exposure slider handler def exposureChanged(self, exp): uvc.set(self.camera, uvc.EXPOSURE_ABS, exp) # Sets the waypoint list for rendering on overlay def setWaypoints(self, waypointEst): self.waypointEst = vu.toImageCoordinates(waypointEst) # Sets the estimated tag location for rendering on the overlay def setTagLocation(self, tagEst): self.tagLoc = (int(tagEst[0]),int(tagEst[1])) # Stops the vision process def stop(self): self.vidcap.release() cv2.release() cv2.destroyAllWindows() # Main function to run vision system as standalone def main(): print 'Args:' , str(sys.argv) for x in range(len(sys.argv)): if(sys.argv[x] == '-c'): ncam = int(sys.argv[x+1]) vs = VisionSystem(ncam) self.vidcap.release() cv2.release() cv2.destroyAllWindows() if __name__ == '__main__': main()
self.camera = camera self.calstate = CalState.UNCAL self.calpts = [] self.XSIZE = 1000 self.YSIZE = 1000 self.x_est = -1 self.y_est = -1 self.theta_est = -1 # Drawing storage self.waypointEst = [(300,300)] # Waypoint estimates for UI self.tagLoc = (10,10) # Tag location estimate self.fVectorStart = (0,0) self.fVectorEnd = (0,0) #self.worldpts = np.float32([ # [0,self.YSIZE/2], # [0,0], # [self.XSIZE,0], # [self.XSIZE,self.YSIZE/2] # ]) # ===== ***** Calibration points from world *****===== # '''self.worldpts = np.float32([ [-5, -1. * -105], #22 [90, -1. * -100], #27 [90, -1. * 110], #26 [0, -1. * 107] #25 ])#*self.IMG_SCALE + self.IMG_OFFSET''' # Swap x-y coordinates (WTF!) '''self.worldpts = np.float32([ [-105,-5], #22 [-100, 90], #27 [110, 90], #26 [107, 0] #25 ])#*self.IMG_SCALE + self.IMG_OFFSET''' self.worldpts = np.float32([ [-104,-2], #22 [-104,85], #27 [115,84], #26 [115,3] #25 ]) self.worldpts = vu.toImageCoordinates(self.worldpts) testPts = vu.toWaypointCoordinates(self.worldpts) print 'TestWorldPts', str(testPts) # ===== *************** ===== # ### Camera initialization ### print 'Opening Camera ' + str(camera) self.vidcap = cv2.VideoCapture(camera)# Open up specified camera # Check if camera is opened and exit if not if self.vidcap.isOpened(): print 'Camera ' + str(camera) + ' opened successfully' else: print 'ERROR: Camera ' + str(camera) + ' not opened' return False # Set camera autoexposure uvc.set(self.camera, uvc.EXPOSURE_AUTO, 1) uvc.set(self.camera, uvc.EXPOSURE_AUTO_PRIORITY, 0) ### Initialize UI elements ### # Filter Controls Window ctlWindow = cv2.namedWindow(self.CTL_NAME) cv2.createTrackbar('Blue', self.CTL_NAME, 88, 180, self.trackbarChangeHandler) cv2.createTrackbar('Green', self.CTL_NAME, 41, 180, self.trackbarChangeHandler) cv2.createTrackbar('Red', self.CTL_NAME, 172, 180, self.trackbarChangeHandler) cv2.createTrackbar('B Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler) cv2.createTrackbar('G Cutoff', self.CTL_NAME, 110, 
255, self.trackbarChangeHandler) cv2.createTrackbar('R Cutoff', self.CTL_NAME, 110, 255, self.trackbarChangeHandler) cv2.createTrackbar('Sat Cutoff', self.CTL_NAME, 100, 255, self.trackbarChangeHandler) cv2.createTrackbar('Show Background', self.CTL_NAME, 1, 1, self.trackbarChangeHandler) # Camera input window camWindow = cv2.namedWindow(self.CAM_FEED_NAME) cv2.createTrackbar('Gain', self.CAM_FEED_NAME, 128, 255, self.gainChanged) cv2.createTrackbar('Exposure', self.CAM_FEED_NAME, 1600, 2000, self.exposureChanged) cv2.createTrackbar('Saturation', self.CAM_FEED_NAME, 128, 255, self.saturationChanged) cv2.setMouseCallback(self.CAM_FEED_NAME, self.mouseClickHandler) # Set mouse callbacks for calibration # Rectified/Calibrated Image window #calWindow = cv2.namedWindow(self.CAL_NAME) #cv2.setMouseCallback(self.CAL_NAME, self.colorClickHandler) # Image processing Window 2 procWindow = cv2.namedWindow(self.PROC_NAME) # History for filter bank self.xHistory = deque(self.EMPTY_KERNEL) self.yHistory = deque(self.EMPTY_KERNEL) self.thetaHistory = deque(self.EMPTY_KERNEL)
identifier_body
theoretical_tools.py
import numpy as np import numba import scipy.special as sp_spec import scipy.integrate as sp_int from scipy.optimize import minimize, curve_fit import sys import matplotlib.pyplot as plt from scipy import signal from scipy.integrate import quad def pseq_params(params): Qe, Te, Ee = params['Qe'], params['Te'], params['Ee'] Qi, Ti, Ei = params['Qi'], params['Ti'], params['Ei'] Gl, Cm , El = params['Gl'], params['Cm'] , params['El'] for key, dval in zip(['Ntot', 'pconnec', 'gei'], [1, 2., 0.5]): if key in params.keys(): exec(key+' = params[key]') else: # default value exec(key+' = dval') if 'P' in params.keys(): P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10 = params['P'] else: # no correction P0 = -45e-3 for i in range(1,11): exec('P'+str(i)+'= 0') return Qe,Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10 # @numba.jit() def get_fluct_regime_varsup(Fe, Fi, XX,Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10): # here TOTAL (sum over synapses) excitatory and inhibitory input fe = Fe*(1.-gei)*pconnec*Ntot # default is 1 !! fi = Fi*gei*pconnec*Ntot muGe, muGi = Qe*Te*fe, Qi*Ti*fi muG = Gl+muGe+muGi muV = (muGe*Ee+muGi*Ei+Gl*El-XX)/muG muGn, Tm = muG/Gl, Cm/muG Ue, Ui = Qe/muG*(Ee-muV), Qi/muG*(Ei-muV) sV = np.sqrt(\ fe*(Ue*Te)**2/2./(Te+Tm)+\ fi*(Ti*Ui)**2/2./(Ti+Tm)) fe, fi = fe+1e-9, fi+1e-9 # just to insure a non zero division, Tv = ( fe*(Ue*Te)**2 + fi*(Ti*Ui)**2 ) /( fe*(Ue*Te)**2/(Te+Tm) + fi*(Ti*Ui)**2/(Ti+Tm) ) TvN = Tv*Gl/Cm return muV, sV+1e-12, muGn, TvN def mean_and_var_conductance(Fe, Fi, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10): # here TOTAL (sum over synapses) excitatory and inhibitory input fe = Fe*(1.-gei)*pconnec*Ntot # default is 1 !! fi = Fi*gei*pconnec*Ntot return Qe*Te*fe, Qi*Ti*fi, Qe*np.sqrt(Te*fe/2.), Qi*np.sqrt(Ti*fi/2.) 
### FUNCTION, INVERSE FUNCTION # @numba.jit() def erfc_func(muV, sV, TvN, Vthre, Gl, Cm): return .5/TvN*Gl/Cm*(sp_spec.erfc((Vthre-muV)/np.sqrt(2)/sV)) # @numba.jit() def effective_Vthre(Y, muV, sV, TvN, Gl, Cm): Vthre_eff = muV+np.sqrt(2)*sV*sp_spec.erfcinv(\ Y*2.*TvN*Cm/Gl) # effective threshold return Vthre_eff # @numba.jit() def threshold_func(muV, sV, TvN, muGn, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10): """ setting by default to True the square because when use by external modules, coeff[5:]=np.zeros(3) in the case of a linear threshold """ muV0, DmuV0 = -60e-3,10e-3 sV0, DsV0 =4e-3, 6e-3 TvN0, DTvN0 = 0.5, 1. return P0+P1*(muV-muV0)/DmuV0+\ P2*(sV-sV0)/DsV0+P3*(TvN-TvN0)/DTvN0+\ 0*P4*np.log(muGn)+P5*((muV-muV0)/DmuV0)**2+\ P6*((sV-sV0)/DsV0)**2+P7*((TvN-TvN0)/DTvN0)**2+\ P8*(muV-muV0)/DmuV0*(sV-sV0)/DsV0+\ P9*(muV-muV0)/DmuV0*(TvN-TvN0)/DTvN0+\ P10*(sV-sV0)/DsV0*(TvN-TvN0)/DTvN0 # final transfer function template : # @numba.jit() def TF_my_templateup(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10): # here TOTAL (sum over synapses) excitatory and inhibitory input if(hasattr(fe, "__len__")): fe[fe<1e-8]=1e-8 else: if(fe<1e-8): fe=1e-8 if(hasattr(fi, "__len__")): fi[fi<1e-8]=1e-8 else: if(fi<1e-8): fi=1e-8 muV, sV, muGn, TvN = get_fluct_regime_varsup(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10) Vthre = threshold_func(muV, sV, TvN, muGn, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10) if(hasattr(muV, "__len__")): #print("ttt",isinstance(muV, list), hasattr(muV, "__len__"))
else: if(sV<1e-4): sV=1e-4 Fout_th = erfc_func(muV, sV, TvN, Vthre, Gl, Cm) if(hasattr(Fout_th, "__len__")): #print("ttt",isinstance(muV, list), hasattr(muV, "__len__")) Fout_th[Fout_th<1e-8]=1e-8 else: if(Fout_th<1e-8): Fout_th=1e-8 ''' if(El<-0.063): if(hasattr(Fout_th, "__len__")): #print("ttt",isinstance(muV, list), hasattr(muV, "__len__")) Fout_th[Fout_th>80.]=175 else: if(Fout_th>80.): print("Done") Fout_th=175 ''' #print 'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy',fe,fi,muV, sV, TvN,Fout_th return Fout_th def gaussian(x, mu, sig): return (1/(sig*np.sqrt(2*3.1415)))*np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.))) def TF_my_templateup_heterogeneity(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10): # here TOTAL (sum over synapses) excitatory and inhibitory input def Phet(k): locale=gaussian(k,1.,0.2)*TF_my_templateup(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El*k, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10) return locale outhet, err = quad(Phet, 0.1, 5) return outhet # @numba.jit() def make_loop(t, nu, vm, nu_aff_exc, nu_aff_inh, BIN,\ Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10): dt = t[1]-t[0] # constructing the Euler method for the activity rate for i_t in range(len(t)-1): # loop over time fe = (nu_aff_exc[i_t]+nu[i_t]+Fdrive) # afferent+recurrent excitation fi = nu[i_t]+nu_aff_inh[i_t] # recurrent inhibition W[i_t+1] = W[i_t] + dt/Tw*(b*nu[i_t]*Tw - W[i_t]) nu[i_t+1] = nu[i_t] +\ dt/BIN*(\ TF_my_template(fe, fi, W[i_t], Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10)\ -nu[i_t]) vm[i_t], _, _, _ = get_fluct_regime_vars(fe, fi, W[i_t], Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10) return nu, vm, W ################################################################ ##### Now fitting to Transfer 
Function data ################################################################ def fitting_Vthre_then_Fout(Fout, Fe_eff, fiSim,w, params,\ maxiter=50000, xtol=1e-5, with_square_terms=False): Gl, Cm , El = params['Gl'], params['Cm'] , params['El'] Fout, Fe_eff, fiSim,w = Fout.flatten(), Fe_eff.flatten(), fiSim.flatten(), w.flatten() #print 'Eccolo', Fout[8],w[8],len(Fout) muV, sV, muGn, TvN = get_fluct_regime_varsup(Fe_eff, fiSim,w, *pseq_params(params)) i_non_zeros = np.where((Fout>0.)&(Fout<60.)) Vthre_eff = effective_Vthre(Fout[i_non_zeros], muV[i_non_zeros],\ sV[i_non_zeros], TvN[i_non_zeros], params['Gl'], params['Cm']) if with_square_terms: P = np.zeros(11) else: P = np.zeros(5) P[:5] = Vthre_eff.mean(), 1e-3, 1e-3, 1e-3, 1e-3 def Res(p): if not with_square_terms: pp = np.concatenate([p, np.zeros(6)]) else: pp=p vthre = threshold_func(muV[i_non_zeros], sV[i_non_zeros],\ TvN[i_non_zeros], muGn[i_non_zeros], *pp) return np.mean((Vthre_eff-vthre)**2) #xtol=1e-19 #plsq = minimize(Res, P, method='nelder-mead',options={'xtol': xtol, 'disp': True, 'maxiter':maxiter}) plsq = minimize(Res, P, method='SLSQP',options={'ftol': 1e-15, 'disp': True, 'maxiter':40000}) #print plsq P = plsq.x def Res(p): if not with_square_terms: params['P'] = np.concatenate([p, np.zeros(6)]) else: params['P'] = p return np.mean((Fout-\ TF_my_templateup(Fe_eff, fiSim,w, *pseq_params(params)))**2) plsq = minimize(Res, P, method='nelder-mead',\ options={'xtol': xtol, 'disp': True, 'maxiter':maxiter}) params['P'] = P diff=(TF_my_templateup(Fe_eff, fiSim,w, *pseq_params(params))-Fout).mean() diff_M=(TF_my_templateup(Fe_eff, fiSim,w, *pseq_params(params))-Fout).max() print("rrrrr",diff,diff_M) plt.plot(fiSim,Fout,'rd',fiSim,TF_my_templateup(Fe_eff, fiSim,w, *pseq_params(params)),'bs') #plt.plot(fiSim,Fe-eff,Fout,'rd') plt.show() thrplot=threshold_func(muV, sV,TvN, muGn, *(plsq.x)) #np.save('FScell_Voltage.npy',[muV, sV,TvN,Fout]) plt.plot(muV,Fout,'rd',muV,erfc_func(muV, sV, TvN, thrplot, Gl, 
Cm),'bd') plt.show() plt.plot(muV,erfc_func(muV, 4e-3, 0.5, thrplot, Gl, Cm),'bd') plt.show() if with_square_terms: #return plsq.x return P else: return np.concatenate([plsq.x, np.zeros(6)]) def make_fit_from_data(DATA, with_square_terms=False): MEANfreq, SDfreq, Fe_eff, fiSim, params,w = np.load(DATA) Fe_eff, Fout = np.array(Fe_eff), np.array(MEANfreq) levels = fiSim # to store for colors fiSim = np.meshgrid(np.zeros(Fe_eff.shape[1]), fiSim)[1] P = fitting_Vthre_then_Fout(Fout, Fe_eff, fiSim,w, params,\ with_square_terms=with_square_terms) print("ffffff",P) #plt.plot(Fe_eff[2,:],MEANfreq[2,:],"bs",fiSim[:,5],MEANfreq[:,5],"o") plt.plot(Fe_eff[2,:],MEANfreq[2,:],"bs",fiSim[:,5],MEANfreq[:,5],"o") #np.save('FScell_freq.npy',[Fe_eff[2,:],fiSim[:,5],MEANfreq[:,5]]) plt.show() print '==================================================' print 1e3*np.array(P), 'mV' # then we save it: filename = DATA.replace('.npy', '_fit.npy') print 'coefficients saved in ', filename np.save(filename, np.array(P)) return P import argparse if __name__=='__main__': # First a nice documentation parser=argparse.ArgumentParser(description= """ '==================================================' '=====> FIT of the transfer function ==============' '=== and theoretical objects for the TF relation ==' '==================================================' """, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-f', "--FILE",help="file name of numerical TF data",\ default='data/example_data.npy') parser.add_argument("--With_Square",help="Add the square terms in the TF formula"+\ "\n then we have 7 parameters",\ action="store_true") args = parser.parse_args() make_fit_from_data(args.FILE, with_square_terms=args.With_Square)
sV[sV<1e-4]=1e-4
conditional_block
theoretical_tools.py
import numpy as np import numba import scipy.special as sp_spec import scipy.integrate as sp_int from scipy.optimize import minimize, curve_fit import sys import matplotlib.pyplot as plt from scipy import signal from scipy.integrate import quad def pseq_params(params): Qe, Te, Ee = params['Qe'], params['Te'], params['Ee'] Qi, Ti, Ei = params['Qi'], params['Ti'], params['Ei'] Gl, Cm , El = params['Gl'], params['Cm'] , params['El'] for key, dval in zip(['Ntot', 'pconnec', 'gei'], [1, 2., 0.5]): if key in params.keys(): exec(key+' = params[key]') else: # default value exec(key+' = dval') if 'P' in params.keys(): P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10 = params['P'] else: # no correction P0 = -45e-3 for i in range(1,11): exec('P'+str(i)+'= 0') return Qe,Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10 # @numba.jit() def get_fluct_regime_varsup(Fe, Fi, XX,Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10): # here TOTAL (sum over synapses) excitatory and inhibitory input fe = Fe*(1.-gei)*pconnec*Ntot # default is 1 !! fi = Fi*gei*pconnec*Ntot muGe, muGi = Qe*Te*fe, Qi*Ti*fi muG = Gl+muGe+muGi muV = (muGe*Ee+muGi*Ei+Gl*El-XX)/muG muGn, Tm = muG/Gl, Cm/muG Ue, Ui = Qe/muG*(Ee-muV), Qi/muG*(Ei-muV) sV = np.sqrt(\ fe*(Ue*Te)**2/2./(Te+Tm)+\ fi*(Ti*Ui)**2/2./(Ti+Tm)) fe, fi = fe+1e-9, fi+1e-9 # just to insure a non zero division, Tv = ( fe*(Ue*Te)**2 + fi*(Ti*Ui)**2 ) /( fe*(Ue*Te)**2/(Te+Tm) + fi*(Ti*Ui)**2/(Ti+Tm) ) TvN = Tv*Gl/Cm return muV, sV+1e-12, muGn, TvN def mean_and_var_conductance(Fe, Fi, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10): # here TOTAL (sum over synapses) excitatory and inhibitory input fe = Fe*(1.-gei)*pconnec*Ntot # default is 1 !! fi = Fi*gei*pconnec*Ntot return Qe*Te*fe, Qi*Ti*fi, Qe*np.sqrt(Te*fe/2.), Qi*np.sqrt(Ti*fi/2.) 
### FUNCTION, INVERSE FUNCTION # @numba.jit() def erfc_func(muV, sV, TvN, Vthre, Gl, Cm): return .5/TvN*Gl/Cm*(sp_spec.erfc((Vthre-muV)/np.sqrt(2)/sV)) # @numba.jit() def effective_Vthre(Y, muV, sV, TvN, Gl, Cm): Vthre_eff = muV+np.sqrt(2)*sV*sp_spec.erfcinv(\ Y*2.*TvN*Cm/Gl) # effective threshold return Vthre_eff # @numba.jit() def threshold_func(muV, sV, TvN, muGn, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10): """ setting by default to True the square because when use by external modules, coeff[5:]=np.zeros(3) in the case of a linear threshold """ muV0, DmuV0 = -60e-3,10e-3 sV0, DsV0 =4e-3, 6e-3 TvN0, DTvN0 = 0.5, 1. return P0+P1*(muV-muV0)/DmuV0+\ P2*(sV-sV0)/DsV0+P3*(TvN-TvN0)/DTvN0+\ 0*P4*np.log(muGn)+P5*((muV-muV0)/DmuV0)**2+\ P6*((sV-sV0)/DsV0)**2+P7*((TvN-TvN0)/DTvN0)**2+\ P8*(muV-muV0)/DmuV0*(sV-sV0)/DsV0+\ P9*(muV-muV0)/DmuV0*(TvN-TvN0)/DTvN0+\ P10*(sV-sV0)/DsV0*(TvN-TvN0)/DTvN0 # final transfer function template : # @numba.jit() def TF_my_templateup(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10): # here TOTAL (sum over synapses) excitatory and inhibitory input if(hasattr(fe, "__len__")): fe[fe<1e-8]=1e-8 else: if(fe<1e-8): fe=1e-8 if(hasattr(fi, "__len__")): fi[fi<1e-8]=1e-8 else: if(fi<1e-8): fi=1e-8 muV, sV, muGn, TvN = get_fluct_regime_varsup(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10) Vthre = threshold_func(muV, sV, TvN, muGn, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10) if(hasattr(muV, "__len__")): #print("ttt",isinstance(muV, list), hasattr(muV, "__len__")) sV[sV<1e-4]=1e-4 else: if(sV<1e-4): sV=1e-4 Fout_th = erfc_func(muV, sV, TvN, Vthre, Gl, Cm) if(hasattr(Fout_th, "__len__")): #print("ttt",isinstance(muV, list), hasattr(muV, "__len__")) Fout_th[Fout_th<1e-8]=1e-8 else: if(Fout_th<1e-8): Fout_th=1e-8 ''' if(El<-0.063): if(hasattr(Fout_th, "__len__")): #print("ttt",isinstance(muV, list), hasattr(muV, "__len__")) 
Fout_th[Fout_th>80.]=175 else: if(Fout_th>80.): print("Done") Fout_th=175 ''' #print 'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy',fe,fi,muV, sV, TvN,Fout_th return Fout_th def gaussian(x, mu, sig): return (1/(sig*np.sqrt(2*3.1415)))*np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.))) def TF_my_templateup_heterogeneity(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10): # here TOTAL (sum over synapses) excitatory and inhibitory input def Phet(k):
outhet, err = quad(Phet, 0.1, 5) return outhet # @numba.jit() def make_loop(t, nu, vm, nu_aff_exc, nu_aff_inh, BIN,\ Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10): dt = t[1]-t[0] # constructing the Euler method for the activity rate for i_t in range(len(t)-1): # loop over time fe = (nu_aff_exc[i_t]+nu[i_t]+Fdrive) # afferent+recurrent excitation fi = nu[i_t]+nu_aff_inh[i_t] # recurrent inhibition W[i_t+1] = W[i_t] + dt/Tw*(b*nu[i_t]*Tw - W[i_t]) nu[i_t+1] = nu[i_t] +\ dt/BIN*(\ TF_my_template(fe, fi, W[i_t], Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10)\ -nu[i_t]) vm[i_t], _, _, _ = get_fluct_regime_vars(fe, fi, W[i_t], Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10) return nu, vm, W ################################################################ ##### Now fitting to Transfer Function data ################################################################ def fitting_Vthre_then_Fout(Fout, Fe_eff, fiSim,w, params,\ maxiter=50000, xtol=1e-5, with_square_terms=False): Gl, Cm , El = params['Gl'], params['Cm'] , params['El'] Fout, Fe_eff, fiSim,w = Fout.flatten(), Fe_eff.flatten(), fiSim.flatten(), w.flatten() #print 'Eccolo', Fout[8],w[8],len(Fout) muV, sV, muGn, TvN = get_fluct_regime_varsup(Fe_eff, fiSim,w, *pseq_params(params)) i_non_zeros = np.where((Fout>0.)&(Fout<60.)) Vthre_eff = effective_Vthre(Fout[i_non_zeros], muV[i_non_zeros],\ sV[i_non_zeros], TvN[i_non_zeros], params['Gl'], params['Cm']) if with_square_terms: P = np.zeros(11) else: P = np.zeros(5) P[:5] = Vthre_eff.mean(), 1e-3, 1e-3, 1e-3, 1e-3 def Res(p): if not with_square_terms: pp = np.concatenate([p, np.zeros(6)]) else: pp=p vthre = threshold_func(muV[i_non_zeros], sV[i_non_zeros],\ TvN[i_non_zeros], muGn[i_non_zeros], *pp) return np.mean((Vthre_eff-vthre)**2) #xtol=1e-19 #plsq = minimize(Res, P, method='nelder-mead',options={'xtol': xtol, 
'disp': True, 'maxiter':maxiter}) plsq = minimize(Res, P, method='SLSQP',options={'ftol': 1e-15, 'disp': True, 'maxiter':40000}) #print plsq P = plsq.x def Res(p): if not with_square_terms: params['P'] = np.concatenate([p, np.zeros(6)]) else: params['P'] = p return np.mean((Fout-\ TF_my_templateup(Fe_eff, fiSim,w, *pseq_params(params)))**2) plsq = minimize(Res, P, method='nelder-mead',\ options={'xtol': xtol, 'disp': True, 'maxiter':maxiter}) params['P'] = P diff=(TF_my_templateup(Fe_eff, fiSim,w, *pseq_params(params))-Fout).mean() diff_M=(TF_my_templateup(Fe_eff, fiSim,w, *pseq_params(params))-Fout).max() print("rrrrr",diff,diff_M) plt.plot(fiSim,Fout,'rd',fiSim,TF_my_templateup(Fe_eff, fiSim,w, *pseq_params(params)),'bs') #plt.plot(fiSim,Fe-eff,Fout,'rd') plt.show() thrplot=threshold_func(muV, sV,TvN, muGn, *(plsq.x)) #np.save('FScell_Voltage.npy',[muV, sV,TvN,Fout]) plt.plot(muV,Fout,'rd',muV,erfc_func(muV, sV, TvN, thrplot, Gl, Cm),'bd') plt.show() plt.plot(muV,erfc_func(muV, 4e-3, 0.5, thrplot, Gl, Cm),'bd') plt.show() if with_square_terms: #return plsq.x return P else: return np.concatenate([plsq.x, np.zeros(6)]) def make_fit_from_data(DATA, with_square_terms=False): MEANfreq, SDfreq, Fe_eff, fiSim, params,w = np.load(DATA) Fe_eff, Fout = np.array(Fe_eff), np.array(MEANfreq) levels = fiSim # to store for colors fiSim = np.meshgrid(np.zeros(Fe_eff.shape[1]), fiSim)[1] P = fitting_Vthre_then_Fout(Fout, Fe_eff, fiSim,w, params,\ with_square_terms=with_square_terms) print("ffffff",P) #plt.plot(Fe_eff[2,:],MEANfreq[2,:],"bs",fiSim[:,5],MEANfreq[:,5],"o") plt.plot(Fe_eff[2,:],MEANfreq[2,:],"bs",fiSim[:,5],MEANfreq[:,5],"o") #np.save('FScell_freq.npy',[Fe_eff[2,:],fiSim[:,5],MEANfreq[:,5]]) plt.show() print '==================================================' print 1e3*np.array(P), 'mV' # then we save it: filename = DATA.replace('.npy', '_fit.npy') print 'coefficients saved in ', filename np.save(filename, np.array(P)) return P import argparse if 
__name__=='__main__': # First a nice documentation parser=argparse.ArgumentParser(description= """ '==================================================' '=====> FIT of the transfer function ==============' '=== and theoretical objects for the TF relation ==' '==================================================' """, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-f', "--FILE",help="file name of numerical TF data",\ default='data/example_data.npy') parser.add_argument("--With_Square",help="Add the square terms in the TF formula"+\ "\n then we have 7 parameters",\ action="store_true") args = parser.parse_args() make_fit_from_data(args.FILE, with_square_terms=args.With_Square)
locale=gaussian(k,1.,0.2)*TF_my_templateup(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El*k, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10) return locale
identifier_body
theoretical_tools.py
import numpy as np import numba import scipy.special as sp_spec import scipy.integrate as sp_int from scipy.optimize import minimize, curve_fit import sys import matplotlib.pyplot as plt from scipy import signal from scipy.integrate import quad def pseq_params(params): Qe, Te, Ee = params['Qe'], params['Te'], params['Ee'] Qi, Ti, Ei = params['Qi'], params['Ti'], params['Ei'] Gl, Cm , El = params['Gl'], params['Cm'] , params['El'] for key, dval in zip(['Ntot', 'pconnec', 'gei'], [1, 2., 0.5]): if key in params.keys(): exec(key+' = params[key]') else: # default value exec(key+' = dval') if 'P' in params.keys(): P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10 = params['P'] else: # no correction P0 = -45e-3 for i in range(1,11): exec('P'+str(i)+'= 0') return Qe,Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10 # @numba.jit() def get_fluct_regime_varsup(Fe, Fi, XX,Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10): # here TOTAL (sum over synapses) excitatory and inhibitory input fe = Fe*(1.-gei)*pconnec*Ntot # default is 1 !! fi = Fi*gei*pconnec*Ntot muGe, muGi = Qe*Te*fe, Qi*Ti*fi muG = Gl+muGe+muGi muV = (muGe*Ee+muGi*Ei+Gl*El-XX)/muG muGn, Tm = muG/Gl, Cm/muG Ue, Ui = Qe/muG*(Ee-muV), Qi/muG*(Ei-muV) sV = np.sqrt(\ fe*(Ue*Te)**2/2./(Te+Tm)+\ fi*(Ti*Ui)**2/2./(Ti+Tm)) fe, fi = fe+1e-9, fi+1e-9 # just to insure a non zero division, Tv = ( fe*(Ue*Te)**2 + fi*(Ti*Ui)**2 ) /( fe*(Ue*Te)**2/(Te+Tm) + fi*(Ti*Ui)**2/(Ti+Tm) ) TvN = Tv*Gl/Cm
return muV, sV+1e-12, muGn, TvN def mean_and_var_conductance(Fe, Fi, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10): # here TOTAL (sum over synapses) excitatory and inhibitory input fe = Fe*(1.-gei)*pconnec*Ntot # default is 1 !! fi = Fi*gei*pconnec*Ntot return Qe*Te*fe, Qi*Ti*fi, Qe*np.sqrt(Te*fe/2.), Qi*np.sqrt(Ti*fi/2.) ### FUNCTION, INVERSE FUNCTION # @numba.jit() def erfc_func(muV, sV, TvN, Vthre, Gl, Cm): return .5/TvN*Gl/Cm*(sp_spec.erfc((Vthre-muV)/np.sqrt(2)/sV)) # @numba.jit() def effective_Vthre(Y, muV, sV, TvN, Gl, Cm): Vthre_eff = muV+np.sqrt(2)*sV*sp_spec.erfcinv(\ Y*2.*TvN*Cm/Gl) # effective threshold return Vthre_eff # @numba.jit() def threshold_func(muV, sV, TvN, muGn, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10): """ setting by default to True the square because when use by external modules, coeff[5:]=np.zeros(3) in the case of a linear threshold """ muV0, DmuV0 = -60e-3,10e-3 sV0, DsV0 =4e-3, 6e-3 TvN0, DTvN0 = 0.5, 1. 
return P0+P1*(muV-muV0)/DmuV0+\ P2*(sV-sV0)/DsV0+P3*(TvN-TvN0)/DTvN0+\ 0*P4*np.log(muGn)+P5*((muV-muV0)/DmuV0)**2+\ P6*((sV-sV0)/DsV0)**2+P7*((TvN-TvN0)/DTvN0)**2+\ P8*(muV-muV0)/DmuV0*(sV-sV0)/DsV0+\ P9*(muV-muV0)/DmuV0*(TvN-TvN0)/DTvN0+\ P10*(sV-sV0)/DsV0*(TvN-TvN0)/DTvN0 # final transfer function template : # @numba.jit() def TF_my_templateup(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10): # here TOTAL (sum over synapses) excitatory and inhibitory input if(hasattr(fe, "__len__")): fe[fe<1e-8]=1e-8 else: if(fe<1e-8): fe=1e-8 if(hasattr(fi, "__len__")): fi[fi<1e-8]=1e-8 else: if(fi<1e-8): fi=1e-8 muV, sV, muGn, TvN = get_fluct_regime_varsup(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10) Vthre = threshold_func(muV, sV, TvN, muGn, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10) if(hasattr(muV, "__len__")): #print("ttt",isinstance(muV, list), hasattr(muV, "__len__")) sV[sV<1e-4]=1e-4 else: if(sV<1e-4): sV=1e-4 Fout_th = erfc_func(muV, sV, TvN, Vthre, Gl, Cm) if(hasattr(Fout_th, "__len__")): #print("ttt",isinstance(muV, list), hasattr(muV, "__len__")) Fout_th[Fout_th<1e-8]=1e-8 else: if(Fout_th<1e-8): Fout_th=1e-8 ''' if(El<-0.063): if(hasattr(Fout_th, "__len__")): #print("ttt",isinstance(muV, list), hasattr(muV, "__len__")) Fout_th[Fout_th>80.]=175 else: if(Fout_th>80.): print("Done") Fout_th=175 ''' #print 'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy',fe,fi,muV, sV, TvN,Fout_th return Fout_th def gaussian(x, mu, sig): return (1/(sig*np.sqrt(2*3.1415)))*np.exp(-np.power(x - mu, 2.) 
/ (2 * np.power(sig, 2.))) def TF_my_templateup_heterogeneity(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10): # here TOTAL (sum over synapses) excitatory and inhibitory input def Phet(k): locale=gaussian(k,1.,0.2)*TF_my_templateup(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El*k, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10) return locale outhet, err = quad(Phet, 0.1, 5) return outhet # @numba.jit() def make_loop(t, nu, vm, nu_aff_exc, nu_aff_inh, BIN,\ Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10): dt = t[1]-t[0] # constructing the Euler method for the activity rate for i_t in range(len(t)-1): # loop over time fe = (nu_aff_exc[i_t]+nu[i_t]+Fdrive) # afferent+recurrent excitation fi = nu[i_t]+nu_aff_inh[i_t] # recurrent inhibition W[i_t+1] = W[i_t] + dt/Tw*(b*nu[i_t]*Tw - W[i_t]) nu[i_t+1] = nu[i_t] +\ dt/BIN*(\ TF_my_template(fe, fi, W[i_t], Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10)\ -nu[i_t]) vm[i_t], _, _, _ = get_fluct_regime_vars(fe, fi, W[i_t], Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10) return nu, vm, W ################################################################ ##### Now fitting to Transfer Function data ################################################################ def fitting_Vthre_then_Fout(Fout, Fe_eff, fiSim,w, params,\ maxiter=50000, xtol=1e-5, with_square_terms=False): Gl, Cm , El = params['Gl'], params['Cm'] , params['El'] Fout, Fe_eff, fiSim,w = Fout.flatten(), Fe_eff.flatten(), fiSim.flatten(), w.flatten() #print 'Eccolo', Fout[8],w[8],len(Fout) muV, sV, muGn, TvN = get_fluct_regime_varsup(Fe_eff, fiSim,w, *pseq_params(params)) i_non_zeros = np.where((Fout>0.)&(Fout<60.)) Vthre_eff = effective_Vthre(Fout[i_non_zeros], muV[i_non_zeros],\ sV[i_non_zeros], TvN[i_non_zeros], params['Gl'], 
params['Cm']) if with_square_terms: P = np.zeros(11) else: P = np.zeros(5) P[:5] = Vthre_eff.mean(), 1e-3, 1e-3, 1e-3, 1e-3 def Res(p): if not with_square_terms: pp = np.concatenate([p, np.zeros(6)]) else: pp=p vthre = threshold_func(muV[i_non_zeros], sV[i_non_zeros],\ TvN[i_non_zeros], muGn[i_non_zeros], *pp) return np.mean((Vthre_eff-vthre)**2) #xtol=1e-19 #plsq = minimize(Res, P, method='nelder-mead',options={'xtol': xtol, 'disp': True, 'maxiter':maxiter}) plsq = minimize(Res, P, method='SLSQP',options={'ftol': 1e-15, 'disp': True, 'maxiter':40000}) #print plsq P = plsq.x def Res(p): if not with_square_terms: params['P'] = np.concatenate([p, np.zeros(6)]) else: params['P'] = p return np.mean((Fout-\ TF_my_templateup(Fe_eff, fiSim,w, *pseq_params(params)))**2) plsq = minimize(Res, P, method='nelder-mead',\ options={'xtol': xtol, 'disp': True, 'maxiter':maxiter}) params['P'] = P diff=(TF_my_templateup(Fe_eff, fiSim,w, *pseq_params(params))-Fout).mean() diff_M=(TF_my_templateup(Fe_eff, fiSim,w, *pseq_params(params))-Fout).max() print("rrrrr",diff,diff_M) plt.plot(fiSim,Fout,'rd',fiSim,TF_my_templateup(Fe_eff, fiSim,w, *pseq_params(params)),'bs') #plt.plot(fiSim,Fe-eff,Fout,'rd') plt.show() thrplot=threshold_func(muV, sV,TvN, muGn, *(plsq.x)) #np.save('FScell_Voltage.npy',[muV, sV,TvN,Fout]) plt.plot(muV,Fout,'rd',muV,erfc_func(muV, sV, TvN, thrplot, Gl, Cm),'bd') plt.show() plt.plot(muV,erfc_func(muV, 4e-3, 0.5, thrplot, Gl, Cm),'bd') plt.show() if with_square_terms: #return plsq.x return P else: return np.concatenate([plsq.x, np.zeros(6)]) def make_fit_from_data(DATA, with_square_terms=False): MEANfreq, SDfreq, Fe_eff, fiSim, params,w = np.load(DATA) Fe_eff, Fout = np.array(Fe_eff), np.array(MEANfreq) levels = fiSim # to store for colors fiSim = np.meshgrid(np.zeros(Fe_eff.shape[1]), fiSim)[1] P = fitting_Vthre_then_Fout(Fout, Fe_eff, fiSim,w, params,\ with_square_terms=with_square_terms) print("ffffff",P) 
#plt.plot(Fe_eff[2,:],MEANfreq[2,:],"bs",fiSim[:,5],MEANfreq[:,5],"o") plt.plot(Fe_eff[2,:],MEANfreq[2,:],"bs",fiSim[:,5],MEANfreq[:,5],"o") #np.save('FScell_freq.npy',[Fe_eff[2,:],fiSim[:,5],MEANfreq[:,5]]) plt.show() print '==================================================' print 1e3*np.array(P), 'mV' # then we save it: filename = DATA.replace('.npy', '_fit.npy') print 'coefficients saved in ', filename np.save(filename, np.array(P)) return P import argparse if __name__=='__main__': # First a nice documentation parser=argparse.ArgumentParser(description= """ '==================================================' '=====> FIT of the transfer function ==============' '=== and theoretical objects for the TF relation ==' '==================================================' """, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-f', "--FILE",help="file name of numerical TF data",\ default='data/example_data.npy') parser.add_argument("--With_Square",help="Add the square terms in the TF formula"+\ "\n then we have 7 parameters",\ action="store_true") args = parser.parse_args() make_fit_from_data(args.FILE, with_square_terms=args.With_Square)
random_line_split
theoretical_tools.py
import numpy as np import numba import scipy.special as sp_spec import scipy.integrate as sp_int from scipy.optimize import minimize, curve_fit import sys import matplotlib.pyplot as plt from scipy import signal from scipy.integrate import quad def pseq_params(params): Qe, Te, Ee = params['Qe'], params['Te'], params['Ee'] Qi, Ti, Ei = params['Qi'], params['Ti'], params['Ei'] Gl, Cm , El = params['Gl'], params['Cm'] , params['El'] for key, dval in zip(['Ntot', 'pconnec', 'gei'], [1, 2., 0.5]): if key in params.keys(): exec(key+' = params[key]') else: # default value exec(key+' = dval') if 'P' in params.keys(): P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10 = params['P'] else: # no correction P0 = -45e-3 for i in range(1,11): exec('P'+str(i)+'= 0') return Qe,Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10 # @numba.jit() def get_fluct_regime_varsup(Fe, Fi, XX,Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10): # here TOTAL (sum over synapses) excitatory and inhibitory input fe = Fe*(1.-gei)*pconnec*Ntot # default is 1 !! fi = Fi*gei*pconnec*Ntot muGe, muGi = Qe*Te*fe, Qi*Ti*fi muG = Gl+muGe+muGi muV = (muGe*Ee+muGi*Ei+Gl*El-XX)/muG muGn, Tm = muG/Gl, Cm/muG Ue, Ui = Qe/muG*(Ee-muV), Qi/muG*(Ei-muV) sV = np.sqrt(\ fe*(Ue*Te)**2/2./(Te+Tm)+\ fi*(Ti*Ui)**2/2./(Ti+Tm)) fe, fi = fe+1e-9, fi+1e-9 # just to insure a non zero division, Tv = ( fe*(Ue*Te)**2 + fi*(Ti*Ui)**2 ) /( fe*(Ue*Te)**2/(Te+Tm) + fi*(Ti*Ui)**2/(Ti+Tm) ) TvN = Tv*Gl/Cm return muV, sV+1e-12, muGn, TvN def mean_and_var_conductance(Fe, Fi, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10): # here TOTAL (sum over synapses) excitatory and inhibitory input fe = Fe*(1.-gei)*pconnec*Ntot # default is 1 !! fi = Fi*gei*pconnec*Ntot return Qe*Te*fe, Qi*Ti*fi, Qe*np.sqrt(Te*fe/2.), Qi*np.sqrt(Ti*fi/2.) 
### FUNCTION, INVERSE FUNCTION # @numba.jit() def erfc_func(muV, sV, TvN, Vthre, Gl, Cm): return .5/TvN*Gl/Cm*(sp_spec.erfc((Vthre-muV)/np.sqrt(2)/sV)) # @numba.jit() def effective_Vthre(Y, muV, sV, TvN, Gl, Cm): Vthre_eff = muV+np.sqrt(2)*sV*sp_spec.erfcinv(\ Y*2.*TvN*Cm/Gl) # effective threshold return Vthre_eff # @numba.jit() def
(muV, sV, TvN, muGn, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10): """ setting by default to True the square because when use by external modules, coeff[5:]=np.zeros(3) in the case of a linear threshold """ muV0, DmuV0 = -60e-3,10e-3 sV0, DsV0 =4e-3, 6e-3 TvN0, DTvN0 = 0.5, 1. return P0+P1*(muV-muV0)/DmuV0+\ P2*(sV-sV0)/DsV0+P3*(TvN-TvN0)/DTvN0+\ 0*P4*np.log(muGn)+P5*((muV-muV0)/DmuV0)**2+\ P6*((sV-sV0)/DsV0)**2+P7*((TvN-TvN0)/DTvN0)**2+\ P8*(muV-muV0)/DmuV0*(sV-sV0)/DsV0+\ P9*(muV-muV0)/DmuV0*(TvN-TvN0)/DTvN0+\ P10*(sV-sV0)/DsV0*(TvN-TvN0)/DTvN0 # final transfer function template : # @numba.jit() def TF_my_templateup(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10): # here TOTAL (sum over synapses) excitatory and inhibitory input if(hasattr(fe, "__len__")): fe[fe<1e-8]=1e-8 else: if(fe<1e-8): fe=1e-8 if(hasattr(fi, "__len__")): fi[fi<1e-8]=1e-8 else: if(fi<1e-8): fi=1e-8 muV, sV, muGn, TvN = get_fluct_regime_varsup(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10) Vthre = threshold_func(muV, sV, TvN, muGn, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10) if(hasattr(muV, "__len__")): #print("ttt",isinstance(muV, list), hasattr(muV, "__len__")) sV[sV<1e-4]=1e-4 else: if(sV<1e-4): sV=1e-4 Fout_th = erfc_func(muV, sV, TvN, Vthre, Gl, Cm) if(hasattr(Fout_th, "__len__")): #print("ttt",isinstance(muV, list), hasattr(muV, "__len__")) Fout_th[Fout_th<1e-8]=1e-8 else: if(Fout_th<1e-8): Fout_th=1e-8 ''' if(El<-0.063): if(hasattr(Fout_th, "__len__")): #print("ttt",isinstance(muV, list), hasattr(muV, "__len__")) Fout_th[Fout_th>80.]=175 else: if(Fout_th>80.): print("Done") Fout_th=175 ''' #print 'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy',fe,fi,muV, sV, TvN,Fout_th return Fout_th def gaussian(x, mu, sig): return (1/(sig*np.sqrt(2*3.1415)))*np.exp(-np.power(x - mu, 2.) 
/ (2 * np.power(sig, 2.))) def TF_my_templateup_heterogeneity(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10): # here TOTAL (sum over synapses) excitatory and inhibitory input def Phet(k): locale=gaussian(k,1.,0.2)*TF_my_templateup(fe, fi,XX, Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El*k, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10) return locale outhet, err = quad(Phet, 0.1, 5) return outhet # @numba.jit() def make_loop(t, nu, vm, nu_aff_exc, nu_aff_inh, BIN,\ Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10): dt = t[1]-t[0] # constructing the Euler method for the activity rate for i_t in range(len(t)-1): # loop over time fe = (nu_aff_exc[i_t]+nu[i_t]+Fdrive) # afferent+recurrent excitation fi = nu[i_t]+nu_aff_inh[i_t] # recurrent inhibition W[i_t+1] = W[i_t] + dt/Tw*(b*nu[i_t]*Tw - W[i_t]) nu[i_t+1] = nu[i_t] +\ dt/BIN*(\ TF_my_template(fe, fi, W[i_t], Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10)\ -nu[i_t]) vm[i_t], _, _, _ = get_fluct_regime_vars(fe, fi, W[i_t], Qe, Te, Ee, Qi, Ti, Ei, Gl, Cm, El, Ntot, pconnec, gei, P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10) return nu, vm, W ################################################################ ##### Now fitting to Transfer Function data ################################################################ def fitting_Vthre_then_Fout(Fout, Fe_eff, fiSim,w, params,\ maxiter=50000, xtol=1e-5, with_square_terms=False): Gl, Cm , El = params['Gl'], params['Cm'] , params['El'] Fout, Fe_eff, fiSim,w = Fout.flatten(), Fe_eff.flatten(), fiSim.flatten(), w.flatten() #print 'Eccolo', Fout[8],w[8],len(Fout) muV, sV, muGn, TvN = get_fluct_regime_varsup(Fe_eff, fiSim,w, *pseq_params(params)) i_non_zeros = np.where((Fout>0.)&(Fout<60.)) Vthre_eff = effective_Vthre(Fout[i_non_zeros], muV[i_non_zeros],\ sV[i_non_zeros], TvN[i_non_zeros], params['Gl'], 
params['Cm']) if with_square_terms: P = np.zeros(11) else: P = np.zeros(5) P[:5] = Vthre_eff.mean(), 1e-3, 1e-3, 1e-3, 1e-3 def Res(p): if not with_square_terms: pp = np.concatenate([p, np.zeros(6)]) else: pp=p vthre = threshold_func(muV[i_non_zeros], sV[i_non_zeros],\ TvN[i_non_zeros], muGn[i_non_zeros], *pp) return np.mean((Vthre_eff-vthre)**2) #xtol=1e-19 #plsq = minimize(Res, P, method='nelder-mead',options={'xtol': xtol, 'disp': True, 'maxiter':maxiter}) plsq = minimize(Res, P, method='SLSQP',options={'ftol': 1e-15, 'disp': True, 'maxiter':40000}) #print plsq P = plsq.x def Res(p): if not with_square_terms: params['P'] = np.concatenate([p, np.zeros(6)]) else: params['P'] = p return np.mean((Fout-\ TF_my_templateup(Fe_eff, fiSim,w, *pseq_params(params)))**2) plsq = minimize(Res, P, method='nelder-mead',\ options={'xtol': xtol, 'disp': True, 'maxiter':maxiter}) params['P'] = P diff=(TF_my_templateup(Fe_eff, fiSim,w, *pseq_params(params))-Fout).mean() diff_M=(TF_my_templateup(Fe_eff, fiSim,w, *pseq_params(params))-Fout).max() print("rrrrr",diff,diff_M) plt.plot(fiSim,Fout,'rd',fiSim,TF_my_templateup(Fe_eff, fiSim,w, *pseq_params(params)),'bs') #plt.plot(fiSim,Fe-eff,Fout,'rd') plt.show() thrplot=threshold_func(muV, sV,TvN, muGn, *(plsq.x)) #np.save('FScell_Voltage.npy',[muV, sV,TvN,Fout]) plt.plot(muV,Fout,'rd',muV,erfc_func(muV, sV, TvN, thrplot, Gl, Cm),'bd') plt.show() plt.plot(muV,erfc_func(muV, 4e-3, 0.5, thrplot, Gl, Cm),'bd') plt.show() if with_square_terms: #return plsq.x return P else: return np.concatenate([plsq.x, np.zeros(6)]) def make_fit_from_data(DATA, with_square_terms=False): MEANfreq, SDfreq, Fe_eff, fiSim, params,w = np.load(DATA) Fe_eff, Fout = np.array(Fe_eff), np.array(MEANfreq) levels = fiSim # to store for colors fiSim = np.meshgrid(np.zeros(Fe_eff.shape[1]), fiSim)[1] P = fitting_Vthre_then_Fout(Fout, Fe_eff, fiSim,w, params,\ with_square_terms=with_square_terms) print("ffffff",P) 
#plt.plot(Fe_eff[2,:],MEANfreq[2,:],"bs",fiSim[:,5],MEANfreq[:,5],"o") plt.plot(Fe_eff[2,:],MEANfreq[2,:],"bs",fiSim[:,5],MEANfreq[:,5],"o") #np.save('FScell_freq.npy',[Fe_eff[2,:],fiSim[:,5],MEANfreq[:,5]]) plt.show() print '==================================================' print 1e3*np.array(P), 'mV' # then we save it: filename = DATA.replace('.npy', '_fit.npy') print 'coefficients saved in ', filename np.save(filename, np.array(P)) return P import argparse if __name__=='__main__': # First a nice documentation parser=argparse.ArgumentParser(description= """ '==================================================' '=====> FIT of the transfer function ==============' '=== and theoretical objects for the TF relation ==' '==================================================' """, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-f', "--FILE",help="file name of numerical TF data",\ default='data/example_data.npy') parser.add_argument("--With_Square",help="Add the square terms in the TF formula"+\ "\n then we have 7 parameters",\ action="store_true") args = parser.parse_args() make_fit_from_data(args.FILE, with_square_terms=args.With_Square)
threshold_func
identifier_name
Home.js
import React from 'react' import axios from 'axios' import ReactDOM from 'react-dom' import moment from 'moment' // import ColorPicker from 'rc-color-picker' import JSONTree from 'react-json-tree' const treeTheme = { scheme: 'monokai', author: 'wimer hazenberg (http://www.monokai.nl)', base00: 'none', //'#272822', base01: '#383830', base02: '#49483e', base03: '#75715e', base04: '#a59f85', base05: '#f8f8f2', base06: '#f5f4f1', base07: '#f9f8f5', base08: '#f92672', base09: '#fd971f', base0A: '#f4bf75', base0B: '#a6e22e', base0C: '#a1efe4', base0D: '#66d9ef', base0E: '#ae81ff', base0F: '#cc6633' } function parseObjects(text) { const objs = [] if(!text) return objs if(typeof text == 'object') { objs.push(text) return objs } let openBrace = -1 for(let i=0; i<text.length; i++) { if(text[i] == '{' && openBrace == -1) { openBrace = i } else if(text[i] == '}' && openBrace != -1) { const subText = text.substring(openBrace, i+1) // console.log(openBrace, i, subText) let o try {o = JSON.parse(subText)} catch(e){} // console.log('o', o) if(o) { objs.push(o) openBrace = -1 } } } // const matches = text.match(/\{[\s\S]*\}/i) //match all whitespace and non white space chars // if(matches) { // const jsonText = matches[0] // try {textJson = JSON.parse(jsonText)} catch(e){} // } return objs } class Home extends React.Component { constructor(props) { super(props) this.checkScroll = this.checkScroll.bind(this) this.onScroll = this.onScroll.bind(this) this.onClick = this.onClick.bind(this) this.onWheel = this.onWheel.bind(this) this.onKeyDown = this.onKeyDown.bind(this) } componentDidUpdate(prevProps, prevState) { this.checkScroll() } checkScroll() { if(this.props.shouldScrollBottom) { const ele = ReactDOM.findDOMNode(this.refs.trailingDiv) if(ele) ele.scrollIntoView({behavior: "smooth"}) } } onScroll(e) { } onClick(e) { this.checkScroll() //these delays trigger after the tree expands, can probably be improved upon by adding an expand listener to the tree object 
setTimeout(this.checkScroll, 200) setTimeout(this.checkScroll, 500) } onWheel(e) { const ele = e.currentTarget const height = ele.getBoundingClientRect().height const atBottom = ((ele.scrollTop + height) - ele.scrollHeight) > 10 if(this.props.shouldScrollBottom != atBottom) this.props.actions.set({'shouldScrollBottom': atBottom}) } onKeyDown(e) { // console.log('key', e.key, e.keyCode) if(e.key == ' ' || e.keyCode == 32) { this.props.actions.set({'shouldScrollBottom': !this.props.shouldScrollBottom}) } } render () { const {filters, filteredTraces, actions, shouldScrollBottom, showingFilters, shouldReconnect, userCount, whiteSpace, jsonExpandLevel} = this.props return ( <div className='home'> <div className='filters'> <div className='filters-controls'> <button className='btn btn-default' onClick={() => actions.set({showingFilters:!showingFilters})}> <i className={'fa' + (showingFilters?' fa-chevron-up':' fa-chevron-down')}/> </button> <button className='btn btn-default' onClick={() => actions.clearTraces()}> <i className='fa fa-ban'/> </button> {shouldScrollBottom ? 
<button className='btn btn-info' onClick={() => actions.set({'shouldScrollBottom': false})}><i className='fa fa-hand-o-down'/></button> : <button className='btn btn-danger' onClick={() => actions.set({'shouldScrollBottom': true})}><i className='fa fa-hand-paper-o'/></button> } {whiteSpace=='pre' && <button className='btn btn-default' onClick={() => actions.set({'whiteSpace': 'pre-wrap'})}><i className='fa fa-indent'/></button>} {whiteSpace=='pre-wrap' && <button className='btn btn-info' onClick={() => actions.set({'whiteSpace': 'nowrap'})}><i className='fa fa-align-left'/></button>} {whiteSpace=='nowrap' && <button className='btn btn-info' onClick={() => actions.set({'whiteSpace': 'pre'})}><i className='fa fa-list'/></button>} <button className='btn btn-default' onClick={() => actions.set({'jsonExpandLevel': jsonExpandLevel>=3?-1:jsonExpandLevel+1})}><i className='fa fa-level-down'/>{jsonExpandLevel}</button> <input type='text' className='form-control' value={this.props.searchText} onChange={e => actions.setSearchText(e.target.value)}/> {this.props.isSocketConnected ? <button className='btn btn-default' onClick={() => actions.toggleShouldReconnect()}><i className='fa fa-link'/>{shouldReconnect&&<i className='fa fa-bolt'/>}</button> : <button className='btn btn-danger' onClick={() => actions.toggleShouldReconnect()}><i className='fa fa-unlink'/>{shouldReconnect&&<i className='fa fa-bolt'/>}</button> } <button className='btn btn-default'><i className='fa fa-user'/>{' ' + (userCount==-1 ? '-' : userCount)}</button> <button className='btn btn-default' onClick={() => actions.createFilter()}> <i className='fa fa-plus'/> </button> </div> {showingFilters && filters.map((filter, i) => { const mod = (filter) => actions.setFilter(i, filter) return ( <div key={i} className='filter'> {/*Filter On/Off*/} {filter.get('isActive') ? 
<button className='btn btn-default' onClick={() => mod(filter.set('isActive', false))}><i className='fa fa-circle'/></button> : <button className='btn btn-danger' onClick={() => mod(filter.set('isActive', true))}><i className='fa fa-circle-o'/></button> } {/*Query Level*/} {[null,'v','d','i','w','e'].map((level, i, a) => { if(filter.get('queryLevel') != level) return const nextLevel = i==a.length-1 ? a[0] : a[i+1] return <button className={'btn' + (level?' btn-info':' btn-default')} key={level} onClick={() => mod(filter.set('queryLevel', nextLevel))}> {level ? level.toUpperCase() : '--'} </button> })} {/*Query Mode*/} {filter.get('queryMode') == 'contains' && <button className="btn btn-default" title='Plain' onClick={() => mod(filter.set('queryMode', 'regex'))}> P </button> } {filter.get('queryMode') == 'regex' && <button className="btn btn-info" title='Regex' onClick={() => mod(filter.set('queryMode', 'bundle'))}> R </button> } {filter.get('queryMode') == 'bundle' && <button className="btn btn-info" title='Bundle' onClick={() => mod(filter.set('queryMode', 'contains'))}> B </button> } {/*Query*/} <input className='form-control' type='text' value={filter.get('query') || ''} onChange={e => mod(filter.set('query', e.target.value))} /> {/*Visibility*/} {filter.get('show') == true && <button className="btn btn-info" onClick={() => mod(filter.set('show', false))}>S</button> } {filter.get('show') == false && <button className="btn btn-info" onClick={() => mod(filter.set('show', null))}>H</button> } {filter.get('show') == null && <button className="btn btn-default" onClick={() => mod(filter.set('show', true))}>--</button> } {/*Styles*/} <input className='form-control style-control' type='text' value={filter.getIn(['style', 'color']) || ''} onChange={e => mod(filter.setIn(['style', 'color'], e.target.value))} /> <input className='form-control style-control' type='text' value={filter.getIn(['style', 'background']) || ''} onChange={e => mod(filter.setIn(['style', 
'background'], e.target.value))} /> {/*Order*/} <button className='btn btn-default' onClick={() => actions.swapFilters(i, i-1)}> <i className='fa fa-arrow-up'/> </button> <button className='btn btn-default' onClick={() => actions.swapFilters(i, i+1)}> <i className='fa fa-arrow-down'/> </button> {/*Remove*/} <button className='btn btn-default' onClick={() => actions.removeFilter(i)}> <i className='fa fa-remove'/> </button> </div> ) })} </div> <div className='traces' onScroll={this.onScroll} onClick={this.onClick} onWheel={this.onWheel} onKeyDown={this.onKeyDown}> {filteredTraces.map((trace, i, list) => { const style = trace.get('style') ? trace.get('style').toJS() : {} const timestamp = moment(trace.get('instant')).format('HH:mm:ss')//.format('YYYY-MM-DD HH:mm:ss') const bundle = trace.get('bundle')
const parsedObjects = parseObjects(text) return ( <div className={'trace ' + level} key={i} style={{background:style.background}}> <span className='trace-timestamp'>{timestamp}</span> <span className='trace-bundle'>{bundle}</span> <span className='trace-level'>{level.toUpperCase()}</span> {parsedObjects.map((o,i) => ( <span className='trace-json' key={i}> <JSONTree data={o} theme={treeTheme} invertTheme={false} shouldExpandNode={(keyName, data, level) => level<=jsonExpandLevel} /> </span> ))} <span className={'trace-text'} style={{color:style.color, whiteSpace:whiteSpace}}>{text}</span> </div> ) })} <div id='trailingDiv' ref='trailingDiv'/> </div> </div> ) } } import { connect } from 'react-redux' import { bindActionCreators } from 'redux' import * as actions from '../redux/actions' export default connect( (state) => { //map store to props return { searchText: state.app.get('searchText'), shouldScrollBottom: state.app.get('shouldScrollBottom'), whiteSpace: state.app.get('whiteSpace'), jsonExpandLevel: state.app.get('jsonExpandLevel'), filteredTraces: state.app.get('filteredTraces'), filters: state.app.get('filters'), showingFilters: state.app.get('showingFilters'), isSocketConnected: state.app.get('isSocketConnected'), shouldReconnect: state.app.get('shouldReconnect'), userCount: state.app.get('userCount'), } }, (dispatch) => { //map dispatch to props return { actions: bindActionCreators(actions, dispatch) } } )(Home)
const level = trace.get('level') const text = trace.get('text') // console.log(trace)
random_line_split
Home.js
import React from 'react' import axios from 'axios' import ReactDOM from 'react-dom' import moment from 'moment' // import ColorPicker from 'rc-color-picker' import JSONTree from 'react-json-tree' const treeTheme = { scheme: 'monokai', author: 'wimer hazenberg (http://www.monokai.nl)', base00: 'none', //'#272822', base01: '#383830', base02: '#49483e', base03: '#75715e', base04: '#a59f85', base05: '#f8f8f2', base06: '#f5f4f1', base07: '#f9f8f5', base08: '#f92672', base09: '#fd971f', base0A: '#f4bf75', base0B: '#a6e22e', base0C: '#a1efe4', base0D: '#66d9ef', base0E: '#ae81ff', base0F: '#cc6633' } function parseObjects(text) { const objs = [] if(!text) return objs if(typeof text == 'object') { objs.push(text) return objs } let openBrace = -1 for(let i=0; i<text.length; i++) { if(text[i] == '{' && openBrace == -1) { openBrace = i } else if(text[i] == '}' && openBrace != -1) { const subText = text.substring(openBrace, i+1) // console.log(openBrace, i, subText) let o try {o = JSON.parse(subText)} catch(e){} // console.log('o', o) if(o) { objs.push(o) openBrace = -1 } } } // const matches = text.match(/\{[\s\S]*\}/i) //match all whitespace and non white space chars // if(matches) { // const jsonText = matches[0] // try {textJson = JSON.parse(jsonText)} catch(e){} // } return objs } class Home extends React.Component { constructor(props) { super(props) this.checkScroll = this.checkScroll.bind(this) this.onScroll = this.onScroll.bind(this) this.onClick = this.onClick.bind(this) this.onWheel = this.onWheel.bind(this) this.onKeyDown = this.onKeyDown.bind(this) } componentDidUpdate(prevProps, prevState) { this.checkScroll() } checkScroll()
onScroll(e) { } onClick(e) { this.checkScroll() //these delays trigger after the tree expands, can probably be improved upon by adding an expand listener to the tree object setTimeout(this.checkScroll, 200) setTimeout(this.checkScroll, 500) } onWheel(e) { const ele = e.currentTarget const height = ele.getBoundingClientRect().height const atBottom = ((ele.scrollTop + height) - ele.scrollHeight) > 10 if(this.props.shouldScrollBottom != atBottom) this.props.actions.set({'shouldScrollBottom': atBottom}) } onKeyDown(e) { // console.log('key', e.key, e.keyCode) if(e.key == ' ' || e.keyCode == 32) { this.props.actions.set({'shouldScrollBottom': !this.props.shouldScrollBottom}) } } render () { const {filters, filteredTraces, actions, shouldScrollBottom, showingFilters, shouldReconnect, userCount, whiteSpace, jsonExpandLevel} = this.props return ( <div className='home'> <div className='filters'> <div className='filters-controls'> <button className='btn btn-default' onClick={() => actions.set({showingFilters:!showingFilters})}> <i className={'fa' + (showingFilters?' fa-chevron-up':' fa-chevron-down')}/> </button> <button className='btn btn-default' onClick={() => actions.clearTraces()}> <i className='fa fa-ban'/> </button> {shouldScrollBottom ? 
<button className='btn btn-info' onClick={() => actions.set({'shouldScrollBottom': false})}><i className='fa fa-hand-o-down'/></button> : <button className='btn btn-danger' onClick={() => actions.set({'shouldScrollBottom': true})}><i className='fa fa-hand-paper-o'/></button> } {whiteSpace=='pre' && <button className='btn btn-default' onClick={() => actions.set({'whiteSpace': 'pre-wrap'})}><i className='fa fa-indent'/></button>} {whiteSpace=='pre-wrap' && <button className='btn btn-info' onClick={() => actions.set({'whiteSpace': 'nowrap'})}><i className='fa fa-align-left'/></button>} {whiteSpace=='nowrap' && <button className='btn btn-info' onClick={() => actions.set({'whiteSpace': 'pre'})}><i className='fa fa-list'/></button>} <button className='btn btn-default' onClick={() => actions.set({'jsonExpandLevel': jsonExpandLevel>=3?-1:jsonExpandLevel+1})}><i className='fa fa-level-down'/>{jsonExpandLevel}</button> <input type='text' className='form-control' value={this.props.searchText} onChange={e => actions.setSearchText(e.target.value)}/> {this.props.isSocketConnected ? <button className='btn btn-default' onClick={() => actions.toggleShouldReconnect()}><i className='fa fa-link'/>{shouldReconnect&&<i className='fa fa-bolt'/>}</button> : <button className='btn btn-danger' onClick={() => actions.toggleShouldReconnect()}><i className='fa fa-unlink'/>{shouldReconnect&&<i className='fa fa-bolt'/>}</button> } <button className='btn btn-default'><i className='fa fa-user'/>{' ' + (userCount==-1 ? '-' : userCount)}</button> <button className='btn btn-default' onClick={() => actions.createFilter()}> <i className='fa fa-plus'/> </button> </div> {showingFilters && filters.map((filter, i) => { const mod = (filter) => actions.setFilter(i, filter) return ( <div key={i} className='filter'> {/*Filter On/Off*/} {filter.get('isActive') ? 
<button className='btn btn-default' onClick={() => mod(filter.set('isActive', false))}><i className='fa fa-circle'/></button> : <button className='btn btn-danger' onClick={() => mod(filter.set('isActive', true))}><i className='fa fa-circle-o'/></button> } {/*Query Level*/} {[null,'v','d','i','w','e'].map((level, i, a) => { if(filter.get('queryLevel') != level) return const nextLevel = i==a.length-1 ? a[0] : a[i+1] return <button className={'btn' + (level?' btn-info':' btn-default')} key={level} onClick={() => mod(filter.set('queryLevel', nextLevel))}> {level ? level.toUpperCase() : '--'} </button> })} {/*Query Mode*/} {filter.get('queryMode') == 'contains' && <button className="btn btn-default" title='Plain' onClick={() => mod(filter.set('queryMode', 'regex'))}> P </button> } {filter.get('queryMode') == 'regex' && <button className="btn btn-info" title='Regex' onClick={() => mod(filter.set('queryMode', 'bundle'))}> R </button> } {filter.get('queryMode') == 'bundle' && <button className="btn btn-info" title='Bundle' onClick={() => mod(filter.set('queryMode', 'contains'))}> B </button> } {/*Query*/} <input className='form-control' type='text' value={filter.get('query') || ''} onChange={e => mod(filter.set('query', e.target.value))} /> {/*Visibility*/} {filter.get('show') == true && <button className="btn btn-info" onClick={() => mod(filter.set('show', false))}>S</button> } {filter.get('show') == false && <button className="btn btn-info" onClick={() => mod(filter.set('show', null))}>H</button> } {filter.get('show') == null && <button className="btn btn-default" onClick={() => mod(filter.set('show', true))}>--</button> } {/*Styles*/} <input className='form-control style-control' type='text' value={filter.getIn(['style', 'color']) || ''} onChange={e => mod(filter.setIn(['style', 'color'], e.target.value))} /> <input className='form-control style-control' type='text' value={filter.getIn(['style', 'background']) || ''} onChange={e => mod(filter.setIn(['style', 
'background'], e.target.value))} /> {/*Order*/} <button className='btn btn-default' onClick={() => actions.swapFilters(i, i-1)}> <i className='fa fa-arrow-up'/> </button> <button className='btn btn-default' onClick={() => actions.swapFilters(i, i+1)}> <i className='fa fa-arrow-down'/> </button> {/*Remove*/} <button className='btn btn-default' onClick={() => actions.removeFilter(i)}> <i className='fa fa-remove'/> </button> </div> ) })} </div> <div className='traces' onScroll={this.onScroll} onClick={this.onClick} onWheel={this.onWheel} onKeyDown={this.onKeyDown}> {filteredTraces.map((trace, i, list) => { const style = trace.get('style') ? trace.get('style').toJS() : {} const timestamp = moment(trace.get('instant')).format('HH:mm:ss')//.format('YYYY-MM-DD HH:mm:ss') const bundle = trace.get('bundle') const level = trace.get('level') const text = trace.get('text') // console.log(trace) const parsedObjects = parseObjects(text) return ( <div className={'trace ' + level} key={i} style={{background:style.background}}> <span className='trace-timestamp'>{timestamp}</span> <span className='trace-bundle'>{bundle}</span> <span className='trace-level'>{level.toUpperCase()}</span> {parsedObjects.map((o,i) => ( <span className='trace-json' key={i}> <JSONTree data={o} theme={treeTheme} invertTheme={false} shouldExpandNode={(keyName, data, level) => level<=jsonExpandLevel} /> </span> ))} <span className={'trace-text'} style={{color:style.color, whiteSpace:whiteSpace}}>{text}</span> </div> ) })} <div id='trailingDiv' ref='trailingDiv'/> </div> </div> ) } } import { connect } from 'react-redux' import { bindActionCreators } from 'redux' import * as actions from '../redux/actions' export default connect( (state) => { //map store to props return { searchText: state.app.get('searchText'), shouldScrollBottom: state.app.get('shouldScrollBottom'), whiteSpace: state.app.get('whiteSpace'), jsonExpandLevel: state.app.get('jsonExpandLevel'), filteredTraces: state.app.get('filteredTraces'), 
filters: state.app.get('filters'), showingFilters: state.app.get('showingFilters'), isSocketConnected: state.app.get('isSocketConnected'), shouldReconnect: state.app.get('shouldReconnect'), userCount: state.app.get('userCount'), } }, (dispatch) => { //map dispatch to props return { actions: bindActionCreators(actions, dispatch) } } )(Home)
{ if(this.props.shouldScrollBottom) { const ele = ReactDOM.findDOMNode(this.refs.trailingDiv) if(ele) ele.scrollIntoView({behavior: "smooth"}) } }
identifier_body
Home.js
import React from 'react' import axios from 'axios' import ReactDOM from 'react-dom' import moment from 'moment' // import ColorPicker from 'rc-color-picker' import JSONTree from 'react-json-tree' const treeTheme = { scheme: 'monokai', author: 'wimer hazenberg (http://www.monokai.nl)', base00: 'none', //'#272822', base01: '#383830', base02: '#49483e', base03: '#75715e', base04: '#a59f85', base05: '#f8f8f2', base06: '#f5f4f1', base07: '#f9f8f5', base08: '#f92672', base09: '#fd971f', base0A: '#f4bf75', base0B: '#a6e22e', base0C: '#a1efe4', base0D: '#66d9ef', base0E: '#ae81ff', base0F: '#cc6633' } function parseObjects(text) { const objs = [] if(!text) return objs if(typeof text == 'object') { objs.push(text) return objs } let openBrace = -1 for(let i=0; i<text.length; i++) { if(text[i] == '{' && openBrace == -1) { openBrace = i } else if(text[i] == '}' && openBrace != -1) { const subText = text.substring(openBrace, i+1) // console.log(openBrace, i, subText) let o try {o = JSON.parse(subText)} catch(e){} // console.log('o', o) if(o) { objs.push(o) openBrace = -1 } } } // const matches = text.match(/\{[\s\S]*\}/i) //match all whitespace and non white space chars // if(matches) { // const jsonText = matches[0] // try {textJson = JSON.parse(jsonText)} catch(e){} // } return objs } class Home extends React.Component { constructor(props) { super(props) this.checkScroll = this.checkScroll.bind(this) this.onScroll = this.onScroll.bind(this) this.onClick = this.onClick.bind(this) this.onWheel = this.onWheel.bind(this) this.onKeyDown = this.onKeyDown.bind(this) }
(prevProps, prevState) { this.checkScroll() } checkScroll() { if(this.props.shouldScrollBottom) { const ele = ReactDOM.findDOMNode(this.refs.trailingDiv) if(ele) ele.scrollIntoView({behavior: "smooth"}) } } onScroll(e) { } onClick(e) { this.checkScroll() //these delays trigger after the tree expands, can probably be improved upon by adding an expand listener to the tree object setTimeout(this.checkScroll, 200) setTimeout(this.checkScroll, 500) } onWheel(e) { const ele = e.currentTarget const height = ele.getBoundingClientRect().height const atBottom = ((ele.scrollTop + height) - ele.scrollHeight) > 10 if(this.props.shouldScrollBottom != atBottom) this.props.actions.set({'shouldScrollBottom': atBottom}) } onKeyDown(e) { // console.log('key', e.key, e.keyCode) if(e.key == ' ' || e.keyCode == 32) { this.props.actions.set({'shouldScrollBottom': !this.props.shouldScrollBottom}) } } render () { const {filters, filteredTraces, actions, shouldScrollBottom, showingFilters, shouldReconnect, userCount, whiteSpace, jsonExpandLevel} = this.props return ( <div className='home'> <div className='filters'> <div className='filters-controls'> <button className='btn btn-default' onClick={() => actions.set({showingFilters:!showingFilters})}> <i className={'fa' + (showingFilters?' fa-chevron-up':' fa-chevron-down')}/> </button> <button className='btn btn-default' onClick={() => actions.clearTraces()}> <i className='fa fa-ban'/> </button> {shouldScrollBottom ? 
<button className='btn btn-info' onClick={() => actions.set({'shouldScrollBottom': false})}><i className='fa fa-hand-o-down'/></button> : <button className='btn btn-danger' onClick={() => actions.set({'shouldScrollBottom': true})}><i className='fa fa-hand-paper-o'/></button> } {whiteSpace=='pre' && <button className='btn btn-default' onClick={() => actions.set({'whiteSpace': 'pre-wrap'})}><i className='fa fa-indent'/></button>} {whiteSpace=='pre-wrap' && <button className='btn btn-info' onClick={() => actions.set({'whiteSpace': 'nowrap'})}><i className='fa fa-align-left'/></button>} {whiteSpace=='nowrap' && <button className='btn btn-info' onClick={() => actions.set({'whiteSpace': 'pre'})}><i className='fa fa-list'/></button>} <button className='btn btn-default' onClick={() => actions.set({'jsonExpandLevel': jsonExpandLevel>=3?-1:jsonExpandLevel+1})}><i className='fa fa-level-down'/>{jsonExpandLevel}</button> <input type='text' className='form-control' value={this.props.searchText} onChange={e => actions.setSearchText(e.target.value)}/> {this.props.isSocketConnected ? <button className='btn btn-default' onClick={() => actions.toggleShouldReconnect()}><i className='fa fa-link'/>{shouldReconnect&&<i className='fa fa-bolt'/>}</button> : <button className='btn btn-danger' onClick={() => actions.toggleShouldReconnect()}><i className='fa fa-unlink'/>{shouldReconnect&&<i className='fa fa-bolt'/>}</button> } <button className='btn btn-default'><i className='fa fa-user'/>{' ' + (userCount==-1 ? '-' : userCount)}</button> <button className='btn btn-default' onClick={() => actions.createFilter()}> <i className='fa fa-plus'/> </button> </div> {showingFilters && filters.map((filter, i) => { const mod = (filter) => actions.setFilter(i, filter) return ( <div key={i} className='filter'> {/*Filter On/Off*/} {filter.get('isActive') ? 
<button className='btn btn-default' onClick={() => mod(filter.set('isActive', false))}><i className='fa fa-circle'/></button> : <button className='btn btn-danger' onClick={() => mod(filter.set('isActive', true))}><i className='fa fa-circle-o'/></button> } {/*Query Level*/} {[null,'v','d','i','w','e'].map((level, i, a) => { if(filter.get('queryLevel') != level) return const nextLevel = i==a.length-1 ? a[0] : a[i+1] return <button className={'btn' + (level?' btn-info':' btn-default')} key={level} onClick={() => mod(filter.set('queryLevel', nextLevel))}> {level ? level.toUpperCase() : '--'} </button> })} {/*Query Mode*/} {filter.get('queryMode') == 'contains' && <button className="btn btn-default" title='Plain' onClick={() => mod(filter.set('queryMode', 'regex'))}> P </button> } {filter.get('queryMode') == 'regex' && <button className="btn btn-info" title='Regex' onClick={() => mod(filter.set('queryMode', 'bundle'))}> R </button> } {filter.get('queryMode') == 'bundle' && <button className="btn btn-info" title='Bundle' onClick={() => mod(filter.set('queryMode', 'contains'))}> B </button> } {/*Query*/} <input className='form-control' type='text' value={filter.get('query') || ''} onChange={e => mod(filter.set('query', e.target.value))} /> {/*Visibility*/} {filter.get('show') == true && <button className="btn btn-info" onClick={() => mod(filter.set('show', false))}>S</button> } {filter.get('show') == false && <button className="btn btn-info" onClick={() => mod(filter.set('show', null))}>H</button> } {filter.get('show') == null && <button className="btn btn-default" onClick={() => mod(filter.set('show', true))}>--</button> } {/*Styles*/} <input className='form-control style-control' type='text' value={filter.getIn(['style', 'color']) || ''} onChange={e => mod(filter.setIn(['style', 'color'], e.target.value))} /> <input className='form-control style-control' type='text' value={filter.getIn(['style', 'background']) || ''} onChange={e => mod(filter.setIn(['style', 
'background'], e.target.value))} /> {/*Order*/} <button className='btn btn-default' onClick={() => actions.swapFilters(i, i-1)}> <i className='fa fa-arrow-up'/> </button> <button className='btn btn-default' onClick={() => actions.swapFilters(i, i+1)}> <i className='fa fa-arrow-down'/> </button> {/*Remove*/} <button className='btn btn-default' onClick={() => actions.removeFilter(i)}> <i className='fa fa-remove'/> </button> </div> ) })} </div> <div className='traces' onScroll={this.onScroll} onClick={this.onClick} onWheel={this.onWheel} onKeyDown={this.onKeyDown}> {filteredTraces.map((trace, i, list) => { const style = trace.get('style') ? trace.get('style').toJS() : {} const timestamp = moment(trace.get('instant')).format('HH:mm:ss')//.format('YYYY-MM-DD HH:mm:ss') const bundle = trace.get('bundle') const level = trace.get('level') const text = trace.get('text') // console.log(trace) const parsedObjects = parseObjects(text) return ( <div className={'trace ' + level} key={i} style={{background:style.background}}> <span className='trace-timestamp'>{timestamp}</span> <span className='trace-bundle'>{bundle}</span> <span className='trace-level'>{level.toUpperCase()}</span> {parsedObjects.map((o,i) => ( <span className='trace-json' key={i}> <JSONTree data={o} theme={treeTheme} invertTheme={false} shouldExpandNode={(keyName, data, level) => level<=jsonExpandLevel} /> </span> ))} <span className={'trace-text'} style={{color:style.color, whiteSpace:whiteSpace}}>{text}</span> </div> ) })} <div id='trailingDiv' ref='trailingDiv'/> </div> </div> ) } } import { connect } from 'react-redux' import { bindActionCreators } from 'redux' import * as actions from '../redux/actions' export default connect( (state) => { //map store to props return { searchText: state.app.get('searchText'), shouldScrollBottom: state.app.get('shouldScrollBottom'), whiteSpace: state.app.get('whiteSpace'), jsonExpandLevel: state.app.get('jsonExpandLevel'), filteredTraces: state.app.get('filteredTraces'), 
filters: state.app.get('filters'), showingFilters: state.app.get('showingFilters'), isSocketConnected: state.app.get('isSocketConnected'), shouldReconnect: state.app.get('shouldReconnect'), userCount: state.app.get('userCount'), } }, (dispatch) => { //map dispatch to props return { actions: bindActionCreators(actions, dispatch) } } )(Home)
componentDidUpdate
identifier_name
mc6845.rs
/* MartyPC https://github.com/dbalsom/martypc Copyright 2022-2023 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------- devices::mc6845.rs Implementation of the Motorola MC6845 CRT controller. Used internally by the MDA and CGA video cards. */ use crate::tracelogger::TraceLogger; const CURSOR_LINE_MASK: u8 = 0b0000_1111; const CURSOR_ATTR_MASK: u8 = 0b0011_0000; const REGISTER_MAX: usize = 17; const REGISTER_UNREADABLE_VALUE: u8 = 0x00; #[derive (Copy, Clone, Debug)] pub enum CrtcRegister { HorizontalTotal, HorizontalDisplayed, HorizontalSyncPosition, SyncWidth, VerticalTotal, VerticalTotalAdjust, VerticalDisplayed, VerticalSync, InterlaceMode, MaximumScanlineAddress, CursorStartLine, CursorEndLine, StartAddressH, StartAddressL, CursorAddressH, CursorAddressL, LightPenPositionH, LightPenPositionL, } use crate::mc6845::CrtcRegister::*; macro_rules! 
trace { ($self:ident, $($t:tt)*) => {{ $self.trace_logger.print(&format!($($t)*)); $self.trace_logger.print("\n".to_string()); }}; } macro_rules! trace_regs { ($self:ident) => { $self.trace_logger.print( &format!("") /* &format!( "[SL:{:03} HCC:{:03} VCC:{:03} VT:{:03} VS:{:03}] ", $self.scanline, $self.hcc_c0, $self.vcc_c4, $self.crtc_vertical_total, $self.crtc_vertical_sync_pos ) */ ); }; } pub struct Crtc6845 { reg: [u8; 18], // Externally-accessable CRTC register file reg_select: CrtcRegister, // Selected CRTC register start_address: u16, // Calculated value from R12 & R13 cursor_address: u16, // Calculated value from R14 & R15 lightpen_position: u16, // Calculated value from R16 & R17 cursor_status: bool, cursor_start_line: u8, cursor_slow_blink: bool, cursor_blink_rate: f64, display_enable: bool, // True if we are in counting in the display area, false otherwise hcc_c0: u8, // Horizontal character counter (x pos of character) vlc_c9: u8, // Vertical line counter - counts during vsync period vcc_c4: u8, // Vertical character counter (y pos of character) vsc_c3h: u8, hsc_c3l: u8, vtac_c5: u8, vma: u16, // VMA register - Video memory address vma_t: u16, // VMA' register - Video memory address temporary trace_logger: TraceLogger, } impl Crtc6845 { fn new(trace_logger: TraceLogger) -> Self { Self { reg: [0; 18], reg_select: HorizontalTotal, start_address: 0, cursor_address: 0, lightpen_position: 0, cursor_status: false, cursor_start_line: 0, cursor_slow_blink: false, cursor_blink_rate: 0.0, display_enable: false, hcc_c0: 0, vlc_c9: 0, vcc_c4: 0, vsc_c3h: 0, hsc_c3l: 0, vtac_c5: 0, vma: 0, vma_t: 0, trace_logger } } pub fn select_register(&mut self, idx: usize) { if idx > REGISTER_MAX { return } let reg_select = match idx { 0 => HorizontalTotal, 1 => HorizontalDisplayed, 2 => HorizontalSyncPosition, 3 => SyncWidth, 4 => VerticalTotal, 5 => VerticalTotalAdjust, 6 => VerticalDisplayed, 7 => VerticalSync, 8 => InterlaceMode, 9 => MaximumScanlineAddress, 10 => 
CursorStartLine, 11 => CursorEndLine, 12 => StartAddressH, 13 => StartAddressL, 14 => CursorAddressH, 15 => CursorAddressL, 16 => LightPenPositionH, _ => LightPenPositionL, }; } pub fn write_register(&mut self, byte: u8) { match self.reg_select { CrtcRegister::HorizontalTotal => { // (R0) 8 bit write only self.reg[0] = byte; }, CrtcRegister::HorizontalDisplayed => { // (R1) 8 bit write only self.reg[1] = byte; } CrtcRegister::HorizontalSyncPosition => { // (R2) 8 bit write only self.reg[2] = byte; }, CrtcRegister::SyncWidth => { // (R3) 8 bit write only self.reg[3] = byte; }, CrtcRegister::VerticalTotal => { // (R4) 7 bit write only self.reg[4] = byte & 0x7F; trace_regs!(self); trace!( self, "CRTC Register Write (04h): VerticalTotal updated: {}", self.reg[4] ) }, CrtcRegister::VerticalTotalAdjust => { // (R5) 5 bit write only self.reg[5] = byte & 0x1F; } CrtcRegister::VerticalDisplayed => { // (R6) 7 bit write only self.reg[6] = byte & 0x7F; }, CrtcRegister::VerticalSync => { // (R7) 7 bit write only self.reg[7] = byte & 0x7F; trace_regs!(self); trace!( self, "CRTC Register Write (07h): VerticalSync updated: {}", self.reg[7] ) }, CrtcRegister::InterlaceMode => { // (R8) 2 bit write only self.reg[8] = byte & 0x03; }, CrtcRegister::MaximumScanlineAddress => { // (R9) 5 bit write only self.reg[9] = byte & 0x1F; } CrtcRegister::CursorStartLine => { // (R10) 7 bit bitfield. Write only. self.reg[10] = byte & 0x7F; self.cursor_start_line = byte & CURSOR_LINE_MASK; match byte & CURSOR_ATTR_MASK >> 4 { 0b00 | 0b10 => { self.cursor_status = true; self.cursor_slow_blink = false; } 0b01 => { self.cursor_status = false; self.cursor_slow_blink = false; } _ => { self.cursor_status = true; self.cursor_slow_blink = true; } } } CrtcRegister::CursorEndLine => { // (R11) 5 bit write only self.reg[11] = byte & 0x1F; } CrtcRegister::StartAddressH => { // (R12) 6 bit write only self.reg[12] = byte & 0x3F; trace_regs!(self); trace!( self,
self.update_start_address(); } CrtcRegister::StartAddressL => { // (R13) 8 bit write only self.reg[13] = byte; trace_regs!(self); trace!( self, "CRTC Register Write (0Dh): StartAddressL updated: {:02X}", byte ); self.update_start_address(); } CrtcRegister::CursorAddressH => { // (R14) 6 bit read/write self.reg[14] = byte & 0x3F; self.update_cursor_address(); } CrtcRegister::CursorAddressL => { // (R15) 8 bit read/write self.reg[15] = byte; self.update_cursor_address(); } CrtcRegister::LightPenPositionH => { // (R16) 6 bit read only } CrtcRegister::LightPenPositionL => { // (R17) 8 bit read only } } } pub fn read_register(&self) -> u8 { match self.reg_select { CursorAddressH | CursorAddressL | LightPenPositionH | LightPenPositionL => { self.reg[self.reg_select as usize] } _ => REGISTER_UNREADABLE_VALUE } } pub fn read_address(&self) -> u16 { self.vma } fn update_start_address(&mut self) { self.start_address = (self.reg[12] as u16) << 8 | self.reg[13] as u16 } fn update_cursor_address(&mut self) { self.cursor_address = (self.reg[14] as u16) << 8 | self.reg[15] as u16 } }
"CRTC Register Write (0Ch): StartAddressH updated: {:02X}", byte );
random_line_split
mc6845.rs
/* MartyPC https://github.com/dbalsom/martypc Copyright 2022-2023 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------- devices::mc6845.rs Implementation of the Motorola MC6845 CRT controller. Used internally by the MDA and CGA video cards. */ use crate::tracelogger::TraceLogger; const CURSOR_LINE_MASK: u8 = 0b0000_1111; const CURSOR_ATTR_MASK: u8 = 0b0011_0000; const REGISTER_MAX: usize = 17; const REGISTER_UNREADABLE_VALUE: u8 = 0x00; #[derive (Copy, Clone, Debug)] pub enum CrtcRegister { HorizontalTotal, HorizontalDisplayed, HorizontalSyncPosition, SyncWidth, VerticalTotal, VerticalTotalAdjust, VerticalDisplayed, VerticalSync, InterlaceMode, MaximumScanlineAddress, CursorStartLine, CursorEndLine, StartAddressH, StartAddressL, CursorAddressH, CursorAddressL, LightPenPositionH, LightPenPositionL, } use crate::mc6845::CrtcRegister::*; macro_rules! 
trace { ($self:ident, $($t:tt)*) => {{ $self.trace_logger.print(&format!($($t)*)); $self.trace_logger.print("\n".to_string()); }}; } macro_rules! trace_regs { ($self:ident) => { $self.trace_logger.print( &format!("") /* &format!( "[SL:{:03} HCC:{:03} VCC:{:03} VT:{:03} VS:{:03}] ", $self.scanline, $self.hcc_c0, $self.vcc_c4, $self.crtc_vertical_total, $self.crtc_vertical_sync_pos ) */ ); }; } pub struct Crtc6845
reg: [u8; 18], // Externally-accessable CRTC register file reg_select: CrtcRegister, // Selected CRTC register start_address: u16, // Calculated value from R12 & R13 cursor_address: u16, // Calculated value from R14 & R15 lightpen_position: u16, // Calculated value from R16 & R17 cursor_status: bool, cursor_start_line: u8, cursor_slow_blink: bool, cursor_blink_rate: f64, display_enable: bool, // True if we are in counting in the display area, false otherwise hcc_c0: u8, // Horizontal character counter (x pos of character) vlc_c9: u8, // Vertical line counter - counts during vsync period vcc_c4: u8, // Vertical character counter (y pos of character) vsc_c3h: u8, hsc_c3l: u8, vtac_c5: u8, vma: u16, // VMA register - Video memory address vma_t: u16, // VMA' register - Video memory address temporary trace_logger: TraceLogger, } impl Crtc6845 { fn new(trace_logger: TraceLogger) -> Self { Self { reg: [0; 18], reg_select: HorizontalTotal, start_address: 0, cursor_address: 0, lightpen_position: 0, cursor_status: false, cursor_start_line: 0, cursor_slow_blink: false, cursor_blink_rate: 0.0, display_enable: false, hcc_c0: 0, vlc_c9: 0, vcc_c4: 0, vsc_c3h: 0, hsc_c3l: 0, vtac_c5: 0, vma: 0, vma_t: 0, trace_logger } } pub fn select_register(&mut self, idx: usize) { if idx > REGISTER_MAX { return } let reg_select = match idx { 0 => HorizontalTotal, 1 => HorizontalDisplayed, 2 => HorizontalSyncPosition, 3 => SyncWidth, 4 => VerticalTotal, 5 => VerticalTotalAdjust, 6 => VerticalDisplayed, 7 => VerticalSync, 8 => InterlaceMode, 9 => MaximumScanlineAddress, 10 => CursorStartLine, 11 => CursorEndLine, 12 => StartAddressH, 13 => StartAddressL, 14 => CursorAddressH, 15 => CursorAddressL, 16 => LightPenPositionH, _ => LightPenPositionL, }; } pub fn write_register(&mut self, byte: u8) { match self.reg_select { CrtcRegister::HorizontalTotal => { // (R0) 8 bit write only self.reg[0] = byte; }, CrtcRegister::HorizontalDisplayed => { // (R1) 8 bit write only self.reg[1] = byte; } 
CrtcRegister::HorizontalSyncPosition => { // (R2) 8 bit write only self.reg[2] = byte; }, CrtcRegister::SyncWidth => { // (R3) 8 bit write only self.reg[3] = byte; }, CrtcRegister::VerticalTotal => { // (R4) 7 bit write only self.reg[4] = byte & 0x7F; trace_regs!(self); trace!( self, "CRTC Register Write (04h): VerticalTotal updated: {}", self.reg[4] ) }, CrtcRegister::VerticalTotalAdjust => { // (R5) 5 bit write only self.reg[5] = byte & 0x1F; } CrtcRegister::VerticalDisplayed => { // (R6) 7 bit write only self.reg[6] = byte & 0x7F; }, CrtcRegister::VerticalSync => { // (R7) 7 bit write only self.reg[7] = byte & 0x7F; trace_regs!(self); trace!( self, "CRTC Register Write (07h): VerticalSync updated: {}", self.reg[7] ) }, CrtcRegister::InterlaceMode => { // (R8) 2 bit write only self.reg[8] = byte & 0x03; }, CrtcRegister::MaximumScanlineAddress => { // (R9) 5 bit write only self.reg[9] = byte & 0x1F; } CrtcRegister::CursorStartLine => { // (R10) 7 bit bitfield. Write only. self.reg[10] = byte & 0x7F; self.cursor_start_line = byte & CURSOR_LINE_MASK; match byte & CURSOR_ATTR_MASK >> 4 { 0b00 | 0b10 => { self.cursor_status = true; self.cursor_slow_blink = false; } 0b01 => { self.cursor_status = false; self.cursor_slow_blink = false; } _ => { self.cursor_status = true; self.cursor_slow_blink = true; } } } CrtcRegister::CursorEndLine => { // (R11) 5 bit write only self.reg[11] = byte & 0x1F; } CrtcRegister::StartAddressH => { // (R12) 6 bit write only self.reg[12] = byte & 0x3F; trace_regs!(self); trace!( self, "CRTC Register Write (0Ch): StartAddressH updated: {:02X}", byte ); self.update_start_address(); } CrtcRegister::StartAddressL => { // (R13) 8 bit write only self.reg[13] = byte; trace_regs!(self); trace!( self, "CRTC Register Write (0Dh): StartAddressL updated: {:02X}", byte ); self.update_start_address(); } CrtcRegister::CursorAddressH => { // (R14) 6 bit read/write self.reg[14] = byte & 0x3F; self.update_cursor_address(); } CrtcRegister::CursorAddressL => { 
// (R15) 8 bit read/write self.reg[15] = byte; self.update_cursor_address(); } CrtcRegister::LightPenPositionH => { // (R16) 6 bit read only } CrtcRegister::LightPenPositionL => { // (R17) 8 bit read only } } } pub fn read_register(&self) -> u8 { match self.reg_select { CursorAddressH | CursorAddressL | LightPenPositionH | LightPenPositionL => { self.reg[self.reg_select as usize] } _ => REGISTER_UNREADABLE_VALUE } } pub fn read_address(&self) -> u16 { self.vma } fn update_start_address(&mut self) { self.start_address = (self.reg[12] as u16) << 8 | self.reg[13] as u16 } fn update_cursor_address(&mut self) { self.cursor_address = (self.reg[14] as u16) << 8 | self.reg[15] as u16 } }
{
identifier_name
mc6845.rs
/* MartyPC https://github.com/dbalsom/martypc Copyright 2022-2023 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------- devices::mc6845.rs Implementation of the Motorola MC6845 CRT controller. Used internally by the MDA and CGA video cards. */ use crate::tracelogger::TraceLogger; const CURSOR_LINE_MASK: u8 = 0b0000_1111; const CURSOR_ATTR_MASK: u8 = 0b0011_0000; const REGISTER_MAX: usize = 17; const REGISTER_UNREADABLE_VALUE: u8 = 0x00; #[derive (Copy, Clone, Debug)] pub enum CrtcRegister { HorizontalTotal, HorizontalDisplayed, HorizontalSyncPosition, SyncWidth, VerticalTotal, VerticalTotalAdjust, VerticalDisplayed, VerticalSync, InterlaceMode, MaximumScanlineAddress, CursorStartLine, CursorEndLine, StartAddressH, StartAddressL, CursorAddressH, CursorAddressL, LightPenPositionH, LightPenPositionL, } use crate::mc6845::CrtcRegister::*; macro_rules! 
trace { ($self:ident, $($t:tt)*) => {{ $self.trace_logger.print(&format!($($t)*)); $self.trace_logger.print("\n".to_string()); }}; } macro_rules! trace_regs { ($self:ident) => { $self.trace_logger.print( &format!("") /* &format!( "[SL:{:03} HCC:{:03} VCC:{:03} VT:{:03} VS:{:03}] ", $self.scanline, $self.hcc_c0, $self.vcc_c4, $self.crtc_vertical_total, $self.crtc_vertical_sync_pos ) */ ); }; } pub struct Crtc6845 { reg: [u8; 18], // Externally-accessable CRTC register file reg_select: CrtcRegister, // Selected CRTC register start_address: u16, // Calculated value from R12 & R13 cursor_address: u16, // Calculated value from R14 & R15 lightpen_position: u16, // Calculated value from R16 & R17 cursor_status: bool, cursor_start_line: u8, cursor_slow_blink: bool, cursor_blink_rate: f64, display_enable: bool, // True if we are in counting in the display area, false otherwise hcc_c0: u8, // Horizontal character counter (x pos of character) vlc_c9: u8, // Vertical line counter - counts during vsync period vcc_c4: u8, // Vertical character counter (y pos of character) vsc_c3h: u8, hsc_c3l: u8, vtac_c5: u8, vma: u16, // VMA register - Video memory address vma_t: u16, // VMA' register - Video memory address temporary trace_logger: TraceLogger, } impl Crtc6845 { fn new(trace_logger: TraceLogger) -> Self { Self { reg: [0; 18], reg_select: HorizontalTotal, start_address: 0, cursor_address: 0, lightpen_position: 0, cursor_status: false, cursor_start_line: 0, cursor_slow_blink: false, cursor_blink_rate: 0.0, display_enable: false, hcc_c0: 0, vlc_c9: 0, vcc_c4: 0, vsc_c3h: 0, hsc_c3l: 0, vtac_c5: 0, vma: 0, vma_t: 0, trace_logger } } pub fn select_register(&mut self, idx: usize) { if idx > REGISTER_MAX { return } let reg_select = match idx { 0 => HorizontalTotal, 1 => HorizontalDisplayed, 2 => HorizontalSyncPosition, 3 => SyncWidth, 4 => VerticalTotal, 5 => VerticalTotalAdjust, 6 => VerticalDisplayed, 7 => VerticalSync, 8 => InterlaceMode, 9 => MaximumScanlineAddress, 10 => 
CursorStartLine, 11 => CursorEndLine, 12 => StartAddressH, 13 => StartAddressL, 14 => CursorAddressH, 15 => CursorAddressL, 16 => LightPenPositionH, _ => LightPenPositionL, }; } pub fn write_register(&mut self, byte: u8) {
b fn read_register(&self) -> u8 { match self.reg_select { CursorAddressH | CursorAddressL | LightPenPositionH | LightPenPositionL => { self.reg[self.reg_select as usize] } _ => REGISTER_UNREADABLE_VALUE } } pub fn read_address(&self) -> u16 { self.vma } fn update_start_address(&mut self) { self.start_address = (self.reg[12] as u16) << 8 | self.reg[13] as u16 } fn update_cursor_address(&mut self) { self.cursor_address = (self.reg[14] as u16) << 8 | self.reg[15] as u16 } }
match self.reg_select { CrtcRegister::HorizontalTotal => { // (R0) 8 bit write only self.reg[0] = byte; }, CrtcRegister::HorizontalDisplayed => { // (R1) 8 bit write only self.reg[1] = byte; } CrtcRegister::HorizontalSyncPosition => { // (R2) 8 bit write only self.reg[2] = byte; }, CrtcRegister::SyncWidth => { // (R3) 8 bit write only self.reg[3] = byte; }, CrtcRegister::VerticalTotal => { // (R4) 7 bit write only self.reg[4] = byte & 0x7F; trace_regs!(self); trace!( self, "CRTC Register Write (04h): VerticalTotal updated: {}", self.reg[4] ) }, CrtcRegister::VerticalTotalAdjust => { // (R5) 5 bit write only self.reg[5] = byte & 0x1F; } CrtcRegister::VerticalDisplayed => { // (R6) 7 bit write only self.reg[6] = byte & 0x7F; }, CrtcRegister::VerticalSync => { // (R7) 7 bit write only self.reg[7] = byte & 0x7F; trace_regs!(self); trace!( self, "CRTC Register Write (07h): VerticalSync updated: {}", self.reg[7] ) }, CrtcRegister::InterlaceMode => { // (R8) 2 bit write only self.reg[8] = byte & 0x03; }, CrtcRegister::MaximumScanlineAddress => { // (R9) 5 bit write only self.reg[9] = byte & 0x1F; } CrtcRegister::CursorStartLine => { // (R10) 7 bit bitfield. Write only. 
self.reg[10] = byte & 0x7F; self.cursor_start_line = byte & CURSOR_LINE_MASK; match byte & CURSOR_ATTR_MASK >> 4 { 0b00 | 0b10 => { self.cursor_status = true; self.cursor_slow_blink = false; } 0b01 => { self.cursor_status = false; self.cursor_slow_blink = false; } _ => { self.cursor_status = true; self.cursor_slow_blink = true; } } } CrtcRegister::CursorEndLine => { // (R11) 5 bit write only self.reg[11] = byte & 0x1F; } CrtcRegister::StartAddressH => { // (R12) 6 bit write only self.reg[12] = byte & 0x3F; trace_regs!(self); trace!( self, "CRTC Register Write (0Ch): StartAddressH updated: {:02X}", byte ); self.update_start_address(); } CrtcRegister::StartAddressL => { // (R13) 8 bit write only self.reg[13] = byte; trace_regs!(self); trace!( self, "CRTC Register Write (0Dh): StartAddressL updated: {:02X}", byte ); self.update_start_address(); } CrtcRegister::CursorAddressH => { // (R14) 6 bit read/write self.reg[14] = byte & 0x3F; self.update_cursor_address(); } CrtcRegister::CursorAddressL => { // (R15) 8 bit read/write self.reg[15] = byte; self.update_cursor_address(); } CrtcRegister::LightPenPositionH => { // (R16) 6 bit read only } CrtcRegister::LightPenPositionL => { // (R17) 8 bit read only } } } pu
identifier_body
UnFlowLoss.py
from typing import Dict import torch import torch.nn as nn import torch.nn.functional as F from .loss_functions import SSIM __all__ = ['unFlowLoss', 'flow_warp'] def mesh_grid(batch_sz, height, width): ''' Creates meshgrid of two dimensions which is the pixel location ''' # mesh grid x_base = torch.arange(0, width).repeat(batch_sz, height, 1) # BHW y_base = torch.arange(0, height).repeat(batch_sz, width, 1).transpose(1, 2) # BHW base_grid = torch.stack([x_base, y_base], 1) # B2HW return base_grid def norm_grid(v_grid): ''' Normalizses a meshgrid between (-1,1) ''' _, _, height, width = v_grid.size() # scale grid to [-1,1] v_grid_norm = torch.zeros_like(v_grid) v_grid_norm[:, 0, :, :] = 2.0 * v_grid[:, 0, :, :] / (width - 1) - 1.0 v_grid_norm[:, 1, :, :] = 2.0 * v_grid[:, 1, :, :] / (height - 1) - 1.0 return v_grid_norm.permute(0, 2, 3, 1) # BHW2 def get_corresponding_map(data): """ :param data: unnormalized coordinates Bx2xHxW :return: Bx1xHxW """ B, _, H, W = data.size() x = data[:, 0, :, :].view(B, -1) # BxN (N=H*W) y = data[:, 1, :, :].view(B, -1) x1 = torch.floor(x) x_floor = x1.clamp(0, W - 1) y1 = torch.floor(y) y_floor = y1.clamp(0, H - 1) x0 = x1 + 1 x_ceil = x0.clamp(0, W - 1) y0 = y1 + 1 y_ceil = y0.clamp(0, H - 1) x_ceil_out = x0 != x_ceil y_ceil_out = y0 != y_ceil x_floor_out = x1 != x_floor y_floor_out = y1 != y_floor invalid = torch.cat([x_ceil_out | y_ceil_out, x_ceil_out | y_floor_out, x_floor_out | y_ceil_out, x_floor_out | y_floor_out], dim=1) # encode coordinates, since the scatter function can only index along one axis corresponding_map = torch.zeros(B, H * W).type_as(data) indices = torch.cat([x_ceil + y_ceil * W, x_ceil + y_floor * W, x_floor + y_ceil * W, x_floor + y_floor * W], 1).long() # BxN (N=4*H*W) values = torch.cat([(1 - torch.abs(x - x_ceil)) * (1 - torch.abs(y - y_ceil)), (1 - torch.abs(x - x_ceil)) * (1 - torch.abs(y - y_floor)), (1 - torch.abs(x - x_floor)) * (1 - torch.abs(y - y_ceil)), (1 - torch.abs(x - x_floor)) * (1 - 
torch.abs(y - y_floor))], 1) # values = torch.ones_like(values) values[invalid] = 0 corresponding_map.scatter_add_(1, indices, values) # decode coordinates corresponding_map = corresponding_map.view(B, H, W) return corresponding_map.unsqueeze(1) def flow_warp(image, flow12, pad='border', mode='bilinear'): ''' Warps an image given a flow prediction using grid_sample ''' batch_sz, _, height, width = image.size() base_grid = mesh_grid(batch_sz, height, width).type_as(image) # B2HW v_grid = norm_grid(base_grid + flow12) # BHW2 im1_recons = nn.functional.grid_sample(image, v_grid, mode=mode, padding_mode=pad, align_corners=False) return im1_recons def get_occu_mask_bidirection(flow12, flow21, scale=0.01, bias=0.5): ''' Get an occlusion mask using both flows such that they match each other ''' flow21_warped = flow_warp(flow21, flow12, pad='zeros') flow12_diff = flow12 + flow21_warped mag = (flow12 * flow12).sum(1, keepdim=True) + \ (flow21_warped * flow21_warped).sum(1, keepdim=True) occ_thresh = scale * mag + bias occ = (flow12_diff * flow12_diff).sum(1, keepdim=True) > occ_thresh return occ.float() def get_occu_mask_backward(flow21, theta=0.2): ''' Get an occlusion mask using backward propagation ''' B, _, H, W = flow21.size() base_grid = mesh_grid(B, H, W).type_as(flow21) # B2HW corr_map = get_corresponding_map(base_grid + flow21) # BHW occu_mask = corr_map.clamp(min=0., max=1.) 
< theta return occu_mask.float() # Credit: https://github.com/simonmeister/UnFlow/blob/master/src/e2eflow/core/losses.py def TernaryLoss(im, im_warp, max_distance=1): patch_size = 2 * max_distance + 1 def _rgb_to_grayscale(image): grayscale = image[:, 0, :, :] * 0.2989 + \ image[:, 1, :, :] * 0.5870 + \ image[:, 2, :, :] * 0.1140 return grayscale.unsqueeze(1) def _ternary_transform(image): intensities = _rgb_to_grayscale(image) * 255 out_channels = patch_size * patch_size w = torch.eye(out_channels).view((out_channels, 1, patch_size, patch_size)) weights = w.type_as(im) patches = F.conv2d(intensities, weights, padding=max_distance) transf = patches - intensities transf_norm = transf / torch.sqrt(0.81 + torch.pow(transf, 2)) return transf_norm def _hamming_distance(t1, t2): dist = torch.pow(t1 - t2, 2) dist_norm = dist / (0.1 + dist) dist_mean = torch.mean(dist_norm, 1, keepdim=True) # instead of sum return dist_mean def _valid_mask(t, padding): n, _, h, w = t.size() inner = torch.ones(n, 1, h - 2 * padding, w - 2 * padding).type_as(t) mask = F.pad(inner, [padding] * 4) return mask t1 = _ternary_transform(im) t2 = _ternary_transform(im_warp) dist = _hamming_distance(t1, t2) mask = _valid_mask(im, max_distance) return dist * mask def gradient(data):
def smooth_grad_1st(flow, image, alpha): img_dx, img_dy = gradient(image) weights_x = torch.exp(-torch.mean(torch.abs(img_dx), 1, keepdim=True) * alpha) weights_y = torch.exp(-torch.mean(torch.abs(img_dy), 1, keepdim=True) * alpha) dx, dy = gradient(flow) loss_x = weights_x * dx.abs() / 2. loss_y = weights_y * dy.abs() / 2 return (loss_x.mean() + loss_y.mean()) / 2. def smooth_grad_2nd(flow, image, alpha): img_dx, img_dy = gradient(image) weights_x = torch.exp(-torch.mean(torch.abs(img_dx), 1, keepdim=True) * alpha) weights_y = torch.exp(-torch.mean(torch.abs(img_dy), 1, keepdim=True) * alpha) dx, dy = gradient(flow) dx2, _ = gradient(dx) _, dy2 = gradient(dy) loss_x = weights_x[:, :, :, 1:] * dx2.abs() loss_y = weights_y[:, :, 1:, :] * dy2.abs() return (loss_x.mean() + loss_y.mean()) / 2. class unFlowLoss(nn.modules.Module): """ Loss function adopted by ARFlow from originally Unflow. """ def __init__(self, weight=1.0, weights=None, consistency=True, back_occ_only=False, **kwargs): super().__init__() self.weight = weight if "l1" in weights: self.l1_weight = weights["l1"] if "ssim" in weights: self.ssim_weight = weights["ssim"] self.SSIM = SSIM().to("cuda" if torch.cuda.is_available() else "cpu") if "ternary" in weights: self.ternary_weight = weights["ternary"] if 'smooth' in kwargs: self.smooth_args = kwargs['smooth'] else: self.smooth_args = {"degree": 2, "alpha" : 0.2, "weighting": 75.0} self.smooth_w = 75.0 if 'smooth_w' not in kwargs else kwargs['smooth_w'] if 'w_sm_scales' in kwargs: self.w_sm_scales = kwargs['w_sm_scales'] else: self.w_sm_scales = [1.0, 0.0, 0.0, 0.0, 0.0] if 'w_wrp_scales' in kwargs: self.w_wrp_scales = kwargs['w_wrp_scales'] else: self.w_wrp_scales = [1.0, 1.0, 1.0, 1.0, 0.0] self.consistency = consistency self.back_occ_only = back_occ_only def loss_photometric(self, im_orig: torch.Tensor, im_recons: torch.Tensor, occu_mask: torch.Tensor): loss = [] if occu_mask.mean() == 0: occu_mask = torch.ones_like(occu_mask) if hasattr(self, 
'l1_weight'): loss += [self.l1_weight * (im_orig - im_recons).abs() * occu_mask] if hasattr(self, 'ssim_weight'): loss += [self.ssim_weight * self.SSIM(im_recons * occu_mask, im_orig * occu_mask)] if hasattr(self, 'ternary_weight'): loss += [self.ternary_weight *\ TernaryLoss(im_recons * occu_mask, im_orig * occu_mask)] return sum([l.mean() for l in loss]) / occu_mask.mean() def loss_smooth(self, flow, im_scaled): if self.smooth_args['degree'] == 2: func_smooth = smooth_grad_2nd elif self.smooth_args['degree'] == 1: func_smooth = smooth_grad_1st else: raise NotImplementedError(self.smooth_args['degree']) loss = [] loss += [func_smooth(flow, im_scaled, self.smooth_args['alpha'])] return sum([l.mean() for l in loss]) def forward(self, predictions: Dict[str, torch.Tensor], targets: Dict[str, torch.Tensor]) -> torch.Tensor: """ :param output: Multi-scale forward/backward flows n * [B x 4 x h x w] :param target: image pairs Nx6xHxW :return: """ assert all(key in predictions for key in ['flow', 'flow_b']) and \ all(key in targets for key in ['l_img', 'l_seq']) pyramid_flows = [torch.cat([flo12, flo21], 1) for flo12, flo21 in zip(predictions['flow'], predictions['flow_b'])] pyramid_warp_losses = [] pyramid_smooth_losses = [] s = 1. for i, flow in enumerate(pyramid_flows): if self.w_wrp_scales[i] == 0: pyramid_warp_losses.append(0) pyramid_smooth_losses.append(0) continue # resize images to match the size of layer im1_scaled = F.interpolate(targets['l_img'], tuple(flow.size()[2:]), mode='area') im2_scaled = F.interpolate(targets['l_seq'], tuple(flow.size()[2:]), mode='area') im1_recons = flow_warp(im2_scaled, flow[:, :2], pad='border') im2_recons = flow_warp(im1_scaled, flow[:, 2:], pad='border') # Occlusion mask is broken, always returns zeros... 
# if i == 0: # if self.back_occ_only: # occu_mask1 = 1 - get_occu_mask_backward(flow[:, 2:], theta=0.2) # occu_mask2 = 1 - get_occu_mask_backward(flow[:, :2], theta=0.2) # else: # occu_mask1 = 1 - get_occu_mask_bidirection(flow[:, :2], flow[:, 2:]) # occu_mask2 = 1 - get_occu_mask_bidirection(flow[:, 2:], flow[:, :2]) # else: # occu_mask1 = F.interpolate(occu_mask1, tuple(flow.size()[2:]), mode='nearest') # occu_mask2 = F.interpolate(occu_mask2, tuple(flow.size()[2:]), mode='nearest') occu_mask1 = occu_mask2 = torch.ones_like(im1_scaled) loss_warp = self.loss_photometric(im1_scaled, im1_recons, occu_mask1) if i == 0: s = min(flow.size()[2:]) loss_smooth = self.loss_smooth(flow[:, :2] / s, im1_scaled) if self.consistency: loss_warp += self.loss_photometric(im2_scaled, im2_recons, occu_mask2) loss_smooth += self.loss_smooth(flow[:, 2:] / s, im2_scaled) loss_warp /= 2. loss_smooth /= 2. pyramid_warp_losses.append(loss_warp) pyramid_smooth_losses.append(loss_smooth) pyramid_warp_losses = [l * w for l, w in zip(pyramid_warp_losses, self.w_wrp_scales)] pyramid_smooth_losses = [l * w for l, w in zip(pyramid_smooth_losses, self.w_sm_scales)] return self.weight * (sum(pyramid_warp_losses) + self.smooth_args['weighting'] * sum(pyramid_smooth_losses))
D_dy = data[:, :, 1:] - data[:, :, :-1] D_dx = data[:, :, :, 1:] - data[:, :, :, :-1] return D_dx, D_dy
identifier_body
UnFlowLoss.py
from typing import Dict import torch import torch.nn as nn import torch.nn.functional as F from .loss_functions import SSIM __all__ = ['unFlowLoss', 'flow_warp'] def mesh_grid(batch_sz, height, width): ''' Creates meshgrid of two dimensions which is the pixel location ''' # mesh grid x_base = torch.arange(0, width).repeat(batch_sz, height, 1) # BHW y_base = torch.arange(0, height).repeat(batch_sz, width, 1).transpose(1, 2) # BHW base_grid = torch.stack([x_base, y_base], 1) # B2HW return base_grid def norm_grid(v_grid): ''' Normalizses a meshgrid between (-1,1) ''' _, _, height, width = v_grid.size() # scale grid to [-1,1] v_grid_norm = torch.zeros_like(v_grid) v_grid_norm[:, 0, :, :] = 2.0 * v_grid[:, 0, :, :] / (width - 1) - 1.0 v_grid_norm[:, 1, :, :] = 2.0 * v_grid[:, 1, :, :] / (height - 1) - 1.0 return v_grid_norm.permute(0, 2, 3, 1) # BHW2 def get_corresponding_map(data): """ :param data: unnormalized coordinates Bx2xHxW :return: Bx1xHxW """ B, _, H, W = data.size() x = data[:, 0, :, :].view(B, -1) # BxN (N=H*W) y = data[:, 1, :, :].view(B, -1) x1 = torch.floor(x) x_floor = x1.clamp(0, W - 1) y1 = torch.floor(y) y_floor = y1.clamp(0, H - 1) x0 = x1 + 1 x_ceil = x0.clamp(0, W - 1) y0 = y1 + 1 y_ceil = y0.clamp(0, H - 1) x_ceil_out = x0 != x_ceil y_ceil_out = y0 != y_ceil x_floor_out = x1 != x_floor y_floor_out = y1 != y_floor invalid = torch.cat([x_ceil_out | y_ceil_out, x_ceil_out | y_floor_out, x_floor_out | y_ceil_out, x_floor_out | y_floor_out], dim=1) # encode coordinates, since the scatter function can only index along one axis corresponding_map = torch.zeros(B, H * W).type_as(data) indices = torch.cat([x_ceil + y_ceil * W, x_ceil + y_floor * W, x_floor + y_ceil * W, x_floor + y_floor * W], 1).long() # BxN (N=4*H*W) values = torch.cat([(1 - torch.abs(x - x_ceil)) * (1 - torch.abs(y - y_ceil)), (1 - torch.abs(x - x_ceil)) * (1 - torch.abs(y - y_floor)), (1 - torch.abs(x - x_floor)) * (1 - torch.abs(y - y_ceil)), (1 - torch.abs(x - x_floor)) * (1 - 
torch.abs(y - y_floor))], 1) # values = torch.ones_like(values) values[invalid] = 0 corresponding_map.scatter_add_(1, indices, values) # decode coordinates corresponding_map = corresponding_map.view(B, H, W) return corresponding_map.unsqueeze(1) def flow_warp(image, flow12, pad='border', mode='bilinear'): ''' Warps an image given a flow prediction using grid_sample ''' batch_sz, _, height, width = image.size() base_grid = mesh_grid(batch_sz, height, width).type_as(image) # B2HW v_grid = norm_grid(base_grid + flow12) # BHW2 im1_recons = nn.functional.grid_sample(image, v_grid, mode=mode, padding_mode=pad, align_corners=False) return im1_recons def get_occu_mask_bidirection(flow12, flow21, scale=0.01, bias=0.5): ''' Get an occlusion mask using both flows such that they match each other ''' flow21_warped = flow_warp(flow21, flow12, pad='zeros') flow12_diff = flow12 + flow21_warped mag = (flow12 * flow12).sum(1, keepdim=True) + \ (flow21_warped * flow21_warped).sum(1, keepdim=True) occ_thresh = scale * mag + bias occ = (flow12_diff * flow12_diff).sum(1, keepdim=True) > occ_thresh return occ.float() def get_occu_mask_backward(flow21, theta=0.2): ''' Get an occlusion mask using backward propagation ''' B, _, H, W = flow21.size() base_grid = mesh_grid(B, H, W).type_as(flow21) # B2HW corr_map = get_corresponding_map(base_grid + flow21) # BHW occu_mask = corr_map.clamp(min=0., max=1.) 
< theta return occu_mask.float() # Credit: https://github.com/simonmeister/UnFlow/blob/master/src/e2eflow/core/losses.py def TernaryLoss(im, im_warp, max_distance=1): patch_size = 2 * max_distance + 1 def _rgb_to_grayscale(image): grayscale = image[:, 0, :, :] * 0.2989 + \ image[:, 1, :, :] * 0.5870 + \ image[:, 2, :, :] * 0.1140 return grayscale.unsqueeze(1) def _ternary_transform(image): intensities = _rgb_to_grayscale(image) * 255 out_channels = patch_size * patch_size w = torch.eye(out_channels).view((out_channels, 1, patch_size, patch_size)) weights = w.type_as(im) patches = F.conv2d(intensities, weights, padding=max_distance) transf = patches - intensities transf_norm = transf / torch.sqrt(0.81 + torch.pow(transf, 2)) return transf_norm def _hamming_distance(t1, t2): dist = torch.pow(t1 - t2, 2) dist_norm = dist / (0.1 + dist) dist_mean = torch.mean(dist_norm, 1, keepdim=True) # instead of sum return dist_mean def _valid_mask(t, padding): n, _, h, w = t.size() inner = torch.ones(n, 1, h - 2 * padding, w - 2 * padding).type_as(t) mask = F.pad(inner, [padding] * 4) return mask t1 = _ternary_transform(im) t2 = _ternary_transform(im_warp) dist = _hamming_distance(t1, t2) mask = _valid_mask(im, max_distance) return dist * mask def gradient(data): D_dy = data[:, :, 1:] - data[:, :, :-1] D_dx = data[:, :, :, 1:] - data[:, :, :, :-1] return D_dx, D_dy def smooth_grad_1st(flow, image, alpha): img_dx, img_dy = gradient(image) weights_x = torch.exp(-torch.mean(torch.abs(img_dx), 1, keepdim=True) * alpha) weights_y = torch.exp(-torch.mean(torch.abs(img_dy), 1, keepdim=True) * alpha) dx, dy = gradient(flow) loss_x = weights_x * dx.abs() / 2. loss_y = weights_y * dy.abs() / 2 return (loss_x.mean() + loss_y.mean()) / 2. 
def smooth_grad_2nd(flow, image, alpha): img_dx, img_dy = gradient(image) weights_x = torch.exp(-torch.mean(torch.abs(img_dx), 1, keepdim=True) * alpha) weights_y = torch.exp(-torch.mean(torch.abs(img_dy), 1, keepdim=True) * alpha) dx, dy = gradient(flow) dx2, _ = gradient(dx) _, dy2 = gradient(dy) loss_x = weights_x[:, :, :, 1:] * dx2.abs() loss_y = weights_y[:, :, 1:, :] * dy2.abs() return (loss_x.mean() + loss_y.mean()) / 2. class unFlowLoss(nn.modules.Module): """ Loss function adopted by ARFlow from originally Unflow. """ def __init__(self, weight=1.0, weights=None, consistency=True, back_occ_only=False, **kwargs): super().__init__() self.weight = weight if "l1" in weights: self.l1_weight = weights["l1"] if "ssim" in weights: self.ssim_weight = weights["ssim"] self.SSIM = SSIM().to("cuda" if torch.cuda.is_available() else "cpu") if "ternary" in weights: self.ternary_weight = weights["ternary"] if 'smooth' in kwargs: self.smooth_args = kwargs['smooth'] else:
self.smooth_w = 75.0 if 'smooth_w' not in kwargs else kwargs['smooth_w'] if 'w_sm_scales' in kwargs: self.w_sm_scales = kwargs['w_sm_scales'] else: self.w_sm_scales = [1.0, 0.0, 0.0, 0.0, 0.0] if 'w_wrp_scales' in kwargs: self.w_wrp_scales = kwargs['w_wrp_scales'] else: self.w_wrp_scales = [1.0, 1.0, 1.0, 1.0, 0.0] self.consistency = consistency self.back_occ_only = back_occ_only def loss_photometric(self, im_orig: torch.Tensor, im_recons: torch.Tensor, occu_mask: torch.Tensor): loss = [] if occu_mask.mean() == 0: occu_mask = torch.ones_like(occu_mask) if hasattr(self, 'l1_weight'): loss += [self.l1_weight * (im_orig - im_recons).abs() * occu_mask] if hasattr(self, 'ssim_weight'): loss += [self.ssim_weight * self.SSIM(im_recons * occu_mask, im_orig * occu_mask)] if hasattr(self, 'ternary_weight'): loss += [self.ternary_weight *\ TernaryLoss(im_recons * occu_mask, im_orig * occu_mask)] return sum([l.mean() for l in loss]) / occu_mask.mean() def loss_smooth(self, flow, im_scaled): if self.smooth_args['degree'] == 2: func_smooth = smooth_grad_2nd elif self.smooth_args['degree'] == 1: func_smooth = smooth_grad_1st else: raise NotImplementedError(self.smooth_args['degree']) loss = [] loss += [func_smooth(flow, im_scaled, self.smooth_args['alpha'])] return sum([l.mean() for l in loss]) def forward(self, predictions: Dict[str, torch.Tensor], targets: Dict[str, torch.Tensor]) -> torch.Tensor: """ :param output: Multi-scale forward/backward flows n * [B x 4 x h x w] :param target: image pairs Nx6xHxW :return: """ assert all(key in predictions for key in ['flow', 'flow_b']) and \ all(key in targets for key in ['l_img', 'l_seq']) pyramid_flows = [torch.cat([flo12, flo21], 1) for flo12, flo21 in zip(predictions['flow'], predictions['flow_b'])] pyramid_warp_losses = [] pyramid_smooth_losses = [] s = 1. 
for i, flow in enumerate(pyramid_flows): if self.w_wrp_scales[i] == 0: pyramid_warp_losses.append(0) pyramid_smooth_losses.append(0) continue # resize images to match the size of layer im1_scaled = F.interpolate(targets['l_img'], tuple(flow.size()[2:]), mode='area') im2_scaled = F.interpolate(targets['l_seq'], tuple(flow.size()[2:]), mode='area') im1_recons = flow_warp(im2_scaled, flow[:, :2], pad='border') im2_recons = flow_warp(im1_scaled, flow[:, 2:], pad='border') # Occlusion mask is broken, always returns zeros... # if i == 0: # if self.back_occ_only: # occu_mask1 = 1 - get_occu_mask_backward(flow[:, 2:], theta=0.2) # occu_mask2 = 1 - get_occu_mask_backward(flow[:, :2], theta=0.2) # else: # occu_mask1 = 1 - get_occu_mask_bidirection(flow[:, :2], flow[:, 2:]) # occu_mask2 = 1 - get_occu_mask_bidirection(flow[:, 2:], flow[:, :2]) # else: # occu_mask1 = F.interpolate(occu_mask1, tuple(flow.size()[2:]), mode='nearest') # occu_mask2 = F.interpolate(occu_mask2, tuple(flow.size()[2:]), mode='nearest') occu_mask1 = occu_mask2 = torch.ones_like(im1_scaled) loss_warp = self.loss_photometric(im1_scaled, im1_recons, occu_mask1) if i == 0: s = min(flow.size()[2:]) loss_smooth = self.loss_smooth(flow[:, :2] / s, im1_scaled) if self.consistency: loss_warp += self.loss_photometric(im2_scaled, im2_recons, occu_mask2) loss_smooth += self.loss_smooth(flow[:, 2:] / s, im2_scaled) loss_warp /= 2. loss_smooth /= 2. pyramid_warp_losses.append(loss_warp) pyramid_smooth_losses.append(loss_smooth) pyramid_warp_losses = [l * w for l, w in zip(pyramid_warp_losses, self.w_wrp_scales)] pyramid_smooth_losses = [l * w for l, w in zip(pyramid_smooth_losses, self.w_sm_scales)] return self.weight * (sum(pyramid_warp_losses) + self.smooth_args['weighting'] * sum(pyramid_smooth_losses))
self.smooth_args = {"degree": 2, "alpha" : 0.2, "weighting": 75.0}
conditional_block
UnFlowLoss.py
from typing import Dict import torch import torch.nn as nn import torch.nn.functional as F from .loss_functions import SSIM __all__ = ['unFlowLoss', 'flow_warp'] def mesh_grid(batch_sz, height, width): ''' Creates meshgrid of two dimensions which is the pixel location ''' # mesh grid x_base = torch.arange(0, width).repeat(batch_sz, height, 1) # BHW y_base = torch.arange(0, height).repeat(batch_sz, width, 1).transpose(1, 2) # BHW base_grid = torch.stack([x_base, y_base], 1) # B2HW return base_grid def norm_grid(v_grid): ''' Normalizses a meshgrid between (-1,1) ''' _, _, height, width = v_grid.size() # scale grid to [-1,1] v_grid_norm = torch.zeros_like(v_grid) v_grid_norm[:, 0, :, :] = 2.0 * v_grid[:, 0, :, :] / (width - 1) - 1.0 v_grid_norm[:, 1, :, :] = 2.0 * v_grid[:, 1, :, :] / (height - 1) - 1.0 return v_grid_norm.permute(0, 2, 3, 1) # BHW2 def get_corresponding_map(data): """ :param data: unnormalized coordinates Bx2xHxW :return: Bx1xHxW """ B, _, H, W = data.size() x = data[:, 0, :, :].view(B, -1) # BxN (N=H*W) y = data[:, 1, :, :].view(B, -1) x1 = torch.floor(x) x_floor = x1.clamp(0, W - 1) y1 = torch.floor(y) y_floor = y1.clamp(0, H - 1) x0 = x1 + 1 x_ceil = x0.clamp(0, W - 1) y0 = y1 + 1 y_ceil = y0.clamp(0, H - 1) x_ceil_out = x0 != x_ceil y_ceil_out = y0 != y_ceil x_floor_out = x1 != x_floor y_floor_out = y1 != y_floor invalid = torch.cat([x_ceil_out | y_ceil_out, x_ceil_out | y_floor_out, x_floor_out | y_ceil_out, x_floor_out | y_floor_out], dim=1) # encode coordinates, since the scatter function can only index along one axis corresponding_map = torch.zeros(B, H * W).type_as(data) indices = torch.cat([x_ceil + y_ceil * W, x_ceil + y_floor * W, x_floor + y_ceil * W, x_floor + y_floor * W], 1).long() # BxN (N=4*H*W) values = torch.cat([(1 - torch.abs(x - x_ceil)) * (1 - torch.abs(y - y_ceil)), (1 - torch.abs(x - x_ceil)) * (1 - torch.abs(y - y_floor)), (1 - torch.abs(x - x_floor)) * (1 - torch.abs(y - y_ceil)), (1 - torch.abs(x - x_floor)) * (1 - 
torch.abs(y - y_floor))], 1) # values = torch.ones_like(values) values[invalid] = 0 corresponding_map.scatter_add_(1, indices, values) # decode coordinates corresponding_map = corresponding_map.view(B, H, W) return corresponding_map.unsqueeze(1) def
(image, flow12, pad='border', mode='bilinear'): ''' Warps an image given a flow prediction using grid_sample ''' batch_sz, _, height, width = image.size() base_grid = mesh_grid(batch_sz, height, width).type_as(image) # B2HW v_grid = norm_grid(base_grid + flow12) # BHW2 im1_recons = nn.functional.grid_sample(image, v_grid, mode=mode, padding_mode=pad, align_corners=False) return im1_recons def get_occu_mask_bidirection(flow12, flow21, scale=0.01, bias=0.5): ''' Get an occlusion mask using both flows such that they match each other ''' flow21_warped = flow_warp(flow21, flow12, pad='zeros') flow12_diff = flow12 + flow21_warped mag = (flow12 * flow12).sum(1, keepdim=True) + \ (flow21_warped * flow21_warped).sum(1, keepdim=True) occ_thresh = scale * mag + bias occ = (flow12_diff * flow12_diff).sum(1, keepdim=True) > occ_thresh return occ.float() def get_occu_mask_backward(flow21, theta=0.2): ''' Get an occlusion mask using backward propagation ''' B, _, H, W = flow21.size() base_grid = mesh_grid(B, H, W).type_as(flow21) # B2HW corr_map = get_corresponding_map(base_grid + flow21) # BHW occu_mask = corr_map.clamp(min=0., max=1.) 
< theta return occu_mask.float() # Credit: https://github.com/simonmeister/UnFlow/blob/master/src/e2eflow/core/losses.py def TernaryLoss(im, im_warp, max_distance=1): patch_size = 2 * max_distance + 1 def _rgb_to_grayscale(image): grayscale = image[:, 0, :, :] * 0.2989 + \ image[:, 1, :, :] * 0.5870 + \ image[:, 2, :, :] * 0.1140 return grayscale.unsqueeze(1) def _ternary_transform(image): intensities = _rgb_to_grayscale(image) * 255 out_channels = patch_size * patch_size w = torch.eye(out_channels).view((out_channels, 1, patch_size, patch_size)) weights = w.type_as(im) patches = F.conv2d(intensities, weights, padding=max_distance) transf = patches - intensities transf_norm = transf / torch.sqrt(0.81 + torch.pow(transf, 2)) return transf_norm def _hamming_distance(t1, t2): dist = torch.pow(t1 - t2, 2) dist_norm = dist / (0.1 + dist) dist_mean = torch.mean(dist_norm, 1, keepdim=True) # instead of sum return dist_mean def _valid_mask(t, padding): n, _, h, w = t.size() inner = torch.ones(n, 1, h - 2 * padding, w - 2 * padding).type_as(t) mask = F.pad(inner, [padding] * 4) return mask t1 = _ternary_transform(im) t2 = _ternary_transform(im_warp) dist = _hamming_distance(t1, t2) mask = _valid_mask(im, max_distance) return dist * mask def gradient(data): D_dy = data[:, :, 1:] - data[:, :, :-1] D_dx = data[:, :, :, 1:] - data[:, :, :, :-1] return D_dx, D_dy def smooth_grad_1st(flow, image, alpha): img_dx, img_dy = gradient(image) weights_x = torch.exp(-torch.mean(torch.abs(img_dx), 1, keepdim=True) * alpha) weights_y = torch.exp(-torch.mean(torch.abs(img_dy), 1, keepdim=True) * alpha) dx, dy = gradient(flow) loss_x = weights_x * dx.abs() / 2. loss_y = weights_y * dy.abs() / 2 return (loss_x.mean() + loss_y.mean()) / 2. 
def smooth_grad_2nd(flow, image, alpha): img_dx, img_dy = gradient(image) weights_x = torch.exp(-torch.mean(torch.abs(img_dx), 1, keepdim=True) * alpha) weights_y = torch.exp(-torch.mean(torch.abs(img_dy), 1, keepdim=True) * alpha) dx, dy = gradient(flow) dx2, _ = gradient(dx) _, dy2 = gradient(dy) loss_x = weights_x[:, :, :, 1:] * dx2.abs() loss_y = weights_y[:, :, 1:, :] * dy2.abs() return (loss_x.mean() + loss_y.mean()) / 2. class unFlowLoss(nn.modules.Module): """ Loss function adopted by ARFlow from originally Unflow. """ def __init__(self, weight=1.0, weights=None, consistency=True, back_occ_only=False, **kwargs): super().__init__() self.weight = weight if "l1" in weights: self.l1_weight = weights["l1"] if "ssim" in weights: self.ssim_weight = weights["ssim"] self.SSIM = SSIM().to("cuda" if torch.cuda.is_available() else "cpu") if "ternary" in weights: self.ternary_weight = weights["ternary"] if 'smooth' in kwargs: self.smooth_args = kwargs['smooth'] else: self.smooth_args = {"degree": 2, "alpha" : 0.2, "weighting": 75.0} self.smooth_w = 75.0 if 'smooth_w' not in kwargs else kwargs['smooth_w'] if 'w_sm_scales' in kwargs: self.w_sm_scales = kwargs['w_sm_scales'] else: self.w_sm_scales = [1.0, 0.0, 0.0, 0.0, 0.0] if 'w_wrp_scales' in kwargs: self.w_wrp_scales = kwargs['w_wrp_scales'] else: self.w_wrp_scales = [1.0, 1.0, 1.0, 1.0, 0.0] self.consistency = consistency self.back_occ_only = back_occ_only def loss_photometric(self, im_orig: torch.Tensor, im_recons: torch.Tensor, occu_mask: torch.Tensor): loss = [] if occu_mask.mean() == 0: occu_mask = torch.ones_like(occu_mask) if hasattr(self, 'l1_weight'): loss += [self.l1_weight * (im_orig - im_recons).abs() * occu_mask] if hasattr(self, 'ssim_weight'): loss += [self.ssim_weight * self.SSIM(im_recons * occu_mask, im_orig * occu_mask)] if hasattr(self, 'ternary_weight'): loss += [self.ternary_weight *\ TernaryLoss(im_recons * occu_mask, im_orig * occu_mask)] return sum([l.mean() for l in loss]) / occu_mask.mean() 
def loss_smooth(self, flow, im_scaled): if self.smooth_args['degree'] == 2: func_smooth = smooth_grad_2nd elif self.smooth_args['degree'] == 1: func_smooth = smooth_grad_1st else: raise NotImplementedError(self.smooth_args['degree']) loss = [] loss += [func_smooth(flow, im_scaled, self.smooth_args['alpha'])] return sum([l.mean() for l in loss]) def forward(self, predictions: Dict[str, torch.Tensor], targets: Dict[str, torch.Tensor]) -> torch.Tensor: """ :param output: Multi-scale forward/backward flows n * [B x 4 x h x w] :param target: image pairs Nx6xHxW :return: """ assert all(key in predictions for key in ['flow', 'flow_b']) and \ all(key in targets for key in ['l_img', 'l_seq']) pyramid_flows = [torch.cat([flo12, flo21], 1) for flo12, flo21 in zip(predictions['flow'], predictions['flow_b'])] pyramid_warp_losses = [] pyramid_smooth_losses = [] s = 1. for i, flow in enumerate(pyramid_flows): if self.w_wrp_scales[i] == 0: pyramid_warp_losses.append(0) pyramid_smooth_losses.append(0) continue # resize images to match the size of layer im1_scaled = F.interpolate(targets['l_img'], tuple(flow.size()[2:]), mode='area') im2_scaled = F.interpolate(targets['l_seq'], tuple(flow.size()[2:]), mode='area') im1_recons = flow_warp(im2_scaled, flow[:, :2], pad='border') im2_recons = flow_warp(im1_scaled, flow[:, 2:], pad='border') # Occlusion mask is broken, always returns zeros... 
# if i == 0: # if self.back_occ_only: # occu_mask1 = 1 - get_occu_mask_backward(flow[:, 2:], theta=0.2) # occu_mask2 = 1 - get_occu_mask_backward(flow[:, :2], theta=0.2) # else: # occu_mask1 = 1 - get_occu_mask_bidirection(flow[:, :2], flow[:, 2:]) # occu_mask2 = 1 - get_occu_mask_bidirection(flow[:, 2:], flow[:, :2]) # else: # occu_mask1 = F.interpolate(occu_mask1, tuple(flow.size()[2:]), mode='nearest') # occu_mask2 = F.interpolate(occu_mask2, tuple(flow.size()[2:]), mode='nearest') occu_mask1 = occu_mask2 = torch.ones_like(im1_scaled) loss_warp = self.loss_photometric(im1_scaled, im1_recons, occu_mask1) if i == 0: s = min(flow.size()[2:]) loss_smooth = self.loss_smooth(flow[:, :2] / s, im1_scaled) if self.consistency: loss_warp += self.loss_photometric(im2_scaled, im2_recons, occu_mask2) loss_smooth += self.loss_smooth(flow[:, 2:] / s, im2_scaled) loss_warp /= 2. loss_smooth /= 2. pyramid_warp_losses.append(loss_warp) pyramid_smooth_losses.append(loss_smooth) pyramid_warp_losses = [l * w for l, w in zip(pyramid_warp_losses, self.w_wrp_scales)] pyramid_smooth_losses = [l * w for l, w in zip(pyramid_smooth_losses, self.w_sm_scales)] return self.weight * (sum(pyramid_warp_losses) + self.smooth_args['weighting'] * sum(pyramid_smooth_losses))
flow_warp
identifier_name
UnFlowLoss.py
from typing import Dict import torch import torch.nn as nn import torch.nn.functional as F from .loss_functions import SSIM __all__ = ['unFlowLoss', 'flow_warp'] def mesh_grid(batch_sz, height, width): ''' Creates meshgrid of two dimensions which is the pixel location ''' # mesh grid x_base = torch.arange(0, width).repeat(batch_sz, height, 1) # BHW y_base = torch.arange(0, height).repeat(batch_sz, width, 1).transpose(1, 2) # BHW base_grid = torch.stack([x_base, y_base], 1) # B2HW return base_grid def norm_grid(v_grid): ''' Normalizses a meshgrid between (-1,1) ''' _, _, height, width = v_grid.size() # scale grid to [-1,1] v_grid_norm = torch.zeros_like(v_grid) v_grid_norm[:, 0, :, :] = 2.0 * v_grid[:, 0, :, :] / (width - 1) - 1.0 v_grid_norm[:, 1, :, :] = 2.0 * v_grid[:, 1, :, :] / (height - 1) - 1.0 return v_grid_norm.permute(0, 2, 3, 1) # BHW2 def get_corresponding_map(data): """ :param data: unnormalized coordinates Bx2xHxW :return: Bx1xHxW """ B, _, H, W = data.size() x = data[:, 0, :, :].view(B, -1) # BxN (N=H*W) y = data[:, 1, :, :].view(B, -1) x1 = torch.floor(x) x_floor = x1.clamp(0, W - 1) y1 = torch.floor(y) y_floor = y1.clamp(0, H - 1) x0 = x1 + 1 x_ceil = x0.clamp(0, W - 1) y0 = y1 + 1 y_ceil = y0.clamp(0, H - 1) x_ceil_out = x0 != x_ceil y_ceil_out = y0 != y_ceil x_floor_out = x1 != x_floor y_floor_out = y1 != y_floor invalid = torch.cat([x_ceil_out | y_ceil_out, x_ceil_out | y_floor_out, x_floor_out | y_ceil_out, x_floor_out | y_floor_out], dim=1) # encode coordinates, since the scatter function can only index along one axis corresponding_map = torch.zeros(B, H * W).type_as(data) indices = torch.cat([x_ceil + y_ceil * W, x_ceil + y_floor * W, x_floor + y_ceil * W, x_floor + y_floor * W], 1).long() # BxN (N=4*H*W) values = torch.cat([(1 - torch.abs(x - x_ceil)) * (1 - torch.abs(y - y_ceil)), (1 - torch.abs(x - x_ceil)) * (1 - torch.abs(y - y_floor)), (1 - torch.abs(x - x_floor)) * (1 - torch.abs(y - y_ceil)), (1 - torch.abs(x - x_floor)) * (1 - 
torch.abs(y - y_floor))], 1) # values = torch.ones_like(values)
corresponding_map.scatter_add_(1, indices, values) # decode coordinates corresponding_map = corresponding_map.view(B, H, W) return corresponding_map.unsqueeze(1) def flow_warp(image, flow12, pad='border', mode='bilinear'): ''' Warps an image given a flow prediction using grid_sample ''' batch_sz, _, height, width = image.size() base_grid = mesh_grid(batch_sz, height, width).type_as(image) # B2HW v_grid = norm_grid(base_grid + flow12) # BHW2 im1_recons = nn.functional.grid_sample(image, v_grid, mode=mode, padding_mode=pad, align_corners=False) return im1_recons def get_occu_mask_bidirection(flow12, flow21, scale=0.01, bias=0.5): ''' Get an occlusion mask using both flows such that they match each other ''' flow21_warped = flow_warp(flow21, flow12, pad='zeros') flow12_diff = flow12 + flow21_warped mag = (flow12 * flow12).sum(1, keepdim=True) + \ (flow21_warped * flow21_warped).sum(1, keepdim=True) occ_thresh = scale * mag + bias occ = (flow12_diff * flow12_diff).sum(1, keepdim=True) > occ_thresh return occ.float() def get_occu_mask_backward(flow21, theta=0.2): ''' Get an occlusion mask using backward propagation ''' B, _, H, W = flow21.size() base_grid = mesh_grid(B, H, W).type_as(flow21) # B2HW corr_map = get_corresponding_map(base_grid + flow21) # BHW occu_mask = corr_map.clamp(min=0., max=1.) 
< theta return occu_mask.float() # Credit: https://github.com/simonmeister/UnFlow/blob/master/src/e2eflow/core/losses.py def TernaryLoss(im, im_warp, max_distance=1): patch_size = 2 * max_distance + 1 def _rgb_to_grayscale(image): grayscale = image[:, 0, :, :] * 0.2989 + \ image[:, 1, :, :] * 0.5870 + \ image[:, 2, :, :] * 0.1140 return grayscale.unsqueeze(1) def _ternary_transform(image): intensities = _rgb_to_grayscale(image) * 255 out_channels = patch_size * patch_size w = torch.eye(out_channels).view((out_channels, 1, patch_size, patch_size)) weights = w.type_as(im) patches = F.conv2d(intensities, weights, padding=max_distance) transf = patches - intensities transf_norm = transf / torch.sqrt(0.81 + torch.pow(transf, 2)) return transf_norm def _hamming_distance(t1, t2): dist = torch.pow(t1 - t2, 2) dist_norm = dist / (0.1 + dist) dist_mean = torch.mean(dist_norm, 1, keepdim=True) # instead of sum return dist_mean def _valid_mask(t, padding): n, _, h, w = t.size() inner = torch.ones(n, 1, h - 2 * padding, w - 2 * padding).type_as(t) mask = F.pad(inner, [padding] * 4) return mask t1 = _ternary_transform(im) t2 = _ternary_transform(im_warp) dist = _hamming_distance(t1, t2) mask = _valid_mask(im, max_distance) return dist * mask def gradient(data): D_dy = data[:, :, 1:] - data[:, :, :-1] D_dx = data[:, :, :, 1:] - data[:, :, :, :-1] return D_dx, D_dy def smooth_grad_1st(flow, image, alpha): img_dx, img_dy = gradient(image) weights_x = torch.exp(-torch.mean(torch.abs(img_dx), 1, keepdim=True) * alpha) weights_y = torch.exp(-torch.mean(torch.abs(img_dy), 1, keepdim=True) * alpha) dx, dy = gradient(flow) loss_x = weights_x * dx.abs() / 2. loss_y = weights_y * dy.abs() / 2 return (loss_x.mean() + loss_y.mean()) / 2. 
def smooth_grad_2nd(flow, image, alpha): img_dx, img_dy = gradient(image) weights_x = torch.exp(-torch.mean(torch.abs(img_dx), 1, keepdim=True) * alpha) weights_y = torch.exp(-torch.mean(torch.abs(img_dy), 1, keepdim=True) * alpha) dx, dy = gradient(flow) dx2, _ = gradient(dx) _, dy2 = gradient(dy) loss_x = weights_x[:, :, :, 1:] * dx2.abs() loss_y = weights_y[:, :, 1:, :] * dy2.abs() return (loss_x.mean() + loss_y.mean()) / 2. class unFlowLoss(nn.modules.Module): """ Loss function adopted by ARFlow from originally Unflow. """ def __init__(self, weight=1.0, weights=None, consistency=True, back_occ_only=False, **kwargs): super().__init__() self.weight = weight if "l1" in weights: self.l1_weight = weights["l1"] if "ssim" in weights: self.ssim_weight = weights["ssim"] self.SSIM = SSIM().to("cuda" if torch.cuda.is_available() else "cpu") if "ternary" in weights: self.ternary_weight = weights["ternary"] if 'smooth' in kwargs: self.smooth_args = kwargs['smooth'] else: self.smooth_args = {"degree": 2, "alpha" : 0.2, "weighting": 75.0} self.smooth_w = 75.0 if 'smooth_w' not in kwargs else kwargs['smooth_w'] if 'w_sm_scales' in kwargs: self.w_sm_scales = kwargs['w_sm_scales'] else: self.w_sm_scales = [1.0, 0.0, 0.0, 0.0, 0.0] if 'w_wrp_scales' in kwargs: self.w_wrp_scales = kwargs['w_wrp_scales'] else: self.w_wrp_scales = [1.0, 1.0, 1.0, 1.0, 0.0] self.consistency = consistency self.back_occ_only = back_occ_only def loss_photometric(self, im_orig: torch.Tensor, im_recons: torch.Tensor, occu_mask: torch.Tensor): loss = [] if occu_mask.mean() == 0: occu_mask = torch.ones_like(occu_mask) if hasattr(self, 'l1_weight'): loss += [self.l1_weight * (im_orig - im_recons).abs() * occu_mask] if hasattr(self, 'ssim_weight'): loss += [self.ssim_weight * self.SSIM(im_recons * occu_mask, im_orig * occu_mask)] if hasattr(self, 'ternary_weight'): loss += [self.ternary_weight *\ TernaryLoss(im_recons * occu_mask, im_orig * occu_mask)] return sum([l.mean() for l in loss]) / occu_mask.mean() 
def loss_smooth(self, flow, im_scaled): if self.smooth_args['degree'] == 2: func_smooth = smooth_grad_2nd elif self.smooth_args['degree'] == 1: func_smooth = smooth_grad_1st else: raise NotImplementedError(self.smooth_args['degree']) loss = [] loss += [func_smooth(flow, im_scaled, self.smooth_args['alpha'])] return sum([l.mean() for l in loss]) def forward(self, predictions: Dict[str, torch.Tensor], targets: Dict[str, torch.Tensor]) -> torch.Tensor: """ :param output: Multi-scale forward/backward flows n * [B x 4 x h x w] :param target: image pairs Nx6xHxW :return: """ assert all(key in predictions for key in ['flow', 'flow_b']) and \ all(key in targets for key in ['l_img', 'l_seq']) pyramid_flows = [torch.cat([flo12, flo21], 1) for flo12, flo21 in zip(predictions['flow'], predictions['flow_b'])] pyramid_warp_losses = [] pyramid_smooth_losses = [] s = 1. for i, flow in enumerate(pyramid_flows): if self.w_wrp_scales[i] == 0: pyramid_warp_losses.append(0) pyramid_smooth_losses.append(0) continue # resize images to match the size of layer im1_scaled = F.interpolate(targets['l_img'], tuple(flow.size()[2:]), mode='area') im2_scaled = F.interpolate(targets['l_seq'], tuple(flow.size()[2:]), mode='area') im1_recons = flow_warp(im2_scaled, flow[:, :2], pad='border') im2_recons = flow_warp(im1_scaled, flow[:, 2:], pad='border') # Occlusion mask is broken, always returns zeros... 
# if i == 0: # if self.back_occ_only: # occu_mask1 = 1 - get_occu_mask_backward(flow[:, 2:], theta=0.2) # occu_mask2 = 1 - get_occu_mask_backward(flow[:, :2], theta=0.2) # else: # occu_mask1 = 1 - get_occu_mask_bidirection(flow[:, :2], flow[:, 2:]) # occu_mask2 = 1 - get_occu_mask_bidirection(flow[:, 2:], flow[:, :2]) # else: # occu_mask1 = F.interpolate(occu_mask1, tuple(flow.size()[2:]), mode='nearest') # occu_mask2 = F.interpolate(occu_mask2, tuple(flow.size()[2:]), mode='nearest') occu_mask1 = occu_mask2 = torch.ones_like(im1_scaled) loss_warp = self.loss_photometric(im1_scaled, im1_recons, occu_mask1) if i == 0: s = min(flow.size()[2:]) loss_smooth = self.loss_smooth(flow[:, :2] / s, im1_scaled) if self.consistency: loss_warp += self.loss_photometric(im2_scaled, im2_recons, occu_mask2) loss_smooth += self.loss_smooth(flow[:, 2:] / s, im2_scaled) loss_warp /= 2. loss_smooth /= 2. pyramid_warp_losses.append(loss_warp) pyramid_smooth_losses.append(loss_smooth) pyramid_warp_losses = [l * w for l, w in zip(pyramid_warp_losses, self.w_wrp_scales)] pyramid_smooth_losses = [l * w for l, w in zip(pyramid_smooth_losses, self.w_sm_scales)] return self.weight * (sum(pyramid_warp_losses) + self.smooth_args['weighting'] * sum(pyramid_smooth_losses))
values[invalid] = 0
random_line_split
model.py
# -*- coding: utf-8 -*- import numpy as np from sample import Sample from kernel import Method, Kernel from enum import Enum from group import Group import copy class TrainingTypes(Enum): OneIterationFinished = 0, # 一个迭代的结束 AllConformedKKT = 1, # 全部点皆符合KKT条件 Failed = 2 class Model: iteration_times = 0 def __init__(self): self.label = float("inf") # 预设是 Float 最大值, 代表这是一个标准只处理 2 分类的 SVM Model # Label 的原意思是用在多分类上,看这一个 Model 主要是用来分类哪一种【正样本】 的。 self.samples = [] # Sample Object, from sample.py self.weights = [] self.bias = 0.0 # bias 只有 1 个 self.groups = {} # 分到正样本(+1)或负样本(-1)群里:[target value] = group self.const_value = 0.0 self.tolerance_error = 0.0 self.max_iteration = 100 self.kernel = Kernel(Method.Linear) # 预设使用线性分割(Linear) self.iteration_callback = None self.completion_callback = None self.examine_all = False # 是否遍历全部的点 self._create_groups([1, -1]) # 建立 +1, -1 这 2 个分类群,之后多分类会用到 self.split_index = 0 self.iteration_update_count = 0 # A sample <Sample Object> has a lot of features. def add_sample(self, sample): self.samples.append(copy.copy(sample)) def append_sample(self, features=[], target_value=0.0): sample = Sample(features, target_value) sample.kernel.method = self.kernel.method self.add_sample(sample) def zero_weights(self, count=0): if count <= 0: count = len(self.samples[0].features) del self.weights[:] for i in xrange(0, count): self.weights.append(0.0) def clear_samples(self): del self.samples[:] def clear_groups(self): # 清空 group 里记录的 samples for target, group in self.groups.items(): group.clear() # 从每一个 Sample 的target value 来逐一判断该点是属于哪一群 def classify_to_group(self): self.clear_groups() # 再全部重新分类 for sample in self.samples: to_group = self.groups.get(sample.target_value) if to_group: to_group.add_sample(sample) def classify(self, iteration_callback, completion_callback): self.iteration_callback = iteration_callback self.completion_callback = completion_callback self.iteration_times = 0 self.clear_groups() self._training() def predicate(self, features=[]): 
# Dirctly output the target value by formula : yi = (W^T * xi + b) or (W^T * xi - b) # 计算目标估值 target_value = -self.bias for sample_x in self.samples: if sample_x.alpha_value != 0: # SUM ai * yi * K(Xi * x) target_value += sample_x.alpha_value * sample_x.target_value * self.kernel.calculate(sample_x.features, features) return self.sgn(target_value) # 用于在预测输出时,将计算完的样本点目标值正规化成分类目标的 +1 / -1 def sgn(self, value=0.0): return 1.0 if value >=0.0 else -1.0 ''' @ Private ''' # 建立要分类的群 def _create_groups(self, targets=[]): for target_value in targets: self.groups[target_value] = Group(target_value) def _training(self): self.iteration_times += 1 waiting_samples = [] if self.examine_all == True: waiting_samples = self._samples_without_kkt(self.split_index) else: waiting_samples = np.copy(self.samples).tolist() self._start_to_update(waiting_samples) def _complet
self.classify_to_group() # 分类到所属群里 self.completion_callback(self.iteration_times, self.weights, self.bias, self.groups.values()) def _iteration(self): if self.iteration_callback: self.iteration_callback(self.iteration_times, self.weights, self.bias) def _random_pick_index(self, avoid_index=0): max = len(self.samples) random_index = 0 # 整体样本数有2个,就直接选择另一个点来做 if max == 2: random_index = (max - 1) - avoid_index else: # 整体样本有多个,就跑 Random Picking random_index = np.random.random_integers(0, max-1) if random_index == avoid_index: random_index = self._random_pick_index(avoid_index) return random_index def _update_parameters(self, update_alphas=[]): alphas_count = len(update_alphas) # 如果 update_alphas 为空,代表完成本次迭代训练, 但所有Samples 都还未全部符合 KKT 条件 if alphas_count == 0: return TrainingTypes.OneIterationFinished self._calculate_error_value() self.iteration_update_count += 1 # If we still have over 2 samples can do match-update task if alphas_count > 1: match_sample = update_alphas.pop(0) # Romoved the sample from array self.split_index = self.samples.index(match_sample) +1 max_index = -1 max_error_value = -1.0 for index, other_sample in enumerate(self.samples): # 找到误差距离绝对值最大的样本点 error_distance = abs(other_sample.error_value - match_sample.error_value) if error_distance > max_error_value and index >= self.split_index: max_error_value = error_distance max_index = index # If we successfully chose a sample if max_index >= 0: self.update_alpha(max_index, self.samples.index(match_sample)) # 单纯检查是否所有数据都符合 KKT 条件了 ? 
还有不符合的就再递归跑本 function if self._all_conform_kkt() == False: if self.examine_all == True: update_alphas = self._samples_without_kkt(self.split_index) # 将其它不符合 KKT 条件的点都再重新进行更新 weights & bias 运算, 直至所有点都运算完毕, 才 return 完成 1 迭代 return self._update_parameters(update_alphas) else: # 更新完所有不符合 KKT 条件的点, 同时代表完成完整的 1 迭代运算就 return 完成 return TrainingTypes.AllConformedKKT else: # 挑 1 出来搭配,之后重新跑一次上次的运算 # 这里有 2 个挑选的方式 match_sample = update_alphas.pop(0) if self.examine_all == True: self.split_index = self.samples.index(match_sample) + 1 update_alphas = self._samples_without_kkt(self.split_index) match_index = self.samples.index(match_sample) self.update_alpha(self._random_pick_index(match_index), match_index) return self._update_parameters(update_alphas) # Default is failed. return TrainingTypes.Failed # Updating alpha and bias. def update_alpha(self, main_index, match_index): main = self.samples[main_index] match = self.samples[match_index] new_match_alpha = self._calculate_new_match_alpha(main, match) new_main_alpha =self._calculate_new_main_alpha(main, match, new_match_alpha) # Quickly updating the weights and bias by used 2 new alpha values # 1). calculates the delta weights, Formula: # delta main = (new alpha 1 - old alpha 1) * target1 * x1 # delta match = (new alpha 2 - old alpha 2) * target2 * x2 # delta weights = delta main + delta match main_factor = (new_main_alpha - main.alpha_value) * main.target_value delta_main = np.multiply(main.features, main_factor) match_factor = (new_match_alpha - match.alpha_value) * match.target_value delta_match = np.multiply(match.features, match_factor) delta_weights = np.add(delta_main, delta_match) # 2). let original weights + delta weights to be new weights array, Formula: new_weights = np.add(self.weights, delta_weights) # 这里 new_weights 会是 numpy.ndarray del self.weights[:] self.weights = new_weights.tolist() # 3). 
quickly updating bias via 2 samples (Main & Match), Formula: # W: weights, X: sample features, b: bias, T: sample target value (+1 / -1) # WX - b = T # -> -b = T - WX # b = WX -T # 故 new bias = new weights * X - (+1 or -1) # +1 或 -1 是看当前的 X 是被分到 +1 或者 -1 的标签(Target) # 这里会有 2 个 new bias, 再去按照条件做挑选 1 个出来用。 # 以下有个更新 bias 的方法( New, Old): # Linear method # new_main_bias = np.dot(self.weights, main.features) - main.target_value # new_match_bias = np.dot(self.weights, match.features) - match.target_value # Old method new_main_bias = self.bias + main.error_value + ((new_main_alpha - main.alpha_value) * main.target_value * self.kernel.calculate(main.features, main.features)) + ((new_match_alpha - match.alpha_value) * match.target_value * self.kernel.calculate(match.features, main.features)) new_match_bias = self.bias + match.error_value + ((new_main_alpha - main.alpha_value) * main.target_value * self.kernel.calculate(main.features, match.features)) + ((new_match_alpha - match.alpha_value) * match.target_value * self.kernel.calculate(match.features, match.features)) # 4). 
to choose the final bias or to get the average value of biases self.samples[main_index].alpha_value = new_main_alpha self.samples[match_index].alpha_value = new_match_alpha new_bias = 0.0 if self._is_accept_alpha(new_main_alpha): new_bias = new_main_bias elif self._is_accept_alpha(new_match_alpha): new_bias = new_match_bias else: new_bias = (new_main_bias + new_match_bias) * 0.5 # Update old bias self.bias = new_bias # 更新 Weights / Bias def _start_to_update(self, waiting_samples=[]): # if len(waiting_samples) == 0: # self._completion() # return # 更新参数(权重与偏权)后,再判断是否需要停止迭代或要继续下一迭代的训练 training_result = self._update_parameters(waiting_samples) self.split_index = 0 self.examine_all = True # 完成 1 个迭代的运算 if training_result == TrainingTypes.OneIterationFinished: # 先判断迭代是否达到上限 if self.iteration_times >= self.max_iteration: self._completion() elif self.iteration_update_count == 0: self._completion() else: # 继续迭代运算 self.iteration_update_count = 0 self._iteration() self._training() # 所有样本点都符合 KKT 条件 elif training_result == TrainingTypes.AllConformedKKT: self._completion() else: # TrainingTypes.Failed self._completion() # 找出不符合 KKT 条件的样本点 (等待更新的样本点) def _samples_without_kkt(self, split_index=0): waiting_samples = [] for sample in self.samples[split_index:]: is_conform_kkt = sample.is_confom_kkt(self.samples, self.bias, self.const_value) # 不符合 KKT 条件 if is_conform_kkt == False and sample.alpha_value > 0 and sample.alpha_value < self.const_value: # 把要更新的样本记起来(不符合 KKT 的都为待更新样本) waiting_samples.append(sample) return waiting_samples # 是否所有样本都已符合 KKT, return BOOL def _all_conform_kkt(self): all_conform_kkt = True for sample in self.samples: all_conform_kkt = sample.is_confom_kkt(self.samples, self.bias, self.const_value) # 有任一样本点不符合 KKT 条件 if all_conform_kkt == False: sum_x = np.dot(self.weights, sample.features) kkt_value = sample.target_value * (sum_x - self.bias) break return all_conform_kkt # 计算每个Sample 的 Error Value def _calculate_error_value(self): errors = [] for 
current_sample in self.samples: # Sample error error_value = 0.0 # 跟其它的样本点做比较(计算误差值) for other_sample in self.samples: # kernel_value = 求当前的主要 current_sample 特征值与所有的 other_sample 特征值做内和积 # (包含目前 current_sample 自己对自己的内积),而后将内积值再传入至 kernel 做运算后的值。 kernel_value = self.kernel.calculate(current_sample.features, other_sample.features) other_target_value = other_sample.target_value other_alpha_value = other_sample.alpha_value error_value += (other_target_value * other_alpha_value * kernel_value) error_value += -self.bias - current_sample.target_value errors.append(error_value) # 将Error Value 存回去当前的样本点的误差值里 current_sample.error_value = error_value return errors # 计算 New Matched Pattern Alpha Value & 判断其是否符合上下限范围 def _calculate_new_match_alpha(self, main_sample, match_sample): old_main_alpha = main_sample.alpha_value main_target = main_sample.target_value # Update the alpha value of match-pattern in first # Old match alpha value + ( match target value * ( main error - match error)) / ((x1 * x1 ) + ( x2 * x2) + (2 * x1 * x2) old_match_alpha = match_sample.alpha_value match_target = match_sample.target_value # 分子:match target * ( main error - match error) and it won't need to do fabs(error) numerator = match_target * (main_sample.error_value - match_sample.error_value) # 分母 denominator = self.kernel.calculate(main_sample.features, main_sample.features) + self.kernel.calculate(match_sample.features, match_sample.features) - (2.0 * self.kernel.calculate(main_sample.features, match_sample.features)) # New match alpha new_match_alpha = old_match_alpha + (numerator / denominator) # Checking the max-min limitation(检查上下限范围) min_scope = 0.0 max_scope = 0.0 # 相关讯号: If main target * match target = -1 (minor singal), using this formula: if main_target * match_target < 0.0: # Min scope is MAX( 0.0f, (old_match_alpha - old_main_alpha)) min_scope = np.maximum(0.0, (old_match_alpha - old_main_alpha)) # Max scope is MIN( const value, const value + old match alpha - old main alpha) max_scope 
= np.minimum(self.const_value, (self.const_value + old_match_alpha - old_main_alpha)) else: # 同讯号 # If main target * match target = 1 (plus singal), using this formula: # Min scope is MIN(0.0f, ( old main alpha + old match alpha - const value)) min_scope = np.maximum(0.0, (old_main_alpha + old_match_alpha - self.const_value)) max_scope = np.minimum(self.const_value, (old_match_alpha + old_main_alpha)) # Compares max and min value of new match alpha value. # 如果 match 的 alpha 值在原公式制定的标准范围内,就什么都不处理,仅处理以下 2 个条件 # 如果 match 的 alpha 值小于下限值,就变成下限值 if new_match_alpha < min_scope: new_match_alpha = min_scope # 如果大于上限值,就变成上限值 elif new_match_alpha > max_scope: new_match_alpha = max_scope return new_match_alpha # 更新 New Main Alpha Value def _calculate_new_main_alpha(self, main_sample, match_sample, new_match_alpha): # Formula: new main alpha = old main alpha + ( main target * match target * (old match alpha - new match alpha)) return main_sample.alpha_value + (main_sample.target_value * match_sample.target_value * (match_sample.alpha_value - new_match_alpha)) # 判断 New Alpha Value 是否在接受范围里 def _is_accept_alpha(self, alpha_value=0.0): return True if(alpha_value > 0.0 and alpha_value < self.const_value) else False
ion(self): if self.completion_callback:
conditional_block
model.py
# -*- coding: utf-8 -*- import numpy as np from sample import Sample from kernel import Method, Kernel from enum import Enum from group import Group import copy class TrainingTypes(Enum): OneIterationFinished = 0, # 一个迭代的结束 AllConformedKKT = 1, # 全部点皆符合KKT条件 Failed = 2 class Model: iteration_times = 0 def __init__(self): self.label = float("inf") # 预设是 Float 最大值, 代表这是一个标准只处理 2 分类的 SVM Model # Label 的原意思是用在多分类上,看这一个 Model 主要是用来分类哪一种【正样本】 的。 self.samples = [] # Sample Object, from sample.py self.weights = [] self.bias = 0.0 # bias 只有 1 个 self.groups = {} # 分到正样本(+1)或负样本(-1)群里:[target value] = group self.const_value = 0.0 self.tolerance_error = 0.0 self.max_iteration = 100 self.kernel = Kernel(Method.Linear) # 预设使用线性分割(Linear) self.iteration_callback = None self.completion_callback = None self.examine_all = False # 是否遍历全部的点 self._create_groups([1, -1]) # 建立 +1, -1 这 2 个分类群,之后多分类会用到 self.split_index = 0 self.iteration_update_count = 0 # A sample <Sample Object> has a lot of features. def add_sample(self, sample): self.samples.append(copy.copy(sample)) def append_sample(self, features=[], target_value=0.0): sample = Sample(features, target_value) sample.kernel.method = self.kernel.method self.add_sample(sample) def zero_weights(self, count=0): if count <= 0: count = len(self.samples[0].features) del self.weights[:] for i in xrange(0, count): self.weights.append(0.0) def clear_samples(self): del self.samples[:] def clear_groups(self): # 清空 group 里记录的 samples for target, group in self.groups.items(): group.clear() # 从每一个 Sample 的target value 来逐一判断该点是属于哪一群 def classify_to_group(self): self.clear_groups() # 再全部重新分类 for sample in self.samples: to_group = self.groups.get(sample.target_value) if to_group: to_group.add_sample(sample) def classify(self, iteration_callback, completion_callback): self.iteration_callback = iteration_callback self.completion_callback = completion_callback self.iteration_times = 0 self.clear_groups() self._training() def predicate(self, features=[]): 
# Dirctly output the target value by formula : yi = (W^T * xi + b) or (W^T * xi - b) # 计算目标估值 target_value = -self.bias for sample_x in self.samples: if sample_x.alpha_value != 0: # SUM ai * yi * K(Xi * x) target_value += sample_x.alpha_value * sample_x.target_value * self.kernel.calculate(sample_x.features, features) return self.sgn(target_value) # 用于在预测输出时,将计算完的样本点目标值正规化成分类目标的 +1 / -1 def sgn(self, value=0.0): return 1.0 if value >=0.0 else -1.0 ''' @ Private ''' # 建立要分类的群 def _create_groups(self, targets=[]): for target_value in targets: self.groups[target_value] = Group(target_value) def _training(self): self.iteration_times += 1 waiting_samples = [] if self.examine_all == True: waiting_samples = self._samples_without_kkt(self.split_index) else: waiting_samples = np.copy(self.samples).tolist() self._start_to_update(waiting_samples) def _completion(self): if self.completion_callback: self.classify_to_group() # 分类到所属群里 self.completion_callback(self.iteration_times, self.weights, self.bias, self.groups.values()) def _iteration(self): if self.iteration_callback: self.iteration_callback(self.iteration_times, self.weights, self.bias) def _random_pick_index(self, avoid_index=0): max = len(self.samples) random_index = 0 # 整体样本数有2个,就直接选择另一个点来做 if max == 2: random_index = (max - 1) - avoid_index else: # 整体样本有多个,就跑 Random Pi
random_index = np.random.random_integers(0, max-1) if random_index == avoid_index: random_index = self._random_pick_index(avoid_index) return random_index def _update_parameters(self, update_alphas=[]): alphas_count = len(update_alphas) # 如果 update_alphas 为空,代表完成本次迭代训练, 但所有Samples 都还未全部符合 KKT 条件 if alphas_count == 0: return TrainingTypes.OneIterationFinished self._calculate_error_value() self.iteration_update_count += 1 # If we still have over 2 samples can do match-update task if alphas_count > 1: match_sample = update_alphas.pop(0) # Romoved the sample from array self.split_index = self.samples.index(match_sample) +1 max_index = -1 max_error_value = -1.0 for index, other_sample in enumerate(self.samples): # 找到误差距离绝对值最大的样本点 error_distance = abs(other_sample.error_value - match_sample.error_value) if error_distance > max_error_value and index >= self.split_index: max_error_value = error_distance max_index = index # If we successfully chose a sample if max_index >= 0: self.update_alpha(max_index, self.samples.index(match_sample)) # 单纯检查是否所有数据都符合 KKT 条件了 ? 还有不符合的就再递归跑本 function if self._all_conform_kkt() == False: if self.examine_all == True: update_alphas = self._samples_without_kkt(self.split_index) # 将其它不符合 KKT 条件的点都再重新进行更新 weights & bias 运算, 直至所有点都运算完毕, 才 return 完成 1 迭代 return self._update_parameters(update_alphas) else: # 更新完所有不符合 KKT 条件的点, 同时代表完成完整的 1 迭代运算就 return 完成 return TrainingTypes.AllConformedKKT else: # 挑 1 出来搭配,之后重新跑一次上次的运算 # 这里有 2 个挑选的方式 match_sample = update_alphas.pop(0) if self.examine_all == True: self.split_index = self.samples.index(match_sample) + 1 update_alphas = self._samples_without_kkt(self.split_index) match_index = self.samples.index(match_sample) self.update_alpha(self._random_pick_index(match_index), match_index) return self._update_parameters(update_alphas) # Default is failed. return TrainingTypes.Failed # Updating alpha and bias. 
def update_alpha(self, main_index, match_index): main = self.samples[main_index] match = self.samples[match_index] new_match_alpha = self._calculate_new_match_alpha(main, match) new_main_alpha =self._calculate_new_main_alpha(main, match, new_match_alpha) # Quickly updating the weights and bias by used 2 new alpha values # 1). calculates the delta weights, Formula: # delta main = (new alpha 1 - old alpha 1) * target1 * x1 # delta match = (new alpha 2 - old alpha 2) * target2 * x2 # delta weights = delta main + delta match main_factor = (new_main_alpha - main.alpha_value) * main.target_value delta_main = np.multiply(main.features, main_factor) match_factor = (new_match_alpha - match.alpha_value) * match.target_value delta_match = np.multiply(match.features, match_factor) delta_weights = np.add(delta_main, delta_match) # 2). let original weights + delta weights to be new weights array, Formula: new_weights = np.add(self.weights, delta_weights) # 这里 new_weights 会是 numpy.ndarray del self.weights[:] self.weights = new_weights.tolist() # 3). 
quickly updating bias via 2 samples (Main & Match), Formula: # W: weights, X: sample features, b: bias, T: sample target value (+1 / -1) # WX - b = T # -> -b = T - WX # b = WX -T # 故 new bias = new weights * X - (+1 or -1) # +1 或 -1 是看当前的 X 是被分到 +1 或者 -1 的标签(Target) # 这里会有 2 个 new bias, 再去按照条件做挑选 1 个出来用。 # 以下有个更新 bias 的方法( New, Old): # Linear method # new_main_bias = np.dot(self.weights, main.features) - main.target_value # new_match_bias = np.dot(self.weights, match.features) - match.target_value # Old method new_main_bias = self.bias + main.error_value + ((new_main_alpha - main.alpha_value) * main.target_value * self.kernel.calculate(main.features, main.features)) + ((new_match_alpha - match.alpha_value) * match.target_value * self.kernel.calculate(match.features, main.features)) new_match_bias = self.bias + match.error_value + ((new_main_alpha - main.alpha_value) * main.target_value * self.kernel.calculate(main.features, match.features)) + ((new_match_alpha - match.alpha_value) * match.target_value * self.kernel.calculate(match.features, match.features)) # 4). 
to choose the final bias or to get the average value of biases self.samples[main_index].alpha_value = new_main_alpha self.samples[match_index].alpha_value = new_match_alpha new_bias = 0.0 if self._is_accept_alpha(new_main_alpha): new_bias = new_main_bias elif self._is_accept_alpha(new_match_alpha): new_bias = new_match_bias else: new_bias = (new_main_bias + new_match_bias) * 0.5 # Update old bias self.bias = new_bias # 更新 Weights / Bias def _start_to_update(self, waiting_samples=[]): # if len(waiting_samples) == 0: # self._completion() # return # 更新参数(权重与偏权)后,再判断是否需要停止迭代或要继续下一迭代的训练 training_result = self._update_parameters(waiting_samples) self.split_index = 0 self.examine_all = True # 完成 1 个迭代的运算 if training_result == TrainingTypes.OneIterationFinished: # 先判断迭代是否达到上限 if self.iteration_times >= self.max_iteration: self._completion() elif self.iteration_update_count == 0: self._completion() else: # 继续迭代运算 self.iteration_update_count = 0 self._iteration() self._training() # 所有样本点都符合 KKT 条件 elif training_result == TrainingTypes.AllConformedKKT: self._completion() else: # TrainingTypes.Failed self._completion() # 找出不符合 KKT 条件的样本点 (等待更新的样本点) def _samples_without_kkt(self, split_index=0): waiting_samples = [] for sample in self.samples[split_index:]: is_conform_kkt = sample.is_confom_kkt(self.samples, self.bias, self.const_value) # 不符合 KKT 条件 if is_conform_kkt == False and sample.alpha_value > 0 and sample.alpha_value < self.const_value: # 把要更新的样本记起来(不符合 KKT 的都为待更新样本) waiting_samples.append(sample) return waiting_samples # 是否所有样本都已符合 KKT, return BOOL def _all_conform_kkt(self): all_conform_kkt = True for sample in self.samples: all_conform_kkt = sample.is_confom_kkt(self.samples, self.bias, self.const_value) # 有任一样本点不符合 KKT 条件 if all_conform_kkt == False: sum_x = np.dot(self.weights, sample.features) kkt_value = sample.target_value * (sum_x - self.bias) break return all_conform_kkt # 计算每个Sample 的 Error Value def _calculate_error_value(self): errors = [] for 
current_sample in self.samples: # Sample error error_value = 0.0 # 跟其它的样本点做比较(计算误差值) for other_sample in self.samples: # kernel_value = 求当前的主要 current_sample 特征值与所有的 other_sample 特征值做内和积 # (包含目前 current_sample 自己对自己的内积),而后将内积值再传入至 kernel 做运算后的值。 kernel_value = self.kernel.calculate(current_sample.features, other_sample.features) other_target_value = other_sample.target_value other_alpha_value = other_sample.alpha_value error_value += (other_target_value * other_alpha_value * kernel_value) error_value += -self.bias - current_sample.target_value errors.append(error_value) # 将Error Value 存回去当前的样本点的误差值里 current_sample.error_value = error_value return errors # 计算 New Matched Pattern Alpha Value & 判断其是否符合上下限范围 def _calculate_new_match_alpha(self, main_sample, match_sample): old_main_alpha = main_sample.alpha_value main_target = main_sample.target_value # Update the alpha value of match-pattern in first # Old match alpha value + ( match target value * ( main error - match error)) / ((x1 * x1 ) + ( x2 * x2) + (2 * x1 * x2) old_match_alpha = match_sample.alpha_value match_target = match_sample.target_value # 分子:match target * ( main error - match error) and it won't need to do fabs(error) numerator = match_target * (main_sample.error_value - match_sample.error_value) # 分母 denominator = self.kernel.calculate(main_sample.features, main_sample.features) + self.kernel.calculate(match_sample.features, match_sample.features) - (2.0 * self.kernel.calculate(main_sample.features, match_sample.features)) # New match alpha new_match_alpha = old_match_alpha + (numerator / denominator) # Checking the max-min limitation(检查上下限范围) min_scope = 0.0 max_scope = 0.0 # 相关讯号: If main target * match target = -1 (minor singal), using this formula: if main_target * match_target < 0.0: # Min scope is MAX( 0.0f, (old_match_alpha - old_main_alpha)) min_scope = np.maximum(0.0, (old_match_alpha - old_main_alpha)) # Max scope is MIN( const value, const value + old match alpha - old main alpha) max_scope 
= np.minimum(self.const_value, (self.const_value + old_match_alpha - old_main_alpha)) else: # 同讯号 # If main target * match target = 1 (plus singal), using this formula: # Min scope is MIN(0.0f, ( old main alpha + old match alpha - const value)) min_scope = np.maximum(0.0, (old_main_alpha + old_match_alpha - self.const_value)) max_scope = np.minimum(self.const_value, (old_match_alpha + old_main_alpha)) # Compares max and min value of new match alpha value. # 如果 match 的 alpha 值在原公式制定的标准范围内,就什么都不处理,仅处理以下 2 个条件 # 如果 match 的 alpha 值小于下限值,就变成下限值 if new_match_alpha < min_scope: new_match_alpha = min_scope # 如果大于上限值,就变成上限值 elif new_match_alpha > max_scope: new_match_alpha = max_scope return new_match_alpha # 更新 New Main Alpha Value def _calculate_new_main_alpha(self, main_sample, match_sample, new_match_alpha): # Formula: new main alpha = old main alpha + ( main target * match target * (old match alpha - new match alpha)) return main_sample.alpha_value + (main_sample.target_value * match_sample.target_value * (match_sample.alpha_value - new_match_alpha)) # 判断 New Alpha Value 是否在接受范围里 def _is_accept_alpha(self, alpha_value=0.0): return True if(alpha_value > 0.0 and alpha_value < self.const_value) else False
cking
identifier_name
model.py
# -*- coding: utf-8 -*- import numpy as np from sample import Sample from kernel import Method, Kernel from enum import Enum from group import Group import copy class TrainingTypes(Enum): OneIterationFinished = 0, # 一个迭代的结束 AllConformedKKT = 1, # 全部点皆符合KKT条件 Failed = 2 class Model: iteration_times = 0 def __init__(self): self.label = float("inf") # 预设是 Float 最大值, 代表这是一个标准只处理 2 分类的 SVM Model # Label 的原意思是用在多分类上,看这一个 Model 主要是用来分类哪一种【正样本】 的。 self.samples = [] # Sample Object, from sample.py self.weights = [] self.bias = 0.0 # bias 只有 1 个 self.groups = {} # 分到正样本(+1)或负样本(-1)群里:[target value] = group self.const_value = 0.0 self.tolerance_error = 0.0 self.max_iteration = 100 self.kernel = Kernel(Method.Linear) # 预设使用线性分割(Linear) self.iteration_callback = None self.completion_callback = None self.examine_all = False # 是否遍历全部的点 self._create_groups([1, -1]) # 建立 +1, -1 这 2 个分类群,之后多分类会用到 self.split_index = 0 self.iteration_update_count = 0 # A sample <Sample Object> has a lot of features. def add_sample(self, sample): self.samples.append(copy.copy(sample)) def append_sample(self, features=[], target_value=0.0): sample = Sample(features, target_value) sample.kernel.method = self.kernel.method self.add_sample(sample) def zero_weights(self, count=0): if count <= 0: count = len(self.samples[0].features) del self.weights[:] for i in xrange(0, count): self.weights.append(0.0) def clear_samples(self): del self.samples[:] def clear_groups(self): # 清空 group 里记录的 samples for target, group in self.groups.items(): group.clear() # 从每一个 Sample 的target value 来逐一判断该点是属于哪一群 def classify_to_group(self): self.clear_groups() # 再全部重新分类 for sample in self.samples: to_group = self.groups.get(sample.target_value) if to_group: to_group.add_sample(sample) def classify(self, iteration_callback, completion_callback): self.iteration_callback = iteration_callback self.completion_callback = completion_callback self.iteration_times = 0 self.clear_groups() self._training() def predicate(self, features=[]): 
# Dirctly output the target value by formula : yi = (W^T * xi + b) or (W^T * xi - b) # 计算目标估值 target_value = -self.bias for sample_x in self.samples: if sample_x.alpha_value != 0: # SUM ai * yi * K(Xi * x) target_value += sample_x.alpha_value * sample_x.target_value * self.kernel.calculate(sample_x.features, features) return self.sgn(target_value) # 用于在预测输出时,将计算完的样本点目标值正规化成分类目标的 +1 / -1 def sgn(self, value=0.0): return 1.0 if value >=0.0 else -1.0 ''' @ Private ''' # 建立要分类的群 def _create_groups(self, targets=[]): for target_value in targets: self.groups[target_value] = Group(target_value) def _training(self): self.iteration_times += 1 waiting_samples = [] if self.examine_all == True: waiting_samples = self._samples_without_kkt(self.split_index) else: waiting_samples = np.copy(self.samples).tolist() self._start_to_update(waiting_samples) def _completion(self): if self.completion_callback: self.classify_to_group() # 分类到所属群里 self.completion_callback(self.iteration_times, self.weights, self.bias, self.groups.values()) def _iteration(self): if self.iteration_callback: self.iteration_callback(self.iteration_times, self.weights, self.bias) def _random_pick_index(self, avoid_index=0): max = len(self.samples) random_index = 0 # 整体样本数有2个,就直接选择另一个点来做 if max == 2: random_index = (max - 1) - avoid_index else: # 整体样本有多个,就跑 Random Picking random_index = np.random.random_integers(0, max-1) if random_index == avoid_index: random_index = self._random_pick_index(avoid_index) return random_index def _update_parameters(self, update_alphas=[]): alphas_count = len(update_alphas) # 如果 update_alphas 为空,代表完成本次迭代训练, 但所有Samples 都还未全部符合 KKT 条件 if alphas_count == 0: return TrainingTypes.OneIterationFinished self._calculate_error_value() self.iteration_update_count += 1 # If we still have over 2 samples can do match-update task if alphas_count > 1: match_sample = update_alphas.pop(0) # Romoved the sample from array self.split_index = self.samples.index(match_sample) +1 max_index = -1 
max_error_value = -1.0 for index, other_sample in enumerate(self.samples): # 找到误差距离绝对值最大的样本点 error_distance = abs(other_sample.error_value - match_sample.error_value) if error_distance > max_error_value and index >= self.split_index: max_error_value = error_distance max_index = index # If we successfully chose a sample if max_index >= 0: self.update_alpha(max_index, self.samples.index(match_sample)) # 单纯检查是否所有数据都符合 KKT 条件了 ? 还有不符合的就再递归跑本 function if self._all_conform_kkt() == False: if self.examine_all == True: update_alphas = self._samples_without_kkt(self.split_index) # 将其它不符合 KKT 条件的点都再重新进行更新 weights & bias 运算, 直至所有点都运算完毕, 才 return 完成 1 迭代 return self._update_parameters(update_alphas) else: # 更新完所有不符合 KKT 条件的点, 同时代表完成完整的 1 迭代运算就 return 完成 return TrainingTypes.AllConformedKKT else: # 挑 1 出来搭配,之后重新跑一次上次的运算 # 这里有 2 个挑选的方式 match_sample = update_alphas.pop(0) if self.examine_all == True: self.split_index = self.samples.index(match_sample) + 1 update_alphas = self._samples_without_kkt(self.split_index) match_index = self.samples.index(match_sample) self.update_alpha(self._random_pick_index(match_index), match_index) return self._update_parameters(update_alphas) # Default is failed. return TrainingTypes.Failed # Updating alpha and bias. def update_alpha(self, main_index, match_index):
# Quickly updating the weights and bias by used 2 new alpha values # 1). calculates the delta weights, Formula: # delta main = (new alpha 1 - old alpha 1) * target1 * x1 # delta match = (new alpha 2 - old alpha 2) * target2 * x2 # delta weights = delta main + delta match main_factor = (new_main_alpha - main.alpha_value) * main.target_value delta_main = np.multiply(main.features, main_factor) match_factor = (new_match_alpha - match.alpha_value) * match.target_value delta_match = np.multiply(match.features, match_factor) delta_weights = np.add(delta_main, delta_match) # 2). let original weights + delta weights to be new weights array, Formula: new_weights = np.add(self.weights, delta_weights) # 这里 new_weights 会是 numpy.ndarray del self.weights[:] self.weights = new_weights.tolist() # 3). quickly updating bias via 2 samples (Main & Match), Formula: # W: weights, X: sample features, b: bias, T: sample target value (+1 / -1) # WX - b = T # -> -b = T - WX # b = WX -T # 故 new bias = new weights * X - (+1 or -1) # +1 或 -1 是看当前的 X 是被分到 +1 或者 -1 的标签(Target) # 这里会有 2 个 new bias, 再去按照条件做挑选 1 个出来用。 # 以下有个更新 bias 的方法( New, Old): # Linear method # new_main_bias = np.dot(self.weights, main.features) - main.target_value # new_match_bias = np.dot(self.weights, match.features) - match.target_value # Old method new_main_bias = self.bias + main.error_value + ((new_main_alpha - main.alpha_value) * main.target_value * self.kernel.calculate(main.features, main.features)) + ((new_match_alpha - match.alpha_value) * match.target_value * self.kernel.calculate(match.features, main.features)) new_match_bias = self.bias + match.error_value + ((new_main_alpha - main.alpha_value) * main.target_value * self.kernel.calculate(main.features, match.features)) + ((new_match_alpha - match.alpha_value) * match.target_value * self.kernel.calculate(match.features, match.features)) # 4). 
to choose the final bias or to get the average value of biases self.samples[main_index].alpha_value = new_main_alpha self.samples[match_index].alpha_value = new_match_alpha new_bias = 0.0 if self._is_accept_alpha(new_main_alpha): new_bias = new_main_bias elif self._is_accept_alpha(new_match_alpha): new_bias = new_match_bias else: new_bias = (new_main_bias + new_match_bias) * 0.5 # Update old bias self.bias = new_bias # 更新 Weights / Bias def _start_to_update(self, waiting_samples=[]): # if len(waiting_samples) == 0: # self._completion() # return # 更新参数(权重与偏权)后,再判断是否需要停止迭代或要继续下一迭代的训练 training_result = self._update_parameters(waiting_samples) self.split_index = 0 self.examine_all = True # 完成 1 个迭代的运算 if training_result == TrainingTypes.OneIterationFinished: # 先判断迭代是否达到上限 if self.iteration_times >= self.max_iteration: self._completion() elif self.iteration_update_count == 0: self._completion() else: # 继续迭代运算 self.iteration_update_count = 0 self._iteration() self._training() # 所有样本点都符合 KKT 条件 elif training_result == TrainingTypes.AllConformedKKT: self._completion() else: # TrainingTypes.Failed self._completion() # 找出不符合 KKT 条件的样本点 (等待更新的样本点) def _samples_without_kkt(self, split_index=0): waiting_samples = [] for sample in self.samples[split_index:]: is_conform_kkt = sample.is_confom_kkt(self.samples, self.bias, self.const_value) # 不符合 KKT 条件 if is_conform_kkt == False and sample.alpha_value > 0 and sample.alpha_value < self.const_value: # 把要更新的样本记起来(不符合 KKT 的都为待更新样本) waiting_samples.append(sample) return waiting_samples # 是否所有样本都已符合 KKT, return BOOL def _all_conform_kkt(self): all_conform_kkt = True for sample in self.samples: all_conform_kkt = sample.is_confom_kkt(self.samples, self.bias, self.const_value) # 有任一样本点不符合 KKT 条件 if all_conform_kkt == False: sum_x = np.dot(self.weights, sample.features) kkt_value = sample.target_value * (sum_x - self.bias) break return all_conform_kkt # 计算每个Sample 的 Error Value def _calculate_error_value(self): errors = [] for 
current_sample in self.samples: # Sample error error_value = 0.0 # 跟其它的样本点做比较(计算误差值) for other_sample in self.samples: # kernel_value = 求当前的主要 current_sample 特征值与所有的 other_sample 特征值做内和积 # (包含目前 current_sample 自己对自己的内积),而后将内积值再传入至 kernel 做运算后的值。 kernel_value = self.kernel.calculate(current_sample.features, other_sample.features) other_target_value = other_sample.target_value other_alpha_value = other_sample.alpha_value error_value += (other_target_value * other_alpha_value * kernel_value) error_value += -self.bias - current_sample.target_value errors.append(error_value) # 将Error Value 存回去当前的样本点的误差值里 current_sample.error_value = error_value return errors # 计算 New Matched Pattern Alpha Value & 判断其是否符合上下限范围 def _calculate_new_match_alpha(self, main_sample, match_sample): old_main_alpha = main_sample.alpha_value main_target = main_sample.target_value # Update the alpha value of match-pattern in first # Old match alpha value + ( match target value * ( main error - match error)) / ((x1 * x1 ) + ( x2 * x2) + (2 * x1 * x2) old_match_alpha = match_sample.alpha_value match_target = match_sample.target_value # 分子:match target * ( main error - match error) and it won't need to do fabs(error) numerator = match_target * (main_sample.error_value - match_sample.error_value) # 分母 denominator = self.kernel.calculate(main_sample.features, main_sample.features) + self.kernel.calculate(match_sample.features, match_sample.features) - (2.0 * self.kernel.calculate(main_sample.features, match_sample.features)) # New match alpha new_match_alpha = old_match_alpha + (numerator / denominator) # Checking the max-min limitation(检查上下限范围) min_scope = 0.0 max_scope = 0.0 # 相关讯号: If main target * match target = -1 (minor singal), using this formula: if main_target * match_target < 0.0: # Min scope is MAX( 0.0f, (old_match_alpha - old_main_alpha)) min_scope = np.maximum(0.0, (old_match_alpha - old_main_alpha)) # Max scope is MIN( const value, const value + old match alpha - old main alpha) max_scope 
= np.minimum(self.const_value, (self.const_value + old_match_alpha - old_main_alpha)) else: # 同讯号 # If main target * match target = 1 (plus singal), using this formula: # Min scope is MIN(0.0f, ( old main alpha + old match alpha - const value)) min_scope = np.maximum(0.0, (old_main_alpha + old_match_alpha - self.const_value)) max_scope = np.minimum(self.const_value, (old_match_alpha + old_main_alpha)) # Compares max and min value of new match alpha value. # 如果 match 的 alpha 值在原公式制定的标准范围内,就什么都不处理,仅处理以下 2 个条件 # 如果 match 的 alpha 值小于下限值,就变成下限值 if new_match_alpha < min_scope: new_match_alpha = min_scope # 如果大于上限值,就变成上限值 elif new_match_alpha > max_scope: new_match_alpha = max_scope return new_match_alpha # 更新 New Main Alpha Value def _calculate_new_main_alpha(self, main_sample, match_sample, new_match_alpha): # Formula: new main alpha = old main alpha + ( main target * match target * (old match alpha - new match alpha)) return main_sample.alpha_value + (main_sample.target_value * match_sample.target_value * (match_sample.alpha_value - new_match_alpha)) # 判断 New Alpha Value 是否在接受范围里 def _is_accept_alpha(self, alpha_value=0.0): return True if(alpha_value > 0.0 and alpha_value < self.const_value) else False
main = self.samples[main_index] match = self.samples[match_index] new_match_alpha = self._calculate_new_match_alpha(main, match) new_main_alpha =self._calculate_new_main_alpha(main, match, new_match_alpha)
random_line_split
model.py
# -*- coding: utf-8 -*- import numpy as np from sample import Sample from kernel import Method, Kernel from enum import Enum from group import Group import copy class TrainingTypes(Enum): OneIterationFinished = 0, # 一个迭代的结束 AllConformedKKT = 1, # 全部点皆符合KKT条件 Failed = 2 class Model: iteration_times = 0 def __init__(self): self.label = float("inf") # 预设是 Float 最大值, 代表这是一个标准只处理 2 分类的 SVM Model # Label 的原意思是用在多分类上,看这一个 Model 主要是用来分类哪一种【正样本】 的。 self.samples = [] # Sample Object, from sample.py self.weights = [] self.bias = 0.0 # bias 只有 1 个 self.groups = {} # 分到正样本(+1)或负样本(-1)群里:[target value] = group self.const_value = 0.0 self.tolerance_error = 0.0 self.max_iteration = 100 self.kernel = Kernel(Method.Linear) # 预设使用线性分割(Linear) self.iteration_callback = None self.completion_callback = None self.examine_all = False # 是否遍历全部的点 self._create_groups([1, -1]) # 建立 +1, -1 这 2 个分类群,之后多分类会用到 self.split_index = 0 self.iteration_update_count = 0 # A sample <Sample Object> has a lot of features. def add_sample(self, sample): self.samples.append(copy.copy(sample)) def append_sample(self, features=[], target_value=0.0): sample = Sample(features, target_value) sample.kernel.method = self.kernel.method self.add_sample(sample) def zero_weights(self, count=0): if count <= 0: count = len(self.samples[0].features) del
del self.samples[:] def clear_groups(self): # 清空 group 里记录的 samples for target, group in self.groups.items(): group.clear() # 从每一个 Sample 的target value 来逐一判断该点是属于哪一群 def classify_to_group(self): self.clear_groups() # 再全部重新分类 for sample in self.samples: to_group = self.groups.get(sample.target_value) if to_group: to_group.add_sample(sample) def classify(self, iteration_callback, completion_callback): self.iteration_callback = iteration_callback self.completion_callback = completion_callback self.iteration_times = 0 self.clear_groups() self._training() def predicate(self, features=[]): # Dirctly output the target value by formula : yi = (W^T * xi + b) or (W^T * xi - b) # 计算目标估值 target_value = -self.bias for sample_x in self.samples: if sample_x.alpha_value != 0: # SUM ai * yi * K(Xi * x) target_value += sample_x.alpha_value * sample_x.target_value * self.kernel.calculate(sample_x.features, features) return self.sgn(target_value) # 用于在预测输出时,将计算完的样本点目标值正规化成分类目标的 +1 / -1 def sgn(self, value=0.0): return 1.0 if value >=0.0 else -1.0 ''' @ Private ''' # 建立要分类的群 def _create_groups(self, targets=[]): for target_value in targets: self.groups[target_value] = Group(target_value) def _training(self): self.iteration_times += 1 waiting_samples = [] if self.examine_all == True: waiting_samples = self._samples_without_kkt(self.split_index) else: waiting_samples = np.copy(self.samples).tolist() self._start_to_update(waiting_samples) def _completion(self): if self.completion_callback: self.classify_to_group() # 分类到所属群里 self.completion_callback(self.iteration_times, self.weights, self.bias, self.groups.values()) def _iteration(self): if self.iteration_callback: self.iteration_callback(self.iteration_times, self.weights, self.bias) def _random_pick_index(self, avoid_index=0): max = len(self.samples) random_index = 0 # 整体样本数有2个,就直接选择另一个点来做 if max == 2: random_index = (max - 1) - avoid_index else: # 整体样本有多个,就跑 Random Picking random_index = np.random.random_integers(0, max-1) if 
random_index == avoid_index: random_index = self._random_pick_index(avoid_index) return random_index def _update_parameters(self, update_alphas=[]): alphas_count = len(update_alphas) # 如果 update_alphas 为空,代表完成本次迭代训练, 但所有Samples 都还未全部符合 KKT 条件 if alphas_count == 0: return TrainingTypes.OneIterationFinished self._calculate_error_value() self.iteration_update_count += 1 # If we still have over 2 samples can do match-update task if alphas_count > 1: match_sample = update_alphas.pop(0) # Romoved the sample from array self.split_index = self.samples.index(match_sample) +1 max_index = -1 max_error_value = -1.0 for index, other_sample in enumerate(self.samples): # 找到误差距离绝对值最大的样本点 error_distance = abs(other_sample.error_value - match_sample.error_value) if error_distance > max_error_value and index >= self.split_index: max_error_value = error_distance max_index = index # If we successfully chose a sample if max_index >= 0: self.update_alpha(max_index, self.samples.index(match_sample)) # 单纯检查是否所有数据都符合 KKT 条件了 ? 还有不符合的就再递归跑本 function if self._all_conform_kkt() == False: if self.examine_all == True: update_alphas = self._samples_without_kkt(self.split_index) # 将其它不符合 KKT 条件的点都再重新进行更新 weights & bias 运算, 直至所有点都运算完毕, 才 return 完成 1 迭代 return self._update_parameters(update_alphas) else: # 更新完所有不符合 KKT 条件的点, 同时代表完成完整的 1 迭代运算就 return 完成 return TrainingTypes.AllConformedKKT else: # 挑 1 出来搭配,之后重新跑一次上次的运算 # 这里有 2 个挑选的方式 match_sample = update_alphas.pop(0) if self.examine_all == True: self.split_index = self.samples.index(match_sample) + 1 update_alphas = self._samples_without_kkt(self.split_index) match_index = self.samples.index(match_sample) self.update_alpha(self._random_pick_index(match_index), match_index) return self._update_parameters(update_alphas) # Default is failed. return TrainingTypes.Failed # Updating alpha and bias. 
def update_alpha(self, main_index, match_index): main = self.samples[main_index] match = self.samples[match_index] new_match_alpha = self._calculate_new_match_alpha(main, match) new_main_alpha =self._calculate_new_main_alpha(main, match, new_match_alpha) # Quickly updating the weights and bias by used 2 new alpha values # 1). calculates the delta weights, Formula: # delta main = (new alpha 1 - old alpha 1) * target1 * x1 # delta match = (new alpha 2 - old alpha 2) * target2 * x2 # delta weights = delta main + delta match main_factor = (new_main_alpha - main.alpha_value) * main.target_value delta_main = np.multiply(main.features, main_factor) match_factor = (new_match_alpha - match.alpha_value) * match.target_value delta_match = np.multiply(match.features, match_factor) delta_weights = np.add(delta_main, delta_match) # 2). let original weights + delta weights to be new weights array, Formula: new_weights = np.add(self.weights, delta_weights) # 这里 new_weights 会是 numpy.ndarray del self.weights[:] self.weights = new_weights.tolist() # 3). 
quickly updating bias via 2 samples (Main & Match), Formula: # W: weights, X: sample features, b: bias, T: sample target value (+1 / -1) # WX - b = T # -> -b = T - WX # b = WX -T # 故 new bias = new weights * X - (+1 or -1) # +1 或 -1 是看当前的 X 是被分到 +1 或者 -1 的标签(Target) # 这里会有 2 个 new bias, 再去按照条件做挑选 1 个出来用。 # 以下有个更新 bias 的方法( New, Old): # Linear method # new_main_bias = np.dot(self.weights, main.features) - main.target_value # new_match_bias = np.dot(self.weights, match.features) - match.target_value # Old method new_main_bias = self.bias + main.error_value + ((new_main_alpha - main.alpha_value) * main.target_value * self.kernel.calculate(main.features, main.features)) + ((new_match_alpha - match.alpha_value) * match.target_value * self.kernel.calculate(match.features, main.features)) new_match_bias = self.bias + match.error_value + ((new_main_alpha - main.alpha_value) * main.target_value * self.kernel.calculate(main.features, match.features)) + ((new_match_alpha - match.alpha_value) * match.target_value * self.kernel.calculate(match.features, match.features)) # 4). 
to choose the final bias or to get the average value of biases self.samples[main_index].alpha_value = new_main_alpha self.samples[match_index].alpha_value = new_match_alpha new_bias = 0.0 if self._is_accept_alpha(new_main_alpha): new_bias = new_main_bias elif self._is_accept_alpha(new_match_alpha): new_bias = new_match_bias else: new_bias = (new_main_bias + new_match_bias) * 0.5 # Update old bias self.bias = new_bias # 更新 Weights / Bias def _start_to_update(self, waiting_samples=[]): # if len(waiting_samples) == 0: # self._completion() # return # 更新参数(权重与偏权)后,再判断是否需要停止迭代或要继续下一迭代的训练 training_result = self._update_parameters(waiting_samples) self.split_index = 0 self.examine_all = True # 完成 1 个迭代的运算 if training_result == TrainingTypes.OneIterationFinished: # 先判断迭代是否达到上限 if self.iteration_times >= self.max_iteration: self._completion() elif self.iteration_update_count == 0: self._completion() else: # 继续迭代运算 self.iteration_update_count = 0 self._iteration() self._training() # 所有样本点都符合 KKT 条件 elif training_result == TrainingTypes.AllConformedKKT: self._completion() else: # TrainingTypes.Failed self._completion() # 找出不符合 KKT 条件的样本点 (等待更新的样本点) def _samples_without_kkt(self, split_index=0): waiting_samples = [] for sample in self.samples[split_index:]: is_conform_kkt = sample.is_confom_kkt(self.samples, self.bias, self.const_value) # 不符合 KKT 条件 if is_conform_kkt == False and sample.alpha_value > 0 and sample.alpha_value < self.const_value: # 把要更新的样本记起来(不符合 KKT 的都为待更新样本) waiting_samples.append(sample) return waiting_samples # 是否所有样本都已符合 KKT, return BOOL def _all_conform_kkt(self): all_conform_kkt = True for sample in self.samples: all_conform_kkt = sample.is_confom_kkt(self.samples, self.bias, self.const_value) # 有任一样本点不符合 KKT 条件 if all_conform_kkt == False: sum_x = np.dot(self.weights, sample.features) kkt_value = sample.target_value * (sum_x - self.bias) break return all_conform_kkt # 计算每个Sample 的 Error Value def _calculate_error_value(self): errors = [] for 
current_sample in self.samples: # Sample error error_value = 0.0 # 跟其它的样本点做比较(计算误差值) for other_sample in self.samples: # kernel_value = 求当前的主要 current_sample 特征值与所有的 other_sample 特征值做内和积 # (包含目前 current_sample 自己对自己的内积),而后将内积值再传入至 kernel 做运算后的值。 kernel_value = self.kernel.calculate(current_sample.features, other_sample.features) other_target_value = other_sample.target_value other_alpha_value = other_sample.alpha_value error_value += (other_target_value * other_alpha_value * kernel_value) error_value += -self.bias - current_sample.target_value errors.append(error_value) # 将Error Value 存回去当前的样本点的误差值里 current_sample.error_value = error_value return errors # 计算 New Matched Pattern Alpha Value & 判断其是否符合上下限范围 def _calculate_new_match_alpha(self, main_sample, match_sample): old_main_alpha = main_sample.alpha_value main_target = main_sample.target_value # Update the alpha value of match-pattern in first # Old match alpha value + ( match target value * ( main error - match error)) / ((x1 * x1 ) + ( x2 * x2) + (2 * x1 * x2) old_match_alpha = match_sample.alpha_value match_target = match_sample.target_value # 分子:match target * ( main error - match error) and it won't need to do fabs(error) numerator = match_target * (main_sample.error_value - match_sample.error_value) # 分母 denominator = self.kernel.calculate(main_sample.features, main_sample.features) + self.kernel.calculate(match_sample.features, match_sample.features) - (2.0 * self.kernel.calculate(main_sample.features, match_sample.features)) # New match alpha new_match_alpha = old_match_alpha + (numerator / denominator) # Checking the max-min limitation(检查上下限范围) min_scope = 0.0 max_scope = 0.0 # 相关讯号: If main target * match target = -1 (minor singal), using this formula: if main_target * match_target < 0.0: # Min scope is MAX( 0.0f, (old_match_alpha - old_main_alpha)) min_scope = np.maximum(0.0, (old_match_alpha - old_main_alpha)) # Max scope is MIN( const value, const value + old match alpha - old main alpha) max_scope 
= np.minimum(self.const_value, (self.const_value + old_match_alpha - old_main_alpha)) else: # 同讯号 # If main target * match target = 1 (plus singal), using this formula: # Min scope is MIN(0.0f, ( old main alpha + old match alpha - const value)) min_scope = np.maximum(0.0, (old_main_alpha + old_match_alpha - self.const_value)) max_scope = np.minimum(self.const_value, (old_match_alpha + old_main_alpha)) # Compares max and min value of new match alpha value. # 如果 match 的 alpha 值在原公式制定的标准范围内,就什么都不处理,仅处理以下 2 个条件 # 如果 match 的 alpha 值小于下限值,就变成下限值 if new_match_alpha < min_scope: new_match_alpha = min_scope # 如果大于上限值,就变成上限值 elif new_match_alpha > max_scope: new_match_alpha = max_scope return new_match_alpha # 更新 New Main Alpha Value def _calculate_new_main_alpha(self, main_sample, match_sample, new_match_alpha): # Formula: new main alpha = old main alpha + ( main target * match target * (old match alpha - new match alpha)) return main_sample.alpha_value + (main_sample.target_value * match_sample.target_value * (match_sample.alpha_value - new_match_alpha)) # 判断 New Alpha Value 是否在接受范围里 def _is_accept_alpha(self, alpha_value=0.0): return True if(alpha_value > 0.0 and alpha_value < self.const_value) else False
self.weights[:] for i in xrange(0, count): self.weights.append(0.0) def clear_samples(self):
identifier_body
coeditor.rs
use druid::{Selector, WidgetPod, WidgetId, ExtEventSink, EventCtx, Event, Env, Widget, UpdateCtx, LayoutCtx, PaintCtx, BoxConstraints, Target, LifeCycle, LifeCycleCtx, Size}; use std::sync::Arc; use tokio::sync::broadcast::{Sender}; use tokio::task::JoinHandle; use parking_lot::RwLock; use crate::{RustpadClient, Edit}; use std::time::Duration; use crate::editor_binding::EditorBinding; use crate::code_editor::code_editor::CodeEditor; use crate::code_editor::text::{Selection, EditableText}; use tokio::sync::broadcast; use std::collections::HashMap; use tokio_tungstenite::tungstenite::Message; use tokio_tungstenite::connect_async; use futures::StreamExt; use log::{info, warn}; pub const COEDITOR_INIT_CLIENT: Selector<Arc<RwLock<RustpadClient>>> = Selector::new("coeditor-init-client"); pub const USER_EDIT_SELECTOR: Selector<Edit> = Selector::new("user-edit"); pub const USER_CURSOR_UPDATE_SELECTOR: Selector<()> = Selector::new("user-cursor-data"); fn create_connection_loop(client: Arc<RwLock<RustpadClient>>, close_tx: Sender<()>) -> JoinHandle<()> { tokio::spawn(async move { info!("connecting"); let conn = client.read().server_url.clone(); loop { let x = Arc::clone(&client); if try_connect(&conn, x, close_tx.clone()).await.is_none() { break; } tokio::time::sleep(Duration::from_millis(1000)).await; warn!("Reconnecting ..."); } }) } async fn try_connect(connect_addr: &String, client: Arc<RwLock<RustpadClient>>, close_tx: Sender<()>) -> Option<()> { let url = url::Url::parse(connect_addr).unwrap(); let (ws_tx, ws_rx) = futures_channel::mpsc::unbounded::<Message>(); client.write().ws_sender = Some(ws_tx.clone()); client.write().users.clear(); // let res = connect_async(url).await; if res.is_err() { eprintln!("{:?}", res.err().unwrap()); return Some(()); } let (ws_stream, _) = res.unwrap(); println!("WebSocket handshake has been successfully completed"); client.read().on_connected.invoke(()); let (write, read) = ws_stream.split(); let websocket_tx = 
ws_rx.map(Ok).forward(write); let client2 = Arc::clone(&client); let receive_handler = read.for_each(|message| async { if message.is_err()
let data = message.unwrap().to_string(); println!("Received: {}", &data); client2.write().handle_message(serde_json::from_slice(data.as_bytes()).expect("parse data failed")); }); client.write().send_info(); client.write().send_cursor_data(); if let Some(outstanding) = &client.read().outstanding { client.write().send_operation(outstanding); } let mut close_rx = close_tx.subscribe(); tokio::select! { _ = close_rx.recv() => { ws_tx.unbounded_send(Message::Close(None)).unwrap(); println!("client closed."); return None; } _ = websocket_tx => {} _ = receive_handler => { println!("server closed"); } } println!("{} disconnected", &connect_addr); client.write().ws_sender = None; Some(()) } pub struct CoEditorWidget { inner: WidgetPod<EditorBinding, CodeEditor<EditorBinding>>, id: WidgetId, pub server_url: String, client: Option<Arc<RwLock<RustpadClient>>>, connection_handle: Option<JoinHandle<()>>, event_sink: Option<ExtEventSink>, close_tx: Sender<()>, last_selection: Selection, } impl Drop for CoEditorWidget { fn drop(&mut self) { self.close_tx.send(()).unwrap(); futures::executor::block_on( tokio::time::timeout(Duration::from_secs(5), self.connection_handle.take().unwrap(), ) ); println!("CoEditorWidget destructed"); } } impl CoEditorWidget { pub fn new(server_url: String) -> Self { println!("CoEditorWidget created"); CoEditorWidget { inner: WidgetPod::new(CodeEditor::<EditorBinding>::multiline()), server_url, id: WidgetId::next(), client: None, connection_handle: None, event_sink: None, close_tx: broadcast::channel(1).0, last_selection: Selection::default(), } } } impl Widget<EditorBinding> for CoEditorWidget { fn event(&mut self, ctx: &mut EventCtx, event: &Event, data: &mut EditorBinding, env: &Env) { if let Event::Command(cmd) = event { println!("received {:?}", cmd); } match event { Event::Command(command) if command.get(COEDITOR_INIT_CLIENT).is_some() && command.target() == Target::Widget(ctx.widget_id()) => { let client = 
command.get(COEDITOR_INIT_CLIENT).unwrap(); data.set_client(client); println!("editor binding client initialized"); } Event::Command(command) if command.get(USER_EDIT_SELECTOR).is_some() && command.target() == Target::Widget(ctx.widget_id()) => { println!("received edit command"); let edit = command.get(USER_EDIT_SELECTOR).unwrap(); let selection = self.inner.widget().text().borrow().selection(); let transform_selection = |selection: Selection| -> Selection { let transform_index = |x: usize| -> usize { if x < edit.begin { x } else if x > edit.end { x + edit.begin + edit.content.len() - edit.end } else { edit.begin + edit.content.len() } }; Selection::new( transform_index(selection.anchor), transform_index(selection.active), ) }; data.edit_without_callback(edit); let _ = self.inner.widget_mut().text_mut().borrow_mut().set_selection(transform_selection(selection)); self.inner.widget_mut().text_mut().borrow_mut().decorations.iter_mut() .for_each(|(_, b)| *b = transform_selection(b.clone())); } Event::Command(command) if command.get(USER_CURSOR_UPDATE_SELECTOR).is_some() && command.target() == Target::Widget(ctx.widget_id()) => { println!("received cursor command"); let content = &data.content; let unicode_offset_to_utf8_offset = |offset: u32| -> usize { content.iter().take(offset as usize).collect::<String>().len() }; let mut new_decorations = HashMap::new(); let my_id = self.client.as_ref().unwrap().read().id(); self.client.as_ref().unwrap().read().user_cursors.iter() .filter(|(&id, _)| id != my_id) .filter(|(_, data)| !data.selections.is_empty()) .for_each(|(&id, sel)| { new_decorations.insert(id, Selection::new( unicode_offset_to_utf8_offset(sel.selections[0].0), unicode_offset_to_utf8_offset(sel.selections[0].1), )); }); self.inner.widget_mut().text_mut().borrow_mut().decorations = new_decorations; } _ => self.inner.event(ctx, event, data, env) } } fn lifecycle(&mut self, ctx: &mut LifeCycleCtx, event: &LifeCycle, data: &EditorBinding, env: &Env) { 
self.inner.lifecycle(ctx, event, data, env); match event { LifeCycle::WidgetAdded => { self.id = ctx.widget_id(); println!("CoEditorWidget initialized with id: {:?}", self.id); self.event_sink = Some(ctx.get_external_handle()); let client = RustpadClient::create(self.server_url.clone()); client.write().widget_id = Some(self.id); client.write().set_event_sink( self.event_sink.as_ref().unwrap().clone(), self.id, ); self.client = Some(Arc::clone(&client)); ctx.get_external_handle().submit_command( COEDITOR_INIT_CLIENT, Box::new(Arc::clone(&client)), Target::Widget(self.id), ).expect("send command failed"); self.connection_handle = Some(create_connection_loop(client, self.close_tx.clone())); } _ => {} } } fn update(&mut self, ctx: &mut UpdateCtx, old_data: &EditorBinding, data: &EditorBinding, env: &Env) { if old_data.after_edits.len() != data.after_edits.len() { println!("editor binding's callback changed from {} to {}", old_data.after_edits.len(), data.after_edits.len()); } let new_selection = self.inner.widget().text().borrow().selection(); if self.last_selection != new_selection { self.last_selection = new_selection; let borrow = self.inner.widget_mut().text_mut().borrow_mut(); let content = &borrow.layout.text().unwrap().content_as_string; let active = content.slice(0..new_selection.active).unwrap_or_default().to_string().chars().count() as u32; let anchor = content.slice(0..new_selection.anchor).unwrap_or_default().to_string().chars().count() as u32; self.client.as_ref().unwrap().write().update_and_send_cursor_data((active, anchor)); } self.inner.update(ctx, data, env); } fn layout(&mut self, ctx: &mut LayoutCtx, bc: &BoxConstraints, data: &EditorBinding, env: &Env) -> Size { self.inner.layout(ctx, bc, data, env) } fn paint(&mut self, ctx: &mut PaintCtx, data: &EditorBinding, env: &Env) { self.inner.paint(ctx, data, env) } }
{ return; }
conditional_block
coeditor.rs
use druid::{Selector, WidgetPod, WidgetId, ExtEventSink, EventCtx, Event, Env, Widget, UpdateCtx, LayoutCtx, PaintCtx, BoxConstraints, Target, LifeCycle, LifeCycleCtx, Size}; use std::sync::Arc; use tokio::sync::broadcast::{Sender}; use tokio::task::JoinHandle; use parking_lot::RwLock; use crate::{RustpadClient, Edit}; use std::time::Duration; use crate::editor_binding::EditorBinding; use crate::code_editor::code_editor::CodeEditor; use crate::code_editor::text::{Selection, EditableText}; use tokio::sync::broadcast; use std::collections::HashMap;
use futures::StreamExt; use log::{info, warn}; pub const COEDITOR_INIT_CLIENT: Selector<Arc<RwLock<RustpadClient>>> = Selector::new("coeditor-init-client"); pub const USER_EDIT_SELECTOR: Selector<Edit> = Selector::new("user-edit"); pub const USER_CURSOR_UPDATE_SELECTOR: Selector<()> = Selector::new("user-cursor-data"); fn create_connection_loop(client: Arc<RwLock<RustpadClient>>, close_tx: Sender<()>) -> JoinHandle<()> { tokio::spawn(async move { info!("connecting"); let conn = client.read().server_url.clone(); loop { let x = Arc::clone(&client); if try_connect(&conn, x, close_tx.clone()).await.is_none() { break; } tokio::time::sleep(Duration::from_millis(1000)).await; warn!("Reconnecting ..."); } }) } async fn try_connect(connect_addr: &String, client: Arc<RwLock<RustpadClient>>, close_tx: Sender<()>) -> Option<()> { let url = url::Url::parse(connect_addr).unwrap(); let (ws_tx, ws_rx) = futures_channel::mpsc::unbounded::<Message>(); client.write().ws_sender = Some(ws_tx.clone()); client.write().users.clear(); // let res = connect_async(url).await; if res.is_err() { eprintln!("{:?}", res.err().unwrap()); return Some(()); } let (ws_stream, _) = res.unwrap(); println!("WebSocket handshake has been successfully completed"); client.read().on_connected.invoke(()); let (write, read) = ws_stream.split(); let websocket_tx = ws_rx.map(Ok).forward(write); let client2 = Arc::clone(&client); let receive_handler = read.for_each(|message| async { if message.is_err() { return; } let data = message.unwrap().to_string(); println!("Received: {}", &data); client2.write().handle_message(serde_json::from_slice(data.as_bytes()).expect("parse data failed")); }); client.write().send_info(); client.write().send_cursor_data(); if let Some(outstanding) = &client.read().outstanding { client.write().send_operation(outstanding); } let mut close_rx = close_tx.subscribe(); tokio::select! 
{ _ = close_rx.recv() => { ws_tx.unbounded_send(Message::Close(None)).unwrap(); println!("client closed."); return None; } _ = websocket_tx => {} _ = receive_handler => { println!("server closed"); } } println!("{} disconnected", &connect_addr); client.write().ws_sender = None; Some(()) } pub struct CoEditorWidget { inner: WidgetPod<EditorBinding, CodeEditor<EditorBinding>>, id: WidgetId, pub server_url: String, client: Option<Arc<RwLock<RustpadClient>>>, connection_handle: Option<JoinHandle<()>>, event_sink: Option<ExtEventSink>, close_tx: Sender<()>, last_selection: Selection, } impl Drop for CoEditorWidget { fn drop(&mut self) { self.close_tx.send(()).unwrap(); futures::executor::block_on( tokio::time::timeout(Duration::from_secs(5), self.connection_handle.take().unwrap(), ) ); println!("CoEditorWidget destructed"); } } impl CoEditorWidget { pub fn new(server_url: String) -> Self { println!("CoEditorWidget created"); CoEditorWidget { inner: WidgetPod::new(CodeEditor::<EditorBinding>::multiline()), server_url, id: WidgetId::next(), client: None, connection_handle: None, event_sink: None, close_tx: broadcast::channel(1).0, last_selection: Selection::default(), } } } impl Widget<EditorBinding> for CoEditorWidget { fn event(&mut self, ctx: &mut EventCtx, event: &Event, data: &mut EditorBinding, env: &Env) { if let Event::Command(cmd) = event { println!("received {:?}", cmd); } match event { Event::Command(command) if command.get(COEDITOR_INIT_CLIENT).is_some() && command.target() == Target::Widget(ctx.widget_id()) => { let client = command.get(COEDITOR_INIT_CLIENT).unwrap(); data.set_client(client); println!("editor binding client initialized"); } Event::Command(command) if command.get(USER_EDIT_SELECTOR).is_some() && command.target() == Target::Widget(ctx.widget_id()) => { println!("received edit command"); let edit = command.get(USER_EDIT_SELECTOR).unwrap(); let selection = self.inner.widget().text().borrow().selection(); let transform_selection = |selection: 
Selection| -> Selection { let transform_index = |x: usize| -> usize { if x < edit.begin { x } else if x > edit.end { x + edit.begin + edit.content.len() - edit.end } else { edit.begin + edit.content.len() } }; Selection::new( transform_index(selection.anchor), transform_index(selection.active), ) }; data.edit_without_callback(edit); let _ = self.inner.widget_mut().text_mut().borrow_mut().set_selection(transform_selection(selection)); self.inner.widget_mut().text_mut().borrow_mut().decorations.iter_mut() .for_each(|(_, b)| *b = transform_selection(b.clone())); } Event::Command(command) if command.get(USER_CURSOR_UPDATE_SELECTOR).is_some() && command.target() == Target::Widget(ctx.widget_id()) => { println!("received cursor command"); let content = &data.content; let unicode_offset_to_utf8_offset = |offset: u32| -> usize { content.iter().take(offset as usize).collect::<String>().len() }; let mut new_decorations = HashMap::new(); let my_id = self.client.as_ref().unwrap().read().id(); self.client.as_ref().unwrap().read().user_cursors.iter() .filter(|(&id, _)| id != my_id) .filter(|(_, data)| !data.selections.is_empty()) .for_each(|(&id, sel)| { new_decorations.insert(id, Selection::new( unicode_offset_to_utf8_offset(sel.selections[0].0), unicode_offset_to_utf8_offset(sel.selections[0].1), )); }); self.inner.widget_mut().text_mut().borrow_mut().decorations = new_decorations; } _ => self.inner.event(ctx, event, data, env) } } fn lifecycle(&mut self, ctx: &mut LifeCycleCtx, event: &LifeCycle, data: &EditorBinding, env: &Env) { self.inner.lifecycle(ctx, event, data, env); match event { LifeCycle::WidgetAdded => { self.id = ctx.widget_id(); println!("CoEditorWidget initialized with id: {:?}", self.id); self.event_sink = Some(ctx.get_external_handle()); let client = RustpadClient::create(self.server_url.clone()); client.write().widget_id = Some(self.id); client.write().set_event_sink( self.event_sink.as_ref().unwrap().clone(), self.id, ); self.client = 
Some(Arc::clone(&client)); ctx.get_external_handle().submit_command( COEDITOR_INIT_CLIENT, Box::new(Arc::clone(&client)), Target::Widget(self.id), ).expect("send command failed"); self.connection_handle = Some(create_connection_loop(client, self.close_tx.clone())); } _ => {} } } fn update(&mut self, ctx: &mut UpdateCtx, old_data: &EditorBinding, data: &EditorBinding, env: &Env) { if old_data.after_edits.len() != data.after_edits.len() { println!("editor binding's callback changed from {} to {}", old_data.after_edits.len(), data.after_edits.len()); } let new_selection = self.inner.widget().text().borrow().selection(); if self.last_selection != new_selection { self.last_selection = new_selection; let borrow = self.inner.widget_mut().text_mut().borrow_mut(); let content = &borrow.layout.text().unwrap().content_as_string; let active = content.slice(0..new_selection.active).unwrap_or_default().to_string().chars().count() as u32; let anchor = content.slice(0..new_selection.anchor).unwrap_or_default().to_string().chars().count() as u32; self.client.as_ref().unwrap().write().update_and_send_cursor_data((active, anchor)); } self.inner.update(ctx, data, env); } fn layout(&mut self, ctx: &mut LayoutCtx, bc: &BoxConstraints, data: &EditorBinding, env: &Env) -> Size { self.inner.layout(ctx, bc, data, env) } fn paint(&mut self, ctx: &mut PaintCtx, data: &EditorBinding, env: &Env) { self.inner.paint(ctx, data, env) } }
use tokio_tungstenite::tungstenite::Message; use tokio_tungstenite::connect_async;
random_line_split
coeditor.rs
use druid::{Selector, WidgetPod, WidgetId, ExtEventSink, EventCtx, Event, Env, Widget, UpdateCtx, LayoutCtx, PaintCtx, BoxConstraints, Target, LifeCycle, LifeCycleCtx, Size}; use std::sync::Arc; use tokio::sync::broadcast::{Sender}; use tokio::task::JoinHandle; use parking_lot::RwLock; use crate::{RustpadClient, Edit}; use std::time::Duration; use crate::editor_binding::EditorBinding; use crate::code_editor::code_editor::CodeEditor; use crate::code_editor::text::{Selection, EditableText}; use tokio::sync::broadcast; use std::collections::HashMap; use tokio_tungstenite::tungstenite::Message; use tokio_tungstenite::connect_async; use futures::StreamExt; use log::{info, warn}; pub const COEDITOR_INIT_CLIENT: Selector<Arc<RwLock<RustpadClient>>> = Selector::new("coeditor-init-client"); pub const USER_EDIT_SELECTOR: Selector<Edit> = Selector::new("user-edit"); pub const USER_CURSOR_UPDATE_SELECTOR: Selector<()> = Selector::new("user-cursor-data"); fn create_connection_loop(client: Arc<RwLock<RustpadClient>>, close_tx: Sender<()>) -> JoinHandle<()> { tokio::spawn(async move { info!("connecting"); let conn = client.read().server_url.clone(); loop { let x = Arc::clone(&client); if try_connect(&conn, x, close_tx.clone()).await.is_none() { break; } tokio::time::sleep(Duration::from_millis(1000)).await; warn!("Reconnecting ..."); } }) } async fn try_connect(connect_addr: &String, client: Arc<RwLock<RustpadClient>>, close_tx: Sender<()>) -> Option<()> { let url = url::Url::parse(connect_addr).unwrap(); let (ws_tx, ws_rx) = futures_channel::mpsc::unbounded::<Message>(); client.write().ws_sender = Some(ws_tx.clone()); client.write().users.clear(); // let res = connect_async(url).await; if res.is_err() { eprintln!("{:?}", res.err().unwrap()); return Some(()); } let (ws_stream, _) = res.unwrap(); println!("WebSocket handshake has been successfully completed"); client.read().on_connected.invoke(()); let (write, read) = ws_stream.split(); let websocket_tx = 
ws_rx.map(Ok).forward(write); let client2 = Arc::clone(&client); let receive_handler = read.for_each(|message| async { if message.is_err() { return; } let data = message.unwrap().to_string(); println!("Received: {}", &data); client2.write().handle_message(serde_json::from_slice(data.as_bytes()).expect("parse data failed")); }); client.write().send_info(); client.write().send_cursor_data(); if let Some(outstanding) = &client.read().outstanding { client.write().send_operation(outstanding); } let mut close_rx = close_tx.subscribe(); tokio::select! { _ = close_rx.recv() => { ws_tx.unbounded_send(Message::Close(None)).unwrap(); println!("client closed."); return None; } _ = websocket_tx => {} _ = receive_handler => { println!("server closed"); } } println!("{} disconnected", &connect_addr); client.write().ws_sender = None; Some(()) } pub struct CoEditorWidget { inner: WidgetPod<EditorBinding, CodeEditor<EditorBinding>>, id: WidgetId, pub server_url: String, client: Option<Arc<RwLock<RustpadClient>>>, connection_handle: Option<JoinHandle<()>>, event_sink: Option<ExtEventSink>, close_tx: Sender<()>, last_selection: Selection, } impl Drop for CoEditorWidget { fn drop(&mut self)
} impl CoEditorWidget { pub fn new(server_url: String) -> Self { println!("CoEditorWidget created"); CoEditorWidget { inner: WidgetPod::new(CodeEditor::<EditorBinding>::multiline()), server_url, id: WidgetId::next(), client: None, connection_handle: None, event_sink: None, close_tx: broadcast::channel(1).0, last_selection: Selection::default(), } } } impl Widget<EditorBinding> for CoEditorWidget { fn event(&mut self, ctx: &mut EventCtx, event: &Event, data: &mut EditorBinding, env: &Env) { if let Event::Command(cmd) = event { println!("received {:?}", cmd); } match event { Event::Command(command) if command.get(COEDITOR_INIT_CLIENT).is_some() && command.target() == Target::Widget(ctx.widget_id()) => { let client = command.get(COEDITOR_INIT_CLIENT).unwrap(); data.set_client(client); println!("editor binding client initialized"); } Event::Command(command) if command.get(USER_EDIT_SELECTOR).is_some() && command.target() == Target::Widget(ctx.widget_id()) => { println!("received edit command"); let edit = command.get(USER_EDIT_SELECTOR).unwrap(); let selection = self.inner.widget().text().borrow().selection(); let transform_selection = |selection: Selection| -> Selection { let transform_index = |x: usize| -> usize { if x < edit.begin { x } else if x > edit.end { x + edit.begin + edit.content.len() - edit.end } else { edit.begin + edit.content.len() } }; Selection::new( transform_index(selection.anchor), transform_index(selection.active), ) }; data.edit_without_callback(edit); let _ = self.inner.widget_mut().text_mut().borrow_mut().set_selection(transform_selection(selection)); self.inner.widget_mut().text_mut().borrow_mut().decorations.iter_mut() .for_each(|(_, b)| *b = transform_selection(b.clone())); } Event::Command(command) if command.get(USER_CURSOR_UPDATE_SELECTOR).is_some() && command.target() == Target::Widget(ctx.widget_id()) => { println!("received cursor command"); let content = &data.content; let unicode_offset_to_utf8_offset = |offset: u32| -> usize { 
content.iter().take(offset as usize).collect::<String>().len() }; let mut new_decorations = HashMap::new(); let my_id = self.client.as_ref().unwrap().read().id(); self.client.as_ref().unwrap().read().user_cursors.iter() .filter(|(&id, _)| id != my_id) .filter(|(_, data)| !data.selections.is_empty()) .for_each(|(&id, sel)| { new_decorations.insert(id, Selection::new( unicode_offset_to_utf8_offset(sel.selections[0].0), unicode_offset_to_utf8_offset(sel.selections[0].1), )); }); self.inner.widget_mut().text_mut().borrow_mut().decorations = new_decorations; } _ => self.inner.event(ctx, event, data, env) } } fn lifecycle(&mut self, ctx: &mut LifeCycleCtx, event: &LifeCycle, data: &EditorBinding, env: &Env) { self.inner.lifecycle(ctx, event, data, env); match event { LifeCycle::WidgetAdded => { self.id = ctx.widget_id(); println!("CoEditorWidget initialized with id: {:?}", self.id); self.event_sink = Some(ctx.get_external_handle()); let client = RustpadClient::create(self.server_url.clone()); client.write().widget_id = Some(self.id); client.write().set_event_sink( self.event_sink.as_ref().unwrap().clone(), self.id, ); self.client = Some(Arc::clone(&client)); ctx.get_external_handle().submit_command( COEDITOR_INIT_CLIENT, Box::new(Arc::clone(&client)), Target::Widget(self.id), ).expect("send command failed"); self.connection_handle = Some(create_connection_loop(client, self.close_tx.clone())); } _ => {} } } fn update(&mut self, ctx: &mut UpdateCtx, old_data: &EditorBinding, data: &EditorBinding, env: &Env) { if old_data.after_edits.len() != data.after_edits.len() { println!("editor binding's callback changed from {} to {}", old_data.after_edits.len(), data.after_edits.len()); } let new_selection = self.inner.widget().text().borrow().selection(); if self.last_selection != new_selection { self.last_selection = new_selection; let borrow = self.inner.widget_mut().text_mut().borrow_mut(); let content = &borrow.layout.text().unwrap().content_as_string; let active = 
content.slice(0..new_selection.active).unwrap_or_default().to_string().chars().count() as u32; let anchor = content.slice(0..new_selection.anchor).unwrap_or_default().to_string().chars().count() as u32; self.client.as_ref().unwrap().write().update_and_send_cursor_data((active, anchor)); } self.inner.update(ctx, data, env); } fn layout(&mut self, ctx: &mut LayoutCtx, bc: &BoxConstraints, data: &EditorBinding, env: &Env) -> Size { self.inner.layout(ctx, bc, data, env) } fn paint(&mut self, ctx: &mut PaintCtx, data: &EditorBinding, env: &Env) { self.inner.paint(ctx, data, env) } }
{ self.close_tx.send(()).unwrap(); futures::executor::block_on( tokio::time::timeout(Duration::from_secs(5), self.connection_handle.take().unwrap(), ) ); println!("CoEditorWidget destructed"); }
identifier_body
coeditor.rs
use druid::{Selector, WidgetPod, WidgetId, ExtEventSink, EventCtx, Event, Env, Widget, UpdateCtx, LayoutCtx, PaintCtx, BoxConstraints, Target, LifeCycle, LifeCycleCtx, Size}; use std::sync::Arc; use tokio::sync::broadcast::{Sender}; use tokio::task::JoinHandle; use parking_lot::RwLock; use crate::{RustpadClient, Edit}; use std::time::Duration; use crate::editor_binding::EditorBinding; use crate::code_editor::code_editor::CodeEditor; use crate::code_editor::text::{Selection, EditableText}; use tokio::sync::broadcast; use std::collections::HashMap; use tokio_tungstenite::tungstenite::Message; use tokio_tungstenite::connect_async; use futures::StreamExt; use log::{info, warn}; pub const COEDITOR_INIT_CLIENT: Selector<Arc<RwLock<RustpadClient>>> = Selector::new("coeditor-init-client"); pub const USER_EDIT_SELECTOR: Selector<Edit> = Selector::new("user-edit"); pub const USER_CURSOR_UPDATE_SELECTOR: Selector<()> = Selector::new("user-cursor-data"); fn create_connection_loop(client: Arc<RwLock<RustpadClient>>, close_tx: Sender<()>) -> JoinHandle<()> { tokio::spawn(async move { info!("connecting"); let conn = client.read().server_url.clone(); loop { let x = Arc::clone(&client); if try_connect(&conn, x, close_tx.clone()).await.is_none() { break; } tokio::time::sleep(Duration::from_millis(1000)).await; warn!("Reconnecting ..."); } }) } async fn try_connect(connect_addr: &String, client: Arc<RwLock<RustpadClient>>, close_tx: Sender<()>) -> Option<()> { let url = url::Url::parse(connect_addr).unwrap(); let (ws_tx, ws_rx) = futures_channel::mpsc::unbounded::<Message>(); client.write().ws_sender = Some(ws_tx.clone()); client.write().users.clear(); // let res = connect_async(url).await; if res.is_err() { eprintln!("{:?}", res.err().unwrap()); return Some(()); } let (ws_stream, _) = res.unwrap(); println!("WebSocket handshake has been successfully completed"); client.read().on_connected.invoke(()); let (write, read) = ws_stream.split(); let websocket_tx = 
ws_rx.map(Ok).forward(write); let client2 = Arc::clone(&client); let receive_handler = read.for_each(|message| async { if message.is_err() { return; } let data = message.unwrap().to_string(); println!("Received: {}", &data); client2.write().handle_message(serde_json::from_slice(data.as_bytes()).expect("parse data failed")); }); client.write().send_info(); client.write().send_cursor_data(); if let Some(outstanding) = &client.read().outstanding { client.write().send_operation(outstanding); } let mut close_rx = close_tx.subscribe(); tokio::select! { _ = close_rx.recv() => { ws_tx.unbounded_send(Message::Close(None)).unwrap(); println!("client closed."); return None; } _ = websocket_tx => {} _ = receive_handler => { println!("server closed"); } } println!("{} disconnected", &connect_addr); client.write().ws_sender = None; Some(()) } pub struct CoEditorWidget { inner: WidgetPod<EditorBinding, CodeEditor<EditorBinding>>, id: WidgetId, pub server_url: String, client: Option<Arc<RwLock<RustpadClient>>>, connection_handle: Option<JoinHandle<()>>, event_sink: Option<ExtEventSink>, close_tx: Sender<()>, last_selection: Selection, } impl Drop for CoEditorWidget { fn drop(&mut self) { self.close_tx.send(()).unwrap(); futures::executor::block_on( tokio::time::timeout(Duration::from_secs(5), self.connection_handle.take().unwrap(), ) ); println!("CoEditorWidget destructed"); } } impl CoEditorWidget { pub fn new(server_url: String) -> Self { println!("CoEditorWidget created"); CoEditorWidget { inner: WidgetPod::new(CodeEditor::<EditorBinding>::multiline()), server_url, id: WidgetId::next(), client: None, connection_handle: None, event_sink: None, close_tx: broadcast::channel(1).0, last_selection: Selection::default(), } } } impl Widget<EditorBinding> for CoEditorWidget { fn
(&mut self, ctx: &mut EventCtx, event: &Event, data: &mut EditorBinding, env: &Env) { if let Event::Command(cmd) = event { println!("received {:?}", cmd); } match event { Event::Command(command) if command.get(COEDITOR_INIT_CLIENT).is_some() && command.target() == Target::Widget(ctx.widget_id()) => { let client = command.get(COEDITOR_INIT_CLIENT).unwrap(); data.set_client(client); println!("editor binding client initialized"); } Event::Command(command) if command.get(USER_EDIT_SELECTOR).is_some() && command.target() == Target::Widget(ctx.widget_id()) => { println!("received edit command"); let edit = command.get(USER_EDIT_SELECTOR).unwrap(); let selection = self.inner.widget().text().borrow().selection(); let transform_selection = |selection: Selection| -> Selection { let transform_index = |x: usize| -> usize { if x < edit.begin { x } else if x > edit.end { x + edit.begin + edit.content.len() - edit.end } else { edit.begin + edit.content.len() } }; Selection::new( transform_index(selection.anchor), transform_index(selection.active), ) }; data.edit_without_callback(edit); let _ = self.inner.widget_mut().text_mut().borrow_mut().set_selection(transform_selection(selection)); self.inner.widget_mut().text_mut().borrow_mut().decorations.iter_mut() .for_each(|(_, b)| *b = transform_selection(b.clone())); } Event::Command(command) if command.get(USER_CURSOR_UPDATE_SELECTOR).is_some() && command.target() == Target::Widget(ctx.widget_id()) => { println!("received cursor command"); let content = &data.content; let unicode_offset_to_utf8_offset = |offset: u32| -> usize { content.iter().take(offset as usize).collect::<String>().len() }; let mut new_decorations = HashMap::new(); let my_id = self.client.as_ref().unwrap().read().id(); self.client.as_ref().unwrap().read().user_cursors.iter() .filter(|(&id, _)| id != my_id) .filter(|(_, data)| !data.selections.is_empty()) .for_each(|(&id, sel)| { new_decorations.insert(id, Selection::new( 
unicode_offset_to_utf8_offset(sel.selections[0].0), unicode_offset_to_utf8_offset(sel.selections[0].1), )); }); self.inner.widget_mut().text_mut().borrow_mut().decorations = new_decorations; } _ => self.inner.event(ctx, event, data, env) } } fn lifecycle(&mut self, ctx: &mut LifeCycleCtx, event: &LifeCycle, data: &EditorBinding, env: &Env) { self.inner.lifecycle(ctx, event, data, env); match event { LifeCycle::WidgetAdded => { self.id = ctx.widget_id(); println!("CoEditorWidget initialized with id: {:?}", self.id); self.event_sink = Some(ctx.get_external_handle()); let client = RustpadClient::create(self.server_url.clone()); client.write().widget_id = Some(self.id); client.write().set_event_sink( self.event_sink.as_ref().unwrap().clone(), self.id, ); self.client = Some(Arc::clone(&client)); ctx.get_external_handle().submit_command( COEDITOR_INIT_CLIENT, Box::new(Arc::clone(&client)), Target::Widget(self.id), ).expect("send command failed"); self.connection_handle = Some(create_connection_loop(client, self.close_tx.clone())); } _ => {} } } fn update(&mut self, ctx: &mut UpdateCtx, old_data: &EditorBinding, data: &EditorBinding, env: &Env) { if old_data.after_edits.len() != data.after_edits.len() { println!("editor binding's callback changed from {} to {}", old_data.after_edits.len(), data.after_edits.len()); } let new_selection = self.inner.widget().text().borrow().selection(); if self.last_selection != new_selection { self.last_selection = new_selection; let borrow = self.inner.widget_mut().text_mut().borrow_mut(); let content = &borrow.layout.text().unwrap().content_as_string; let active = content.slice(0..new_selection.active).unwrap_or_default().to_string().chars().count() as u32; let anchor = content.slice(0..new_selection.anchor).unwrap_or_default().to_string().chars().count() as u32; self.client.as_ref().unwrap().write().update_and_send_cursor_data((active, anchor)); } self.inner.update(ctx, data, env); } fn layout(&mut self, ctx: &mut LayoutCtx, bc: 
&BoxConstraints, data: &EditorBinding, env: &Env) -> Size { self.inner.layout(ctx, bc, data, env) } fn paint(&mut self, ctx: &mut PaintCtx, data: &EditorBinding, env: &Env) { self.inner.paint(ctx, data, env) } }
event
identifier_name
build-xml.js
/** * @copyright Maichong Software Ltd. 2016 http://maichong.it * @date 2016-09-26 * @author Liang <liang@maichong.it> */ 'use strict'; const fs = require('fs'); const path = require('path'); const mkdirp = require('mkdirp'); const xmldom = require('xmldom'); const utils = require('./utils'); const config = require('./config')(); require('colors'); const DOMParser = xmldom.DOMParser; let _uid = 0; function uid() { _uid++; return _uid; } /** * 判断字符串中指定的位置是否是被包含在引号中 * @param string * @param n * @returns {boolean} */ function inText(string, n) { let firstIndex = string.search(/"|'/); if (firstIndex === -1 || firstIndex > n) return false; let char = ''; let last = ''; for (let i = 0; i < n; i++) { let c = string[i]; if (c === '"' || c === "'") { if (!char) { char = c; } else if (char === c && last !== '\\') { char = ''; } } last = c; } return char !== ''; } /** * 将带数据绑定的字符串替换 * @param {Object} from * @param {string} str 原始字符串 * @param {string} prefix 前缀 * @param {object} ignores 忽略的字符串map * @returns {string} */ function replaceString(from, str, prefix, ignores) { // 替换字符串中 {{}} 包含的表达式 // 获取类似 a.b.c 表达式中第一个
ction getFirstWord(word) { return word.match(/[_a-z][\w\d]*/i)[0]; } // 检查类似 a.b.c 格式表达式是否忽略绑定 function shouldIgnore(word, matchs, n) { if (word[0] === '"' || word[0] === "'" || /^\d+$/.test(word)) return true; let w = getFirstWord(word); if (ignores.hasOwnProperty(w) || (matchs && inText(matchs, n))) { return true; } if (['state', 'props'].indexOf(w) < 0) { console.error(`'${from.fromSrc}' 中发现无效变量引用 '${word}',XML模板中只能引用组件'props'和'state'中的数据。`.red); console.error('如果您的项目基于Labrador 0.5.x,请按照升级指南升级到0.6.x版本 https://github.com/maichong/labrador'); } return false; } if (prefix) { prefix += '.'; } else { prefix = ''; } return str.replace(/\{\{([^}]+)\}\}/ig, function (matchs, words) { // matchs 是{{xxxxx}}格式的字符串 // words 是{{}}中间的表达式 // ...foo if (/^\s*\.\.\.[\w_][\w\d\-_.\[\]]*\s*$/.test(words)) { let word = words.match(/\s*\.\.\.([\w_][\w\d\-_.\[\]]*)/)[1].trim(); if (shouldIgnore(word)) { return matchs; } return `{{...${prefix}${word}}}`; } let isArray = /{{\s*\[/.test(matchs); if (!isArray) { //支持对象简写 let arrays = words.split(','); if (arrays.length > 1) { let isObject = true; let props = arrays.map(function (str) { if (!isObject) return; // str 为对象中的一个属性, 可能为 a:b / a / ...a / ...a.b str = str.trim(); let arr = str.split(':'); if (arr.length === 1) { // 如果属性表达式中不包含冒号 // 如果为简写属性表达式,例如 {foo} if (/^[a-z_][\w\d]*$/i.test(str)) { if (ignores[str]) { return str + ':' + str; } return str + ':' + prefix + str; } // 属性展开表达式 ...foo if (/^\.{3}[a-z_][\w\d.\[\]]*$/i.test(str)) { let word = str.substr(3); if (shouldIgnore(word)) { return str; } return '...' 
+ prefix + word; } // 判定 ${matchs} 不为对象表达式 isObject = false; return; } // 存在冒号的对象属性表达式 let word = arr[1].trim(); // foo:2.3 if (/^[\d.]+$/.test(word)) { return arr[0] + ':' + word; } // foo:bar // 'foo':bar if (shouldIgnore(word)) { return str; } // foo:bar // 'foo':bar // foo return arr[0] + ':' + prefix + word; }); //console.log('isObject', isObject); if (isObject) { return '{{' + props.join(',') + '}}'; } } } return matchs.replace(/[^\.\w'"]([a-z_\$][\w\d\._\$]*)/ig, function (match, word, n) { if (shouldIgnore(word, matchs, n)) { return match; } return match[0] + prefix + word; }); }); } /** * 递归绑定XML中的节点 * @param from * @param node * @param comPrefix * @param valPrefix * @param clsPrefix * @param ignores */ function bind(from, node, comPrefix, valPrefix, clsPrefix, ignores) { ignores = Object.assign({ true: true, false: true, null: true, undefined: true }, ignores); let hasPath = false; //处理节点属性 let attributes = node.attributes; for (let i in attributes) { if (!/^\d+$/.test(i)) continue; let attr = attributes[i]; //处理属性值 if (attr.value.indexOf('{') > -1) { attr.value = replaceString(from, attr.value, valPrefix, ignores); } //绑定事件 if (/^(bind|catch)\w+/.test(attr.name)) { node.setAttribute('data-' + attr.name, attr.value); attr.value = '_dispatch'; if (!hasPath && comPrefix) { node.setAttribute('data-path', comPrefix); } } //如果是循环标签,则在子标签中忽略循环索引和值变量 if (attr.name === 'wx:for') { let index = node.getAttribute('wx:for-index') || 'index'; let item = node.getAttribute('wx:for-item') || 'item'; ignores[index] = true; ignores[item] = true; } if (clsPrefix && attr.name === 'class') { const matchArr = []; // "xxx {{a ? 'b' : 'c'}}" // => "xxx $" attr.value = attr.value.replace(/\{\{([^}]+)\}\}/ig, function (match) { matchArr.push(match); matchArr.push(match); return '$'; }); // => "xxx prefix-xxx $ prefix-$" attr.value = attr.value.split(' ').map(cls => `${cls} ${clsPrefix}-${cls}`).join(' '); // => "xxx prefix-xxx {{a ? 'b' : 'c'}} prefix-{{a ? 
'b' : 'c'}}" attr.value = attr.value.replace(/\$/g, function () { const matchItem = matchArr.shift(); return matchItem; }); } } //如果节点为文本 if (node.nodeName === '#text') { let data = node.data; if (data) { node.replaceData(0, data.length, replaceString(from, data, valPrefix, ignores)); } } //递归处理子节点 for (let i in node.childNodes) { if (!/^\d+$/.test(i)) continue; let n = node.childNodes[i]; // 不转换template 定义 if (n.nodeName === 'template' && n.getAttribute('name')) { bindTemplateEvents(n); continue; } bind(from, n, comPrefix, valPrefix, clsPrefix, ignores); } } /** * 递归绑定template标签子节点中的事件 * @param node */ function bindTemplateEvents(node) { //处理节点属性 let attributes = node.attributes; for (let i in attributes) { if (!/^\d+$/.test(i)) continue; let attr = attributes[i]; //绑定事件 if (/^(bind|catch)\w+/.test(attr.name)) { node.setAttribute('data-' + attr.name, attr.value); attr.value = '_dispatch'; } } for (let i in node.childNodes) { if (!/^\d+$/.test(i)) continue; let n = node.childNodes[i]; bindTemplateEvents(n); } } /** * @param {FileInfo} from * @param {string} comPrefix * @param {string} valPrefix * @param {string} clsPrefix * @param {Object} depends * @returns {Document} */ function build(from, comPrefix, valPrefix, clsPrefix, depends) { if (typeof from === 'string') { from = utils.getInfo(from); } const components = config.srcDir + 'components/'; let data = fs.readFileSync(from.file, 'utf8'); if (!data) { throw new Error('XML file is empty ' + from.relative); } let doc = new DOMParser().parseFromString(data); bind(from, doc, comPrefix, valPrefix, clsPrefix); let listElemnts = doc.getElementsByTagName('list'); //console.log('listElemnts', listElemnts); for (let i = 0; i < listElemnts.$$length; i++) { let el = listElemnts[i]; let key = el.getAttribute('key'); let name = el.getAttribute('name') || key; if (!key) throw new Error('Unknown list key in ' + from.relative); let src; if (utils.isDirectory(path.join(components, name))) { //在components目录中 src = 
path.join(components, name, name + '.xml'); } else if (utils.isFile(path.join(components, name + '.xml'))) { //在components目录中 src = path.join(components, name + '.xml'); } else if (utils.isDirectory(path.join(config.modulesDir, name))) { //在node_modules目录中 src = path.join(config.modulesDir, name, 'index.xml'); } else if (utils.isFile(path.join(config.modulesDir, name + '.xml'))) { //在node_modules目录中 src = path.join(config.modulesDir, name + '.xml'); } else { throw new Error(`Can not find components "${name}" in ` + from.relative); } depends[src] = true; let id = uid(); let indexName = '_k' + id; let itemName = '_v' + id; let subComPrefix = comPrefix ? comPrefix + '.' + key : key; subComPrefix += '.{{' + itemName + '.__k}}'; let subValPrefix = valPrefix ? valPrefix + '.' + key : key; let subClsPrefix = clsPrefix ? clsPrefix + '-' + key : key; let listNode = doc.createElement('block'); listNode.setAttribute('wx:for', '{{' + subValPrefix + '}}'); listNode.setAttribute('wx:key', '__k'); listNode.setAttribute('wx:for-index', indexName); listNode.setAttribute('wx:for-item', itemName); el.parentNode.replaceChild(listNode, el); let ignores = {}; ignores[indexName] = true; ignores[itemName] = true; let node = build(src, subComPrefix, itemName, subClsPrefix, depends); listNode.appendChild(node); } let componentElements = doc.getElementsByTagName('component'); for (let i = 0; i < componentElements.$$length; i++) { let el = componentElements[i]; let key = el.getAttribute('key'); let name = el.getAttribute('name') || key; if (!key) throw new Error('Unknown component key in ' + from.relative); let src; if (utils.isDirectory(path.join(components, name))) { //在components目录中 src = path.join(components, name, name + '.xml'); } else if (utils.isFile(path.join(components, name + '.xml'))) { //在components目录中 src = path.join(components, name + '.xml'); } else if (utils.isDirectory(path.join(config.modulesDir, name))) { //在node_modules目录中 src = path.join(config.modulesDir, name, 
'index.xml'); } else if (utils.isFile(path.join(config.modulesDir, name + '.xml'))) { //在node_modules目录中 src = path.join(config.modulesDir, name + '.xml'); } else { throw new Error(`Can not find components "${name}" in ` + from.relative); } depends[src] = true; let subComPrefix = comPrefix ? comPrefix + '.' + key : key; let subValPrefix = valPrefix ? valPrefix + '.' + key : key; let subClsPrefix = clsPrefix ? clsPrefix + '-' + key : key; let node = build(src, subComPrefix, subValPrefix, subClsPrefix, depends); el.parentNode.replaceChild(node, el); } return doc; } /** * 编译XML * @param {FileInfo} from * @param {FileInfo} to * @returns {Array} */ module.exports = function* buildXML(from, to) { console.log('build xml'.green, from.relative.blue, '->', to.relative.cyan); let depends = {}; let element = build(from, '', '', '', depends); mkdirp.sync(to.dir); let xml = element.toString(); xml = xml.replace(/&amp;nbsp;/g, '&nbsp;'); xml = xml.replace(/{{([^}]+)}}/g, function (matchs) { return matchs.replace(/&lt;/g, '<').replace(/&amp;/g, '&'); }); fs.writeFileSync(to.file, xml); return Object.keys(depends); };
有效变量名 a fun
identifier_name
build-xml.js
/** * @copyright Maichong Software Ltd. 2016 http://maichong.it * @date 2016-09-26 * @author Liang <liang@maichong.it> */ 'use strict'; const fs = require('fs'); const path = require('path'); const mkdirp = require('mkdirp'); const xmldom = require('xmldom'); const utils = require('./utils'); const config = require('./config')(); require('colors'); const DOMParser = xmldom.DOMParser; let _uid = 0; function uid() { _uid++; return _uid; } /** * 判断字符串中指定的位置是否是被包含在引号中 * @param string * @param n * @returns {boolean} */ function inText(string, n) { let firstIndex = string.search(/"|'/); if (firstIndex === -1 || firstIndex > n) return false; let char = ''; let last = ''; for (let i = 0; i < n; i++) { let c = string[i]; if (c === '"' || c === "'") { if (!char) { char = c; } else if (char === c && last !== '\\') { char = ''; } } last = c; } return char !== ''; } /** * 将带数据绑定的字符串替换 * @param {Object} from * @param {string} str 原始字符串 * @param {string} prefix 前缀 * @param {object} ignores 忽略的字符串map * @returns {string} */ function replaceString(from, str, prefix, ignores) { // 替换字符串中 {{}} 包含的表达式 // 获取类似 a.b.c 表达式中第一个有效变量名 a function getFirstWord(word) { return word.match(/[_a-z][\w\d]*/i)[0]; } // 检查类似 a.b.c 格式表达式是否忽略绑定 function shouldIgnore(word, matchs, n) { if (word[0] === '"' || word[0] === "'" || /^\d+$/.test(word)) return true; let w = getFirstWord(word); if (ignores.hasOwnProperty(w) || (matchs && inText(matchs, n))) { return true; } if (['state', 'props'].indexOf(w) < 0) { console.error(`'${from.fromSrc}' 中发现无效变量引用 '${word}',XML模板中只能引用组件'props'和'state'中的数据。`.red); console.error('如果您的项目基于Labrador 0.5.x,请按照升级指南升级到0.6.x版本 https://github.
]*\s*$/.test(words)) { let word = words.match(/\s*\.\.\.([\w_][\w\d\-_.\[\]]*)/)[1].trim(); if (shouldIgnore(word)) { return matchs; } return `{{...${prefix}${word}}}`; } let isArray = /{{\s*\[/.test(matchs); if (!isArray) { //支持对象简写 let arrays = words.split(','); if (arrays.length > 1) { let isObject = true; let props = arrays.map(function (str) { if (!isObject) return; // str 为对象中的一个属性, 可能为 a:b / a / ...a / ...a.b str = str.trim(); let arr = str.split(':'); if (arr.length === 1) { // 如果属性表达式中不包含冒号 // 如果为简写属性表达式,例如 {foo} if (/^[a-z_][\w\d]*$/i.test(str)) { if (ignores[str]) { return str + ':' + str; } return str + ':' + prefix + str; } // 属性展开表达式 ...foo if (/^\.{3}[a-z_][\w\d.\[\]]*$/i.test(str)) { let word = str.substr(3); if (shouldIgnore(word)) { return str; } return '...' + prefix + word; } // 判定 ${matchs} 不为对象表达式 isObject = false; return; } // 存在冒号的对象属性表达式 let word = arr[1].trim(); // foo:2.3 if (/^[\d.]+$/.test(word)) { return arr[0] + ':' + word; } // foo:bar // 'foo':bar if (shouldIgnore(word)) { return str; } // foo:bar // 'foo':bar // foo return arr[0] + ':' + prefix + word; }); //console.log('isObject', isObject); if (isObject) { return '{{' + props.join(',') + '}}'; } } } return matchs.replace(/[^\.\w'"]([a-z_\$][\w\d\._\$]*)/ig, function (match, word, n) { if (shouldIgnore(word, matchs, n)) { return match; } return match[0] + prefix + word; }); }); } /** * 递归绑定XML中的节点 * @param from * @param node * @param comPrefix * @param valPrefix * @param clsPrefix * @param ignores */ function bind(from, node, comPrefix, valPrefix, clsPrefix, ignores) { ignores = Object.assign({ true: true, false: true, null: true, undefined: true }, ignores); let hasPath = false; //处理节点属性 let attributes = node.attributes; for (let i in attributes) { if (!/^\d+$/.test(i)) continue; let attr = attributes[i]; //处理属性值 if (attr.value.indexOf('{') > -1) { attr.value = replaceString(from, attr.value, valPrefix, ignores); } //绑定事件 if (/^(bind|catch)\w+/.test(attr.name)) { 
node.setAttribute('data-' + attr.name, attr.value); attr.value = '_dispatch'; if (!hasPath && comPrefix) { node.setAttribute('data-path', comPrefix); } } //如果是循环标签,则在子标签中忽略循环索引和值变量 if (attr.name === 'wx:for') { let index = node.getAttribute('wx:for-index') || 'index'; let item = node.getAttribute('wx:for-item') || 'item'; ignores[index] = true; ignores[item] = true; } if (clsPrefix && attr.name === 'class') { const matchArr = []; // "xxx {{a ? 'b' : 'c'}}" // => "xxx $" attr.value = attr.value.replace(/\{\{([^}]+)\}\}/ig, function (match) { matchArr.push(match); matchArr.push(match); return '$'; }); // => "xxx prefix-xxx $ prefix-$" attr.value = attr.value.split(' ').map(cls => `${cls} ${clsPrefix}-${cls}`).join(' '); // => "xxx prefix-xxx {{a ? 'b' : 'c'}} prefix-{{a ? 'b' : 'c'}}" attr.value = attr.value.replace(/\$/g, function () { const matchItem = matchArr.shift(); return matchItem; }); } } //如果节点为文本 if (node.nodeName === '#text') { let data = node.data; if (data) { node.replaceData(0, data.length, replaceString(from, data, valPrefix, ignores)); } } //递归处理子节点 for (let i in node.childNodes) { if (!/^\d+$/.test(i)) continue; let n = node.childNodes[i]; // 不转换template 定义 if (n.nodeName === 'template' && n.getAttribute('name')) { bindTemplateEvents(n); continue; } bind(from, n, comPrefix, valPrefix, clsPrefix, ignores); } } /** * 递归绑定template标签子节点中的事件 * @param node */ function bindTemplateEvents(node) { //处理节点属性 let attributes = node.attributes; for (let i in attributes) { if (!/^\d+$/.test(i)) continue; let attr = attributes[i]; //绑定事件 if (/^(bind|catch)\w+/.test(attr.name)) { node.setAttribute('data-' + attr.name, attr.value); attr.value = '_dispatch'; } } for (let i in node.childNodes) { if (!/^\d+$/.test(i)) continue; let n = node.childNodes[i]; bindTemplateEvents(n); } } /** * @param {FileInfo} from * @param {string} comPrefix * @param {string} valPrefix * @param {string} clsPrefix * @param {Object} depends * @returns {Document} */ function build(from, 
comPrefix, valPrefix, clsPrefix, depends) { if (typeof from === 'string') { from = utils.getInfo(from); } const components = config.srcDir + 'components/'; let data = fs.readFileSync(from.file, 'utf8'); if (!data) { throw new Error('XML file is empty ' + from.relative); } let doc = new DOMParser().parseFromString(data); bind(from, doc, comPrefix, valPrefix, clsPrefix); let listElemnts = doc.getElementsByTagName('list'); //console.log('listElemnts', listElemnts); for (let i = 0; i < listElemnts.$$length; i++) { let el = listElemnts[i]; let key = el.getAttribute('key'); let name = el.getAttribute('name') || key; if (!key) throw new Error('Unknown list key in ' + from.relative); let src; if (utils.isDirectory(path.join(components, name))) { //在components目录中 src = path.join(components, name, name + '.xml'); } else if (utils.isFile(path.join(components, name + '.xml'))) { //在components目录中 src = path.join(components, name + '.xml'); } else if (utils.isDirectory(path.join(config.modulesDir, name))) { //在node_modules目录中 src = path.join(config.modulesDir, name, 'index.xml'); } else if (utils.isFile(path.join(config.modulesDir, name + '.xml'))) { //在node_modules目录中 src = path.join(config.modulesDir, name + '.xml'); } else { throw new Error(`Can not find components "${name}" in ` + from.relative); } depends[src] = true; let id = uid(); let indexName = '_k' + id; let itemName = '_v' + id; let subComPrefix = comPrefix ? comPrefix + '.' + key : key; subComPrefix += '.{{' + itemName + '.__k}}'; let subValPrefix = valPrefix ? valPrefix + '.' + key : key; let subClsPrefix = clsPrefix ? 
clsPrefix + '-' + key : key; let listNode = doc.createElement('block'); listNode.setAttribute('wx:for', '{{' + subValPrefix + '}}'); listNode.setAttribute('wx:key', '__k'); listNode.setAttribute('wx:for-index', indexName); listNode.setAttribute('wx:for-item', itemName); el.parentNode.replaceChild(listNode, el); let ignores = {}; ignores[indexName] = true; ignores[itemName] = true; let node = build(src, subComPrefix, itemName, subClsPrefix, depends); listNode.appendChild(node); } let componentElements = doc.getElementsByTagName('component'); for (let i = 0; i < componentElements.$$length; i++) { let el = componentElements[i]; let key = el.getAttribute('key'); let name = el.getAttribute('name') || key; if (!key) throw new Error('Unknown component key in ' + from.relative); let src; if (utils.isDirectory(path.join(components, name))) { //在components目录中 src = path.join(components, name, name + '.xml'); } else if (utils.isFile(path.join(components, name + '.xml'))) { //在components目录中 src = path.join(components, name + '.xml'); } else if (utils.isDirectory(path.join(config.modulesDir, name))) { //在node_modules目录中 src = path.join(config.modulesDir, name, 'index.xml'); } else if (utils.isFile(path.join(config.modulesDir, name + '.xml'))) { //在node_modules目录中 src = path.join(config.modulesDir, name + '.xml'); } else { throw new Error(`Can not find components "${name}" in ` + from.relative); } depends[src] = true; let subComPrefix = comPrefix ? comPrefix + '.' + key : key; let subValPrefix = valPrefix ? valPrefix + '.' + key : key; let subClsPrefix = clsPrefix ? 
clsPrefix + '-' + key : key; let node = build(src, subComPrefix, subValPrefix, subClsPrefix, depends); el.parentNode.replaceChild(node, el); } return doc; } /** * 编译XML * @param {FileInfo} from * @param {FileInfo} to * @returns {Array} */ module.exports = function* buildXML(from, to) { console.log('build xml'.green, from.relative.blue, '->', to.relative.cyan); let depends = {}; let element = build(from, '', '', '', depends); mkdirp.sync(to.dir); let xml = element.toString(); xml = xml.replace(/&amp;nbsp;/g, '&nbsp;'); xml = xml.replace(/{{([^}]+)}}/g, function (matchs) { return matchs.replace(/&lt;/g, '<').replace(/&amp;/g, '&'); }); fs.writeFileSync(to.file, xml); return Object.keys(depends); };
com/maichong/labrador'); } return false; } if (prefix) { prefix += '.'; } else { prefix = ''; } return str.replace(/\{\{([^}]+)\}\}/ig, function (matchs, words) { // matchs 是{{xxxxx}}格式的字符串 // words 是{{}}中间的表达式 // ...foo if (/^\s*\.\.\.[\w_][\w\d\-_.\[\]
conditional_block
build-xml.js
/** * @copyright Maichong Software Ltd. 2016 http://maichong.it * @date 2016-09-26 * @author Liang <liang@maichong.it> */ 'use strict'; const fs = require('fs'); const path = require('path'); const mkdirp = require('mkdirp'); const xmldom = require('xmldom'); const utils = require('./utils'); const config = require('./config')(); require('colors'); const DOMParser = xmldom.DOMParser; let _uid = 0; function uid() { _uid++; return _uid; } /** * 判断字符串中指定的位置是否是被包含在引号中 * @param string * @param n * @returns {boolean} */ function inText(string, n) { let firstIndex = string.search(/"|'/); if (firstIndex === -1 || firstIndex > n) return false; let char = ''; let last = ''; for (let i = 0; i < n; i++) { let c = string[i]; if (c === '"' || c === "'") { if (!char) { char = c; } else if (char === c && last !== '\\') { char = ''; } } last = c; } return char !== ''; } /** * 将带数据绑定的字符串替换 * @param {Object} from * @param {string} str 原始字符串 * @param {string} prefix 前缀 * @param {object} ignores 忽略的字符串map * @returns {string} */ function replaceString(from, str, prefix, ignores) { // 替换字符串中 {{}} 包含的表达式 // 获取类似 a.b.c 表达式中第一个有效变量名 a function getFirstWord(word) { return word.match(/[_a-z][\w\d]*/i)[0]; } // 检查类似 a.b.c 格式表达式是否忽略绑定 function shouldIgnore(word, matchs, n) { if (word[0] === '"' || word[0] === "'" || /^\d+$/.test(word)) return true; let w = getFirstWord(word); if (ignores.hasOwnProperty(w) || (matchs && inText(matchs, n))) { return true; } if (['state', 'props'].indexOf(w) < 0) { console.error(`'${from.fromSrc}' 中发现无效变量引用 '${word}',XML模板中只能引用组件'props'和'state'中的数据。`.red); console.error('如果您的项目基于Labrador 0.5.x,请按照升级指南升级到0.6.x版本 https://github.com/maichong/labrador'); } return false; } if (prefix) { prefix += '.'; } else { prefix = ''; } return str.replace(/\{\{([^}]+)\}\}/ig, function (matchs, words) { // matchs 是{{xxxxx}}格式的字符串 // words 是{{}}中间的表达式 // ...foo if (/^\s*\.\.\.[\w_][\w\d\-_.\[\]]*\s*$/.test(words)) { let word = 
words.match(/\s*\.\.\.([\w_][\w\d\-_.\[\]]*)/)[1].trim(); if (shouldIgnore(word)) { return matchs; } return `{{...${prefix}${word}}}`; } let isArray = /{{\s*\[/.test(matchs); if (!isArray) { //支持对象简写 let arrays = words.split(','); if (arrays.length > 1) { let isObject = true; let props = arrays.map(function (str) { if (!isObject) return; // str 为对象中的一个属性, 可能为 a:b / a / ...a / ...a.b str = str.trim(); let arr = str.split(':'); if (arr.length === 1) { // 如果属性表达式中不包含冒号 // 如果为简写属性表达式,例如 {foo} if (/^[a-z_][\w\d]*$/i.test(str)) { if (ignores[str]) { return str + ':' + str; } return str + ':' + prefix + str; } // 属性展开表达式 ...foo if (/^\.{3}[a-z_][\w\d.\[\]]*$/i.test(str)) { let word = str.substr(3); if (shouldIgnore(word)) { return str; } return '...' + prefix + word; } // 判定 ${matchs} 不为对象表达式 isObject = false; return; } // 存在冒号的对象属性表达式 let word = arr[1].trim(); // foo:2.3 if (/^[\d.]+$/.test(word)) { return arr[0] + ':' + word; } // foo:bar // 'foo':bar if (shouldIgnore(word)) { return str; } // foo:bar // 'foo':bar // foo return arr[0] + ':' + prefix + word; }); //console.log('isObject', isObject); if (isObject) { return '{{' + props.join(',') + '}}'; } } } return matchs.replace(/[^\.\w'"]([a-z_\$][\w\d\._\$]*)/ig, function (match, word, n) { if (shouldIgnore(word, matchs, n)) { return match; } return match[0] + prefix + word; }); }); } /** * 递归绑定XML中的节点 * @param from * @param node * @param comPrefix * @param valPrefix * @param clsPrefix * @param ignores */ function bind(from, node, comPrefix, valPrefix, clsPrefix, ignores) { ignores = Object.assign({ true: true, false: true, null: true, undefined: true }, ignores); let hasPath = false; //处理节点属性 let attributes = node.attributes; for (let i in attributes) { if (!/^\d+$/.test(i)) continue; let attr = attributes[i]; //处理属性值 if (attr.value.indexOf('{') > -1) { attr.value = replaceString(from, attr.value, valPrefix, ignores); } //绑定事件 if (/^(bind|catch)\w+/.test(attr.name)) { node.setAttribute('data-' + attr.name, 
attr.value); attr.value = '_dispatch'; if (!hasPath && comPrefix) { node.setAttribute('data-path', comPrefix); } } //如果是循环标签,则在子标签中忽略循环索引和值变量 if (attr.name === 'wx:for') { let index = node.getAttribute('wx:for-index') || 'index'; let item = node.getAttribute('wx:for-item') || 'item'; ignores[index] = true; ignores[item] = true; } if (clsPrefix && attr.name === 'class') { const matchArr = []; // "xxx {{a ? 'b' : 'c'}}" // => "xxx $" attr.value = attr.value.replace(/\{\{([^}]+)\}\}/ig, function (match) { matchArr.push(match); matchArr.push(match); return '$'; }); // => "xxx prefix-xxx $ prefix-$" attr.value = attr.value.split(' ').map(cls => `${cls} ${clsPrefix}-${cls}`).join(' '); // => "xxx prefix-xxx {{a ? 'b' : 'c'}} prefix-{{a ? 'b' : 'c'}}" attr.value = attr.value.replace(/\$/g, function () { const matchItem = matchArr.shift(); return matchItem; }); } } //如果节点为文本 if (node.nodeName === '#text') { let data = node.data; if (data) { node.replaceData(0, data.length, replaceString(from, data, valPrefix, ignores)); } }
// 不转换template 定义 if (n.nodeName === 'template' && n.getAttribute('name')) { bindTemplateEvents(n); continue; } bind(from, n, comPrefix, valPrefix, clsPrefix, ignores); } } /** * 递归绑定template标签子节点中的事件 * @param node */ function bindTemplateEvents(node) { //处理节点属性 let attributes = node.attributes; for (let i in attributes) { if (!/^\d+$/.test(i)) continue; let attr = attributes[i]; //绑定事件 if (/^(bind|catch)\w+/.test(attr.name)) { node.setAttribute('data-' + attr.name, attr.value); attr.value = '_dispatch'; } } for (let i in node.childNodes) { if (!/^\d+$/.test(i)) continue; let n = node.childNodes[i]; bindTemplateEvents(n); } } /** * @param {FileInfo} from * @param {string} comPrefix * @param {string} valPrefix * @param {string} clsPrefix * @param {Object} depends * @returns {Document} */ function build(from, comPrefix, valPrefix, clsPrefix, depends) { if (typeof from === 'string') { from = utils.getInfo(from); } const components = config.srcDir + 'components/'; let data = fs.readFileSync(from.file, 'utf8'); if (!data) { throw new Error('XML file is empty ' + from.relative); } let doc = new DOMParser().parseFromString(data); bind(from, doc, comPrefix, valPrefix, clsPrefix); let listElemnts = doc.getElementsByTagName('list'); //console.log('listElemnts', listElemnts); for (let i = 0; i < listElemnts.$$length; i++) { let el = listElemnts[i]; let key = el.getAttribute('key'); let name = el.getAttribute('name') || key; if (!key) throw new Error('Unknown list key in ' + from.relative); let src; if (utils.isDirectory(path.join(components, name))) { //在components目录中 src = path.join(components, name, name + '.xml'); } else if (utils.isFile(path.join(components, name + '.xml'))) { //在components目录中 src = path.join(components, name + '.xml'); } else if (utils.isDirectory(path.join(config.modulesDir, name))) { //在node_modules目录中 src = path.join(config.modulesDir, name, 'index.xml'); } else if (utils.isFile(path.join(config.modulesDir, name + '.xml'))) { //在node_modules目录中 src = 
path.join(config.modulesDir, name + '.xml'); } else { throw new Error(`Can not find components "${name}" in ` + from.relative); } depends[src] = true; let id = uid(); let indexName = '_k' + id; let itemName = '_v' + id; let subComPrefix = comPrefix ? comPrefix + '.' + key : key; subComPrefix += '.{{' + itemName + '.__k}}'; let subValPrefix = valPrefix ? valPrefix + '.' + key : key; let subClsPrefix = clsPrefix ? clsPrefix + '-' + key : key; let listNode = doc.createElement('block'); listNode.setAttribute('wx:for', '{{' + subValPrefix + '}}'); listNode.setAttribute('wx:key', '__k'); listNode.setAttribute('wx:for-index', indexName); listNode.setAttribute('wx:for-item', itemName); el.parentNode.replaceChild(listNode, el); let ignores = {}; ignores[indexName] = true; ignores[itemName] = true; let node = build(src, subComPrefix, itemName, subClsPrefix, depends); listNode.appendChild(node); } let componentElements = doc.getElementsByTagName('component'); for (let i = 0; i < componentElements.$$length; i++) { let el = componentElements[i]; let key = el.getAttribute('key'); let name = el.getAttribute('name') || key; if (!key) throw new Error('Unknown component key in ' + from.relative); let src; if (utils.isDirectory(path.join(components, name))) { //在components目录中 src = path.join(components, name, name + '.xml'); } else if (utils.isFile(path.join(components, name + '.xml'))) { //在components目录中 src = path.join(components, name + '.xml'); } else if (utils.isDirectory(path.join(config.modulesDir, name))) { //在node_modules目录中 src = path.join(config.modulesDir, name, 'index.xml'); } else if (utils.isFile(path.join(config.modulesDir, name + '.xml'))) { //在node_modules目录中 src = path.join(config.modulesDir, name + '.xml'); } else { throw new Error(`Can not find components "${name}" in ` + from.relative); } depends[src] = true; let subComPrefix = comPrefix ? comPrefix + '.' + key : key; let subValPrefix = valPrefix ? valPrefix + '.' + key : key; let subClsPrefix = clsPrefix ? 
clsPrefix + '-' + key : key; let node = build(src, subComPrefix, subValPrefix, subClsPrefix, depends); el.parentNode.replaceChild(node, el); } return doc; } /** * 编译XML * @param {FileInfo} from * @param {FileInfo} to * @returns {Array} */ module.exports = function* buildXML(from, to) { console.log('build xml'.green, from.relative.blue, '->', to.relative.cyan); let depends = {}; let element = build(from, '', '', '', depends); mkdirp.sync(to.dir); let xml = element.toString(); xml = xml.replace(/&amp;nbsp;/g, '&nbsp;'); xml = xml.replace(/{{([^}]+)}}/g, function (matchs) { return matchs.replace(/&lt;/g, '<').replace(/&amp;/g, '&'); }); fs.writeFileSync(to.file, xml); return Object.keys(depends); };
//递归处理子节点 for (let i in node.childNodes) { if (!/^\d+$/.test(i)) continue; let n = node.childNodes[i];
random_line_split
build-xml.js
/** * @copyright Maichong Software Ltd. 2016 http://maichong.it * @date 2016-09-26 * @author Liang <liang@maichong.it> */ 'use strict'; const fs = require('fs'); const path = require('path'); const mkdirp = require('mkdirp'); const xmldom = require('xmldom'); const utils = require('./utils'); const config = require('./config')(); require('colors'); const DOMParser = xmldom.DOMParser; let _uid = 0; function uid() { _uid++; return _uid; } /** * 判断字符串中指定的位置是否是被包含在引号中 * @param string * @param n * @returns {boolean} */ function inText(string, n) { let firstIndex = string.search(/"|'/); if (firstIndex === -1 || firstIndex > n) return false; let char = ''; let last = ''; for (let i = 0; i < n; i++) { let c = string[i]; if (c === '"' || c === "'") { if (!char) { char = c; } else if (char === c && last !== '\\') { char = ''; } } last = c; } return char !== ''; } /** * 将带数据绑定的字符串替换 * @param {Object} from * @param {string} str 原始字符串 * @param {string} prefix 前缀 * @param {object} ignores 忽略的字符串map * @returns {string} */ function replaceString(from, str, prefix, ignores) { // 替换字符串中 {{}} 包含的表达式 // 获取类似 a.b.c 表达式中第一个有效变量名 a function getFirstWord(word) { return word.match(/[_a-z][\w\d]*/i)[0]; } // 检查类似 a.b.c 格式表达式是否忽略绑定 function shouldIgnore(word, matchs, n) { if (word[0] === '"' || word[0] === "'" || /^\d+$/.test(word)) return true; let w = getFirstWord(word); if (ignores.hasOwnProperty(w) || (matchs && inText(matchs, n))) { return true; } if (['state', 'props'].indexOf(w) < 0) { console.error(`'${from.fromSrc}' 中发现无效变量引用 '${word}',XML模板中只能引用组件'props'和'state'中的数据。`.red); console.error('如果您的项目基于Labrador 0.5.x,请按照升级指南升级到0.6.x版本 https://github.com/maichong/labrador'); } return false; } if (prefix) { prefix += '.'; } else { prefix = ''; } return str.replace(/\{\{([^}]+)\}\}/ig, function (matchs, words) { // matchs 是{{xxxxx}}格式的字符串 // words 是{{}}中间的表达式 // ...foo if (/^\s*\.\.\.[\w_][\w\d\-_.\[\]]*\s*$/.test(words)) { let word = 
words.match(/\s*\.\.\.([\w_][\w\d\-_.\[\]]*)/)[1].trim(); if (shouldIgnore(word)) { return matchs; } return `{{...${prefix}${word}}}`; } let isArray = /{{\s*\[/.test(matchs); if (!isArray) { //支持对象简写 let arrays = words.split(','); if (arrays.length > 1) { let isObject = true; let props = arrays.map(function (str) { if (!isObject) return; // str 为对象中的一个属性, 可能为 a:b / a / ...a / ...a.b str = str.trim(); let arr = str.split(':'); if (arr.length === 1) { // 如果属性表达式中不包含冒号 // 如果为简写属性表达式,例如 {foo} if (/^[a-z_][\w\d]*$/i.test(str)) { if (ignores[str]) { return str + ':' + str; } return str + ':' + prefix + str; } // 属性展开表达式 ...foo if (/^\.{3}[a-z_][\w\d.\[\]]*$/i.test(str)) { let word = str.substr(3); if (shouldIgnore(word)) { return str; } return '...' + prefix + word; } // 判定 ${matchs} 不为对象表达式 isObject = false; return; } // 存在冒号的对象属性表达式 let word = arr[1].trim(); // foo:2.3 if (/^[\d.]+$/.test(word)) { return arr[0] + ':' + word; } // foo:bar // 'foo':bar if (shouldIgnore(word)) { return str; } // foo:bar // 'foo':bar // foo return arr[0] + ':' + prefix + word; }); //console.log('isObject', isObject); if (isObject) { return '{{' + props.join(',') + '}}'; } } } return matchs.replace(/[^\.\w'"]([a-z_\$][\w\d\._\$]*)/ig, function (match, word, n) { if (shouldIgnore(word, matchs, n)) { return match; } return match[0] + prefix + word; }); }); } /** * 递归绑定XML中的节点 * @param from * @param node * @param comPrefix * @param valPrefix * @param clsPrefix * @param ignores */ function bind(from, node, comPrefix, valPrefix, clsPrefix, ignores) { ignores = Object.assign({ true: true, false: true, null: true, undefined: true }, ignores); let hasPath = false; //处理节点属性 let attributes = node.attributes; for (let i in attributes) { if (!/^\d+$/.test(i)) continue; let attr = attributes[i]; //处理属性值 if (attr.value.indexOf('{') > -1) { attr.value = replaceString(from, attr.value, valPrefix, ignores); } //绑定事件 if (/^(bind|catch)\w+/.test(att
comPrefix * @param {string} valPrefix * @param {string} clsPrefix * @param {Object} depends * @returns {Document} */ function build(from, comPrefix, valPrefix, clsPrefix, depends) { if (typeof from === 'string') { from = utils.getInfo(from); } const components = config.srcDir + 'components/'; let data = fs.readFileSync(from.file, 'utf8'); if (!data) { throw new Error('XML file is empty ' + from.relative); } let doc = new DOMParser().parseFromString(data); bind(from, doc, comPrefix, valPrefix, clsPrefix); let listElemnts = doc.getElementsByTagName('list'); //console.log('listElemnts', listElemnts); for (let i = 0; i < listElemnts.$$length; i++) { let el = listElemnts[i]; let key = el.getAttribute('key'); let name = el.getAttribute('name') || key; if (!key) throw new Error('Unknown list key in ' + from.relative); let src; if (utils.isDirectory(path.join(components, name))) { //在components目录中 src = path.join(components, name, name + '.xml'); } else if (utils.isFile(path.join(components, name + '.xml'))) { //在components目录中 src = path.join(components, name + '.xml'); } else if (utils.isDirectory(path.join(config.modulesDir, name))) { //在node_modules目录中 src = path.join(config.modulesDir, name, 'index.xml'); } else if (utils.isFile(path.join(config.modulesDir, name + '.xml'))) { //在node_modules目录中 src = path.join(config.modulesDir, name + '.xml'); } else { throw new Error(`Can not find components "${name}" in ` + from.relative); } depends[src] = true; let id = uid(); let indexName = '_k' + id; let itemName = '_v' + id; let subComPrefix = comPrefix ? comPrefix + '.' + key : key; subComPrefix += '.{{' + itemName + '.__k}}'; let subValPrefix = valPrefix ? valPrefix + '.' + key : key; let subClsPrefix = clsPrefix ? 
clsPrefix + '-' + key : key; let listNode = doc.createElement('block'); listNode.setAttribute('wx:for', '{{' + subValPrefix + '}}'); listNode.setAttribute('wx:key', '__k'); listNode.setAttribute('wx:for-index', indexName); listNode.setAttribute('wx:for-item', itemName); el.parentNode.replaceChild(listNode, el); let ignores = {}; ignores[indexName] = true; ignores[itemName] = true; let node = build(src, subComPrefix, itemName, subClsPrefix, depends); listNode.appendChild(node); } let componentElements = doc.getElementsByTagName('component'); for (let i = 0; i < componentElements.$$length; i++) { let el = componentElements[i]; let key = el.getAttribute('key'); let name = el.getAttribute('name') || key; if (!key) throw new Error('Unknown component key in ' + from.relative); let src; if (utils.isDirectory(path.join(components, name))) { //在components目录中 src = path.join(components, name, name + '.xml'); } else if (utils.isFile(path.join(components, name + '.xml'))) { //在components目录中 src = path.join(components, name + '.xml'); } else if (utils.isDirectory(path.join(config.modulesDir, name))) { //在node_modules目录中 src = path.join(config.modulesDir, name, 'index.xml'); } else if (utils.isFile(path.join(config.modulesDir, name + '.xml'))) { //在node_modules目录中 src = path.join(config.modulesDir, name + '.xml'); } else { throw new Error(`Can not find components "${name}" in ` + from.relative); } depends[src] = true; let subComPrefix = comPrefix ? comPrefix + '.' + key : key; let subValPrefix = valPrefix ? valPrefix + '.' + key : key; let subClsPrefix = clsPrefix ? 
clsPrefix + '-' + key : key; let node = build(src, subComPrefix, subValPrefix, subClsPrefix, depends); el.parentNode.replaceChild(node, el); } return doc; } /** * 编译XML * @param {FileInfo} from * @param {FileInfo} to * @returns {Array} */ module.exports = function* buildXML(from, to) { console.log('build xml'.green, from.relative.blue, '->', to.relative.cyan); let depends = {}; let element = build(from, '', '', '', depends); mkdirp.sync(to.dir); let xml = element.toString(); xml = xml.replace(/&amp;nbsp;/g, '&nbsp;'); xml = xml.replace(/{{([^}]+)}}/g, function (matchs) { return matchs.replace(/&lt;/g, '<').replace(/&amp;/g, '&'); }); fs.writeFileSync(to.file, xml); return Object.keys(depends); };
r.name)) { node.setAttribute('data-' + attr.name, attr.value); attr.value = '_dispatch'; if (!hasPath && comPrefix) { node.setAttribute('data-path', comPrefix); } } //如果是循环标签,则在子标签中忽略循环索引和值变量 if (attr.name === 'wx:for') { let index = node.getAttribute('wx:for-index') || 'index'; let item = node.getAttribute('wx:for-item') || 'item'; ignores[index] = true; ignores[item] = true; } if (clsPrefix && attr.name === 'class') { const matchArr = []; // "xxx {{a ? 'b' : 'c'}}" // => "xxx $" attr.value = attr.value.replace(/\{\{([^}]+)\}\}/ig, function (match) { matchArr.push(match); matchArr.push(match); return '$'; }); // => "xxx prefix-xxx $ prefix-$" attr.value = attr.value.split(' ').map(cls => `${cls} ${clsPrefix}-${cls}`).join(' '); // => "xxx prefix-xxx {{a ? 'b' : 'c'}} prefix-{{a ? 'b' : 'c'}}" attr.value = attr.value.replace(/\$/g, function () { const matchItem = matchArr.shift(); return matchItem; }); } } //如果节点为文本 if (node.nodeName === '#text') { let data = node.data; if (data) { node.replaceData(0, data.length, replaceString(from, data, valPrefix, ignores)); } } //递归处理子节点 for (let i in node.childNodes) { if (!/^\d+$/.test(i)) continue; let n = node.childNodes[i]; // 不转换template 定义 if (n.nodeName === 'template' && n.getAttribute('name')) { bindTemplateEvents(n); continue; } bind(from, n, comPrefix, valPrefix, clsPrefix, ignores); } } /** * 递归绑定template标签子节点中的事件 * @param node */ function bindTemplateEvents(node) { //处理节点属性 let attributes = node.attributes; for (let i in attributes) { if (!/^\d+$/.test(i)) continue; let attr = attributes[i]; //绑定事件 if (/^(bind|catch)\w+/.test(attr.name)) { node.setAttribute('data-' + attr.name, attr.value); attr.value = '_dispatch'; } } for (let i in node.childNodes) { if (!/^\d+$/.test(i)) continue; let n = node.childNodes[i]; bindTemplateEvents(n); } } /** * @param {FileInfo} from * @param {string}
identifier_body
lib.rs
/*! [![Build status](https://ci.appveyor.com/api/projects/status/xlkq8rd73cla4ixw/branch/master?svg=true)](https://ci.appveyor.com/project/jaemk/self-update/branch/master) [![Build Status](https://travis-ci.org/jaemk/self_update.svg?branch=master)](https://travis-ci.org/jaemk/self_update) [![crates.io:clin](https://img.shields.io/crates/v/self_update.svg?label=self_update)](https://crates.io/crates/self_update) [![docs](https://docs.rs/self_update/badge.svg)](https://docs.rs/self_update) `self_update` provides updaters for updating rust executables in-place from various release distribution backends. ```shell self_update = "0.4" ``` ## Usage Update (replace) the current executable with the latest release downloaded from `https://api.github.com/repos/jaemk/self_update/releases/latest`. Note, the [`trust`](https://github.com/japaric/trust) project provides a nice setup for producing release-builds via CI (travis/appveyor). ``` #[macro_use] extern crate self_update; fn update() -> Result<(), Box<::std::error::Error>> { let target = self_update::get_target()?; let status = self_update::backends::github::Update::configure()? .repo_owner("jaemk") .repo_name("self_update") .target(&target) .bin_name("self_update_example") .show_download_progress(true) .current_version(cargo_crate_version!()) .build()? .update()?; println!("Update status: `{}`!", status.version()); Ok(()) } # fn main() { } ``` Run the above example to see `self_update` in action: `cargo run --example github` Separate utilities are also exposed: ``` extern crate self_update; fn update() -> Result<(), Box<::std::error::Error>> { let target = self_update::get_target()?; let releases = self_update::backends::github::ReleaseList::configure() .repo_owner("jaemk") .repo_name("self_update") .with_target(&target) .build()? 
.fetch()?; println!("found releases:"); println!("{:#?}\n", releases); // get the first available release let asset = releases[0] .asset_for(&target).unwrap(); let tmp_dir = self_update::TempDir::new_in(::std::env::current_dir()?, "self_update")?; let tmp_tarball_path = tmp_dir.path().join(&asset.name); let tmp_tarball = ::std::fs::File::open(&tmp_tarball_path)?; self_update::Download::from_url(&asset.download_url) .download_to(&tmp_tarball)?; self_update::Extract::from_source(&tmp_tarball_path) .archive(self_update::ArchiveKind::Tar) .encoding(self_update::EncodingKind::Gz) .extract_into(&tmp_dir.path())?; let tmp_file = tmp_dir.path().join("replacement_tmp"); let bin_name = "self_update_bin"; let bin_path = tmp_dir.path().join(bin_name); self_update::Move::from_source(&bin_path) .replace_using_temp(&tmp_file) .to_dest(&::std::env::current_exe()?)?; Ok(()) } # fn main() { } ``` */ extern crate serde_json; extern crate reqwest; extern crate tempdir; extern crate flate2; extern crate tar; extern crate semver; extern crate pbr; pub use tempdir::TempDir; use std::fs; use std::io; use std::path; #[macro_use] mod macros; pub mod errors; pub mod backends; pub mod version; use errors::*; /// Try to determine the current target triple. /// /// Returns a target triple (e.g. 
`x86_64-unknown-linux-gnu` or `i686-pc-windows-msvc`) or an /// `Error::Config` if the current config cannot be determined or is not some combination of the /// following values: /// `linux, mac, windows` -- `i686, x86, armv7` -- `gnu, musl, msvc` /// /// * Errors: /// * Unexpected system config pub fn get_target() -> Result<String> { let arch_config = (cfg!(target_arch = "x86"), cfg!(target_arch = "x86_64"), cfg!(target_arch = "arm")); let arch = match arch_config { (true, _, _) => "i686", (_, true, _) => "x86_64", (_, _, true) => "armv7", _ => bail!(Error::Update, "Unable to determine target-architecture"), }; let os_config = (cfg!(target_os = "linux"), cfg!(target_os = "macos"), cfg!(target_os = "windows")); let os = match os_config { (true, _, _) => "unknown-linux", (_, true, _) => "apple-darwin", (_, _, true) => "pc-windows", _ => bail!(Error::Update, "Unable to determine target-os"), }; let s; let os = if cfg!(target_os = "macos") { os } else { let env_config = (cfg!(target_env = "gnu"), cfg!(target_env = "musl"), cfg!(target_env = "msvc")); let env = match env_config { (true, _, _) => "gnu", (_, true, _) => "musl", (_, _, true) => "msvc", _ => bail!(Error::Update, "Unable to determine target-environment"), }; s = format!("{}-{}", os, env); &s }; Ok(format!("{}-{}", arch, os)) } /// Check if a version tag is greater than the current #[deprecated(since="0.4.2", note="`should_update` functionality has been moved to `version::bump_is_greater`.\ `version::bump_is_compatible` should be used instead.")] pub fn should_update(current: &str, latest: &str) -> Result<bool> { use semver::Version; Ok(Version::parse(latest)? > Version::parse(current)?) } /// Flush a message to stdout and check if they respond `yes`. /// Interprets a blank response as yes. 
/// /// * Errors: /// * Io flushing /// * User entered anything other than enter/Y/y fn confirm(msg: &str) -> Result<()> { print_flush!("{}", msg); let mut s = String::new(); io::stdin().read_line(&mut s)?; let s = s.trim().to_lowercase(); if ! s.is_empty() && s != "y" { bail!(Error::Update, "Update aborted"); } Ok(()) } /// Status returned after updating /// /// Wrapped `String`s are version tags #[derive(Debug, Clone)] pub enum Status { UpToDate(String), Updated(String), } impl Status { /// Return the version tag pub fn version(&self) -> &str { use Status::*; match *self { UpToDate(ref s) => s, Updated(ref s) => s, } } /// Returns `true` if `Status::UpToDate` pub fn uptodate(&self) -> bool { match *self { Status::UpToDate(_) => true, _ => false, } } /// Returns `true` if `Status::Updated` pub fn updated(&self) -> bool { match *self { Status::Updated(_) => true, _ => false, } } } impl std::fmt::Display for Status { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { use Status::*; match *self { UpToDate(ref s) => write!(f, "UpToDate({})", s), Updated(ref s) => write!(f, "Updated({})", s), } } } /// Supported archive formats #[derive(Debug)] pub enum ArchiveKind { Tar, Plain, } /// Supported encoding formats #[derive(Debug)] pub enum EncodingKind { Gz, Plain, } /// Extract contents of an encoded archive (e.g. tar.gz) file to a specified directory /// /// * Errors: /// * Io - opening files /// * Io - gzip decoding /// * Io - archive unpacking #[derive(Debug)] pub struct Extract<'a> { source: &'a path::Path, archive: ArchiveKind, encoding: EncodingKind, } impl<'a> Extract<'a> { pub fn from_source(source: &'a path::Path) -> Extract<'a> { Self { source: source, archive: ArchiveKind::Plain, encoding: EncodingKind::Plain, } } pub fn archive(&mut self, kind: ArchiveKind) -> &mut Self { self.archive = kind; self } pub fn encoding(&mut self, kind: EncodingKind) -> &mut Self { self.encoding = kind; self } pub fn
(&self, into_dir: &path::Path) -> Result<()> { let source = fs::File::open(self.source)?; let archive: Box<io::Read> = match self.encoding { EncodingKind::Plain => Box::new(source), EncodingKind::Gz => { let reader = flate2::read::GzDecoder::new(source); Box::new(reader) }, }; match self.archive { ArchiveKind::Plain => (), ArchiveKind::Tar => { let mut archive = tar::Archive::new(archive); archive.unpack(into_dir)?; } }; Ok(()) } } /// Moves a file from the given path to the specified destination. /// /// `source` and `dest` must be on the same filesystem. /// If `replace_using_temp` is provided, the destination file will be /// replaced using the given temp path as a backup in case of `io` errors. /// /// * Errors: /// * Io - copying / renaming #[derive(Debug)] pub struct Move<'a> { source: &'a path::Path, temp: Option<&'a path::Path>, } impl<'a> Move<'a> { /// Specify source file pub fn from_source(source: &'a path::Path) -> Move<'a> { Self { source: source, temp: None, } } /// If specified and the destination file already exists, the destination /// file will be "safely" replaced using a temp path. /// The `temp` dir should must be explicitly provided since `replace` operations require /// files to live on the same filesystem. 
pub fn replace_using_temp(&mut self, temp: &'a path::Path) -> &mut Self { self.temp = Some(temp); self } /// Move source file to specified destination pub fn to_dest(&self, dest: &path::Path) -> Result<()> { match self.temp { None => { fs::rename(self.source, dest)?; } Some(temp) => { if dest.exists() { fs::rename(dest, temp)?; match fs::rename(self.source, dest) { Err(e) => { fs::rename(temp, dest)?; return Err(Error::from(e)) } Ok(_) => (), }; } else { fs::rename(self.source, dest)?; } } }; Ok(()) } } /// Download things into files /// /// With optional progress bar #[derive(Debug)] pub struct Download { show_progress: bool, url: String, } impl Download { /// Specify download url pub fn from_url(url: &str) -> Self { Self { show_progress: false, url: url.to_owned(), } } /// Toggle download progress bar pub fn show_progress(&mut self, b: bool) -> &mut Self { self.show_progress = b; self } /// Download the file behind the given `url` into the specified `dest`. /// Show a sliding progress bar if specified. 
/// If the resource doesn't specify a content-length, the progress bar will not be shown /// /// * Errors: /// * `reqwest` network errors /// * Unsuccessful response status /// * Progress-bar errors /// * Reading from response to `BufReader`-buffer /// * Writing from `BufReader`-buffer to `File` pub fn download_to<T: io::Write>(&self, mut dest: T) -> Result<()> { use io::BufRead; set_ssl_vars!(); let resp = reqwest::get(&self.url)?; let size = resp.headers() .get::<reqwest::header::ContentLength>() .map(|ct_len| **ct_len) .unwrap_or(0); if !resp.status().is_success() { bail!(Error::Update, "Download request failed with status: {:?}", resp.status()) } let show_progress = if size == 0 { false } else { self.show_progress }; let mut src = io::BufReader::new(resp); let mut bar = if show_progress { let mut bar = pbr::ProgressBar::new(size); bar.set_units(pbr::Units::Bytes); bar.format("[=> ]"); Some(bar) } else { None }; loop { let n = { let mut buf = src.fill_buf()?; dest.write_all(&mut buf)?; buf.len() }; if n == 0 { break; } src.consume(n); if let Some(ref mut bar) = bar { bar.add(n as u64); } } if show_progress { println!(" ... Done"); } Ok(()) } } #[cfg(test)] mod tests { use super::*; use std::env; #[test] fn can_determine_target_arch() { let target = get_target(); assert!(target.is_ok(), "{:?}", target); let target = target.unwrap(); if let Ok(env_target) = env::var("TARGET") { assert_eq!(target, env_target); } } }
extract_into
identifier_name
lib.rs
/*! [![Build status](https://ci.appveyor.com/api/projects/status/xlkq8rd73cla4ixw/branch/master?svg=true)](https://ci.appveyor.com/project/jaemk/self-update/branch/master) [![Build Status](https://travis-ci.org/jaemk/self_update.svg?branch=master)](https://travis-ci.org/jaemk/self_update) [![crates.io:clin](https://img.shields.io/crates/v/self_update.svg?label=self_update)](https://crates.io/crates/self_update) [![docs](https://docs.rs/self_update/badge.svg)](https://docs.rs/self_update) `self_update` provides updaters for updating rust executables in-place from various release distribution backends. ```shell self_update = "0.4" ``` ## Usage Update (replace) the current executable with the latest release downloaded from `https://api.github.com/repos/jaemk/self_update/releases/latest`. Note, the [`trust`](https://github.com/japaric/trust) project provides a nice setup for producing release-builds via CI (travis/appveyor). ``` #[macro_use] extern crate self_update; fn update() -> Result<(), Box<::std::error::Error>> { let target = self_update::get_target()?; let status = self_update::backends::github::Update::configure()? .repo_owner("jaemk") .repo_name("self_update") .target(&target) .bin_name("self_update_example") .show_download_progress(true) .current_version(cargo_crate_version!()) .build()? .update()?; println!("Update status: `{}`!", status.version()); Ok(()) } # fn main() { } ``` Run the above example to see `self_update` in action: `cargo run --example github` Separate utilities are also exposed: ``` extern crate self_update; fn update() -> Result<(), Box<::std::error::Error>> { let target = self_update::get_target()?; let releases = self_update::backends::github::ReleaseList::configure() .repo_owner("jaemk") .repo_name("self_update") .with_target(&target) .build()? 
.fetch()?; println!("found releases:"); println!("{:#?}\n", releases); // get the first available release let asset = releases[0] .asset_for(&target).unwrap(); let tmp_dir = self_update::TempDir::new_in(::std::env::current_dir()?, "self_update")?; let tmp_tarball_path = tmp_dir.path().join(&asset.name); let tmp_tarball = ::std::fs::File::open(&tmp_tarball_path)?; self_update::Download::from_url(&asset.download_url) .download_to(&tmp_tarball)?; self_update::Extract::from_source(&tmp_tarball_path) .archive(self_update::ArchiveKind::Tar) .encoding(self_update::EncodingKind::Gz) .extract_into(&tmp_dir.path())?; let tmp_file = tmp_dir.path().join("replacement_tmp"); let bin_name = "self_update_bin"; let bin_path = tmp_dir.path().join(bin_name); self_update::Move::from_source(&bin_path) .replace_using_temp(&tmp_file) .to_dest(&::std::env::current_exe()?)?; Ok(()) } # fn main() { } ``` */ extern crate serde_json; extern crate reqwest; extern crate tempdir; extern crate flate2; extern crate tar; extern crate semver; extern crate pbr; pub use tempdir::TempDir; use std::fs; use std::io; use std::path; #[macro_use] mod macros; pub mod errors; pub mod backends; pub mod version; use errors::*; /// Try to determine the current target triple. /// /// Returns a target triple (e.g. 
`x86_64-unknown-linux-gnu` or `i686-pc-windows-msvc`) or an /// `Error::Config` if the current config cannot be determined or is not some combination of the /// following values: /// `linux, mac, windows` -- `i686, x86, armv7` -- `gnu, musl, msvc` /// /// * Errors: /// * Unexpected system config pub fn get_target() -> Result<String> { let arch_config = (cfg!(target_arch = "x86"), cfg!(target_arch = "x86_64"), cfg!(target_arch = "arm")); let arch = match arch_config { (true, _, _) => "i686", (_, true, _) => "x86_64", (_, _, true) => "armv7", _ => bail!(Error::Update, "Unable to determine target-architecture"), }; let os_config = (cfg!(target_os = "linux"), cfg!(target_os = "macos"), cfg!(target_os = "windows")); let os = match os_config { (true, _, _) => "unknown-linux", (_, true, _) => "apple-darwin", (_, _, true) => "pc-windows", _ => bail!(Error::Update, "Unable to determine target-os"), }; let s; let os = if cfg!(target_os = "macos") { os } else { let env_config = (cfg!(target_env = "gnu"), cfg!(target_env = "musl"), cfg!(target_env = "msvc")); let env = match env_config { (true, _, _) => "gnu", (_, true, _) => "musl", (_, _, true) => "msvc", _ => bail!(Error::Update, "Unable to determine target-environment"), }; s = format!("{}-{}", os, env); &s }; Ok(format!("{}-{}", arch, os)) } /// Check if a version tag is greater than the current #[deprecated(since="0.4.2", note="`should_update` functionality has been moved to `version::bump_is_greater`.\ `version::bump_is_compatible` should be used instead.")] pub fn should_update(current: &str, latest: &str) -> Result<bool> { use semver::Version; Ok(Version::parse(latest)? > Version::parse(current)?) } /// Flush a message to stdout and check if they respond `yes`. /// Interprets a blank response as yes. 
/// /// * Errors: /// * Io flushing /// * User entered anything other than enter/Y/y fn confirm(msg: &str) -> Result<()> { print_flush!("{}", msg); let mut s = String::new(); io::stdin().read_line(&mut s)?; let s = s.trim().to_lowercase(); if ! s.is_empty() && s != "y" { bail!(Error::Update, "Update aborted"); } Ok(()) } /// Status returned after updating /// /// Wrapped `String`s are version tags #[derive(Debug, Clone)] pub enum Status { UpToDate(String), Updated(String), } impl Status { /// Return the version tag pub fn version(&self) -> &str { use Status::*; match *self { UpToDate(ref s) => s, Updated(ref s) => s, } } /// Returns `true` if `Status::UpToDate` pub fn uptodate(&self) -> bool { match *self { Status::UpToDate(_) => true,
pub fn updated(&self) -> bool { match *self { Status::Updated(_) => true, _ => false, } } } impl std::fmt::Display for Status { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { use Status::*; match *self { UpToDate(ref s) => write!(f, "UpToDate({})", s), Updated(ref s) => write!(f, "Updated({})", s), } } } /// Supported archive formats #[derive(Debug)] pub enum ArchiveKind { Tar, Plain, } /// Supported encoding formats #[derive(Debug)] pub enum EncodingKind { Gz, Plain, } /// Extract contents of an encoded archive (e.g. tar.gz) file to a specified directory /// /// * Errors: /// * Io - opening files /// * Io - gzip decoding /// * Io - archive unpacking #[derive(Debug)] pub struct Extract<'a> { source: &'a path::Path, archive: ArchiveKind, encoding: EncodingKind, } impl<'a> Extract<'a> { pub fn from_source(source: &'a path::Path) -> Extract<'a> { Self { source: source, archive: ArchiveKind::Plain, encoding: EncodingKind::Plain, } } pub fn archive(&mut self, kind: ArchiveKind) -> &mut Self { self.archive = kind; self } pub fn encoding(&mut self, kind: EncodingKind) -> &mut Self { self.encoding = kind; self } pub fn extract_into(&self, into_dir: &path::Path) -> Result<()> { let source = fs::File::open(self.source)?; let archive: Box<io::Read> = match self.encoding { EncodingKind::Plain => Box::new(source), EncodingKind::Gz => { let reader = flate2::read::GzDecoder::new(source); Box::new(reader) }, }; match self.archive { ArchiveKind::Plain => (), ArchiveKind::Tar => { let mut archive = tar::Archive::new(archive); archive.unpack(into_dir)?; } }; Ok(()) } } /// Moves a file from the given path to the specified destination. /// /// `source` and `dest` must be on the same filesystem. /// If `replace_using_temp` is provided, the destination file will be /// replaced using the given temp path as a backup in case of `io` errors. 
/// /// * Errors: /// * Io - copying / renaming #[derive(Debug)] pub struct Move<'a> { source: &'a path::Path, temp: Option<&'a path::Path>, } impl<'a> Move<'a> { /// Specify source file pub fn from_source(source: &'a path::Path) -> Move<'a> { Self { source: source, temp: None, } } /// If specified and the destination file already exists, the destination /// file will be "safely" replaced using a temp path. /// The `temp` dir should must be explicitly provided since `replace` operations require /// files to live on the same filesystem. pub fn replace_using_temp(&mut self, temp: &'a path::Path) -> &mut Self { self.temp = Some(temp); self } /// Move source file to specified destination pub fn to_dest(&self, dest: &path::Path) -> Result<()> { match self.temp { None => { fs::rename(self.source, dest)?; } Some(temp) => { if dest.exists() { fs::rename(dest, temp)?; match fs::rename(self.source, dest) { Err(e) => { fs::rename(temp, dest)?; return Err(Error::from(e)) } Ok(_) => (), }; } else { fs::rename(self.source, dest)?; } } }; Ok(()) } } /// Download things into files /// /// With optional progress bar #[derive(Debug)] pub struct Download { show_progress: bool, url: String, } impl Download { /// Specify download url pub fn from_url(url: &str) -> Self { Self { show_progress: false, url: url.to_owned(), } } /// Toggle download progress bar pub fn show_progress(&mut self, b: bool) -> &mut Self { self.show_progress = b; self } /// Download the file behind the given `url` into the specified `dest`. /// Show a sliding progress bar if specified. 
/// If the resource doesn't specify a content-length, the progress bar will not be shown /// /// * Errors: /// * `reqwest` network errors /// * Unsuccessful response status /// * Progress-bar errors /// * Reading from response to `BufReader`-buffer /// * Writing from `BufReader`-buffer to `File` pub fn download_to<T: io::Write>(&self, mut dest: T) -> Result<()> { use io::BufRead; set_ssl_vars!(); let resp = reqwest::get(&self.url)?; let size = resp.headers() .get::<reqwest::header::ContentLength>() .map(|ct_len| **ct_len) .unwrap_or(0); if !resp.status().is_success() { bail!(Error::Update, "Download request failed with status: {:?}", resp.status()) } let show_progress = if size == 0 { false } else { self.show_progress }; let mut src = io::BufReader::new(resp); let mut bar = if show_progress { let mut bar = pbr::ProgressBar::new(size); bar.set_units(pbr::Units::Bytes); bar.format("[=> ]"); Some(bar) } else { None }; loop { let n = { let mut buf = src.fill_buf()?; dest.write_all(&mut buf)?; buf.len() }; if n == 0 { break; } src.consume(n); if let Some(ref mut bar) = bar { bar.add(n as u64); } } if show_progress { println!(" ... Done"); } Ok(()) } } #[cfg(test)] mod tests { use super::*; use std::env; #[test] fn can_determine_target_arch() { let target = get_target(); assert!(target.is_ok(), "{:?}", target); let target = target.unwrap(); if let Ok(env_target) = env::var("TARGET") { assert_eq!(target, env_target); } } }
_ => false, } } /// Returns `true` if `Status::Updated`
random_line_split
lib.rs
/*! [![Build status](https://ci.appveyor.com/api/projects/status/xlkq8rd73cla4ixw/branch/master?svg=true)](https://ci.appveyor.com/project/jaemk/self-update/branch/master) [![Build Status](https://travis-ci.org/jaemk/self_update.svg?branch=master)](https://travis-ci.org/jaemk/self_update) [![crates.io:clin](https://img.shields.io/crates/v/self_update.svg?label=self_update)](https://crates.io/crates/self_update) [![docs](https://docs.rs/self_update/badge.svg)](https://docs.rs/self_update) `self_update` provides updaters for updating rust executables in-place from various release distribution backends. ```shell self_update = "0.4" ``` ## Usage Update (replace) the current executable with the latest release downloaded from `https://api.github.com/repos/jaemk/self_update/releases/latest`. Note, the [`trust`](https://github.com/japaric/trust) project provides a nice setup for producing release-builds via CI (travis/appveyor). ``` #[macro_use] extern crate self_update; fn update() -> Result<(), Box<::std::error::Error>> { let target = self_update::get_target()?; let status = self_update::backends::github::Update::configure()? .repo_owner("jaemk") .repo_name("self_update") .target(&target) .bin_name("self_update_example") .show_download_progress(true) .current_version(cargo_crate_version!()) .build()? .update()?; println!("Update status: `{}`!", status.version()); Ok(()) } # fn main() { } ``` Run the above example to see `self_update` in action: `cargo run --example github` Separate utilities are also exposed: ``` extern crate self_update; fn update() -> Result<(), Box<::std::error::Error>> { let target = self_update::get_target()?; let releases = self_update::backends::github::ReleaseList::configure() .repo_owner("jaemk") .repo_name("self_update") .with_target(&target) .build()? 
.fetch()?; println!("found releases:"); println!("{:#?}\n", releases); // get the first available release let asset = releases[0] .asset_for(&target).unwrap(); let tmp_dir = self_update::TempDir::new_in(::std::env::current_dir()?, "self_update")?; let tmp_tarball_path = tmp_dir.path().join(&asset.name); let tmp_tarball = ::std::fs::File::open(&tmp_tarball_path)?; self_update::Download::from_url(&asset.download_url) .download_to(&tmp_tarball)?; self_update::Extract::from_source(&tmp_tarball_path) .archive(self_update::ArchiveKind::Tar) .encoding(self_update::EncodingKind::Gz) .extract_into(&tmp_dir.path())?; let tmp_file = tmp_dir.path().join("replacement_tmp"); let bin_name = "self_update_bin"; let bin_path = tmp_dir.path().join(bin_name); self_update::Move::from_source(&bin_path) .replace_using_temp(&tmp_file) .to_dest(&::std::env::current_exe()?)?; Ok(()) } # fn main() { } ``` */ extern crate serde_json; extern crate reqwest; extern crate tempdir; extern crate flate2; extern crate tar; extern crate semver; extern crate pbr; pub use tempdir::TempDir; use std::fs; use std::io; use std::path; #[macro_use] mod macros; pub mod errors; pub mod backends; pub mod version; use errors::*; /// Try to determine the current target triple. /// /// Returns a target triple (e.g. 
`x86_64-unknown-linux-gnu` or `i686-pc-windows-msvc`) or an /// `Error::Config` if the current config cannot be determined or is not some combination of the /// following values: /// `linux, mac, windows` -- `i686, x86, armv7` -- `gnu, musl, msvc` /// /// * Errors: /// * Unexpected system config pub fn get_target() -> Result<String> { let arch_config = (cfg!(target_arch = "x86"), cfg!(target_arch = "x86_64"), cfg!(target_arch = "arm")); let arch = match arch_config { (true, _, _) => "i686", (_, true, _) => "x86_64", (_, _, true) => "armv7", _ => bail!(Error::Update, "Unable to determine target-architecture"), }; let os_config = (cfg!(target_os = "linux"), cfg!(target_os = "macos"), cfg!(target_os = "windows")); let os = match os_config { (true, _, _) => "unknown-linux", (_, true, _) => "apple-darwin", (_, _, true) => "pc-windows", _ => bail!(Error::Update, "Unable to determine target-os"), }; let s; let os = if cfg!(target_os = "macos") { os } else { let env_config = (cfg!(target_env = "gnu"), cfg!(target_env = "musl"), cfg!(target_env = "msvc")); let env = match env_config { (true, _, _) => "gnu", (_, true, _) => "musl", (_, _, true) => "msvc", _ => bail!(Error::Update, "Unable to determine target-environment"), }; s = format!("{}-{}", os, env); &s }; Ok(format!("{}-{}", arch, os)) } /// Check if a version tag is greater than the current #[deprecated(since="0.4.2", note="`should_update` functionality has been moved to `version::bump_is_greater`.\ `version::bump_is_compatible` should be used instead.")] pub fn should_update(current: &str, latest: &str) -> Result<bool> { use semver::Version; Ok(Version::parse(latest)? > Version::parse(current)?) } /// Flush a message to stdout and check if they respond `yes`. /// Interprets a blank response as yes. 
/// /// * Errors: /// * Io flushing /// * User entered anything other than enter/Y/y fn confirm(msg: &str) -> Result<()> { print_flush!("{}", msg); let mut s = String::new(); io::stdin().read_line(&mut s)?; let s = s.trim().to_lowercase(); if ! s.is_empty() && s != "y" { bail!(Error::Update, "Update aborted"); } Ok(()) } /// Status returned after updating /// /// Wrapped `String`s are version tags #[derive(Debug, Clone)] pub enum Status { UpToDate(String), Updated(String), } impl Status { /// Return the version tag pub fn version(&self) -> &str { use Status::*; match *self { UpToDate(ref s) => s, Updated(ref s) => s, } } /// Returns `true` if `Status::UpToDate` pub fn uptodate(&self) -> bool { match *self { Status::UpToDate(_) => true, _ => false, } } /// Returns `true` if `Status::Updated` pub fn updated(&self) -> bool { match *self { Status::Updated(_) => true, _ => false, } } } impl std::fmt::Display for Status { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { use Status::*; match *self { UpToDate(ref s) => write!(f, "UpToDate({})", s), Updated(ref s) => write!(f, "Updated({})", s), } } } /// Supported archive formats #[derive(Debug)] pub enum ArchiveKind { Tar, Plain, } /// Supported encoding formats #[derive(Debug)] pub enum EncodingKind { Gz, Plain, } /// Extract contents of an encoded archive (e.g. 
tar.gz) file to a specified directory /// /// * Errors: /// * Io - opening files /// * Io - gzip decoding /// * Io - archive unpacking #[derive(Debug)] pub struct Extract<'a> { source: &'a path::Path, archive: ArchiveKind, encoding: EncodingKind, } impl<'a> Extract<'a> { pub fn from_source(source: &'a path::Path) -> Extract<'a> { Self { source: source, archive: ArchiveKind::Plain, encoding: EncodingKind::Plain, } } pub fn archive(&mut self, kind: ArchiveKind) -> &mut Self { self.archive = kind; self } pub fn encoding(&mut self, kind: EncodingKind) -> &mut Self { self.encoding = kind; self } pub fn extract_into(&self, into_dir: &path::Path) -> Result<()> { let source = fs::File::open(self.source)?; let archive: Box<io::Read> = match self.encoding { EncodingKind::Plain => Box::new(source), EncodingKind::Gz => { let reader = flate2::read::GzDecoder::new(source); Box::new(reader) }, }; match self.archive { ArchiveKind::Plain => (), ArchiveKind::Tar => { let mut archive = tar::Archive::new(archive); archive.unpack(into_dir)?; } }; Ok(()) } } /// Moves a file from the given path to the specified destination. /// /// `source` and `dest` must be on the same filesystem. /// If `replace_using_temp` is provided, the destination file will be /// replaced using the given temp path as a backup in case of `io` errors. /// /// * Errors: /// * Io - copying / renaming #[derive(Debug)] pub struct Move<'a> { source: &'a path::Path, temp: Option<&'a path::Path>, } impl<'a> Move<'a> { /// Specify source file pub fn from_source(source: &'a path::Path) -> Move<'a> { Self { source: source, temp: None, } } /// If specified and the destination file already exists, the destination /// file will be "safely" replaced using a temp path. /// The `temp` dir should must be explicitly provided since `replace` operations require /// files to live on the same filesystem. 
pub fn replace_using_temp(&mut self, temp: &'a path::Path) -> &mut Self { self.temp = Some(temp); self } /// Move source file to specified destination pub fn to_dest(&self, dest: &path::Path) -> Result<()>
} /// Download things into files /// /// With optional progress bar #[derive(Debug)] pub struct Download { show_progress: bool, url: String, } impl Download { /// Specify download url pub fn from_url(url: &str) -> Self { Self { show_progress: false, url: url.to_owned(), } } /// Toggle download progress bar pub fn show_progress(&mut self, b: bool) -> &mut Self { self.show_progress = b; self } /// Download the file behind the given `url` into the specified `dest`. /// Show a sliding progress bar if specified. /// If the resource doesn't specify a content-length, the progress bar will not be shown /// /// * Errors: /// * `reqwest` network errors /// * Unsuccessful response status /// * Progress-bar errors /// * Reading from response to `BufReader`-buffer /// * Writing from `BufReader`-buffer to `File` pub fn download_to<T: io::Write>(&self, mut dest: T) -> Result<()> { use io::BufRead; set_ssl_vars!(); let resp = reqwest::get(&self.url)?; let size = resp.headers() .get::<reqwest::header::ContentLength>() .map(|ct_len| **ct_len) .unwrap_or(0); if !resp.status().is_success() { bail!(Error::Update, "Download request failed with status: {:?}", resp.status()) } let show_progress = if size == 0 { false } else { self.show_progress }; let mut src = io::BufReader::new(resp); let mut bar = if show_progress { let mut bar = pbr::ProgressBar::new(size); bar.set_units(pbr::Units::Bytes); bar.format("[=> ]"); Some(bar) } else { None }; loop { let n = { let mut buf = src.fill_buf()?; dest.write_all(&mut buf)?; buf.len() }; if n == 0 { break; } src.consume(n); if let Some(ref mut bar) = bar { bar.add(n as u64); } } if show_progress { println!(" ... Done"); } Ok(()) } } #[cfg(test)] mod tests { use super::*; use std::env; #[test] fn can_determine_target_arch() { let target = get_target(); assert!(target.is_ok(), "{:?}", target); let target = target.unwrap(); if let Ok(env_target) = env::var("TARGET") { assert_eq!(target, env_target); } } }
{ match self.temp { None => { fs::rename(self.source, dest)?; } Some(temp) => { if dest.exists() { fs::rename(dest, temp)?; match fs::rename(self.source, dest) { Err(e) => { fs::rename(temp, dest)?; return Err(Error::from(e)) } Ok(_) => (), }; } else { fs::rename(self.source, dest)?; } } }; Ok(()) }
identifier_body
sample.app.js
/** * Project : UCMS( Unified Contents Messaging Solution ) * * Copyright (c) 2013, 2014 FREECORE, Inc. All rights reserved. * * @author dbongman */ define( [ "BaroAppBase", "BaroProps", "Logger", "osapi" ] , function(BaroAppBase, BaroProps, Logger, osapi) { var baroappTrackingId = "UA-46722680-5"; var barowebTrackingId = "UA-46722680-1"; var baroappVer = "1.1.3"; var COOKER_CON_ID = "cooker"; var TestSessionChecker = function() { $.ajax( { type: 'GET', url: "http://localhost:8084/sessionChecker", cache: false, crossDomain: true }); setTimeout(TestSessionChecker, 60000); }; var reloadApp = function() { if(UCMS.SPA.isAppOS() == true) { var cs = osapi.getModule("CoreService"); cs.reset(); } else { //UCMS.reloadPage("#!reset"); window.location.reload(); } }; var BaangApp = BaroAppBase.extend( { _param : null, onAjaxError: function(jqXHR, textStatus) { Logger.debug("onAjaxError() - textStatus : "+textStatus); $("body").css("overflow","auto"); var self = this; UCMS.hideLoading(); if( jqXHR.status == 401 || jqXHR.status == 403 ) { Logger.error("[digestError] Unauthorized access. 
status : "+jqXHR.status); var msg; var user = BaroProps.getUser(); if( typeof user.md5pwd == "string" ) { msg = "세션이 만료되었습니다.<br>자동 연장합니다."; UCMS.showPrompt(msg); this.initSession().then(function() { self.initUniMeAgent() .then( function() { return self._initBadge(); } , function(err) { Logger.error("onAjaxError() - Failed to initialize the unime agent by error : "+JSON.stringify(err)); BaroAppBase.restartApp(); } ) .then(function() { UCMS.reloadPage("#!reset"); }) .always(function() { UCMS.hidePrompt(); }); } , function() { UCMS.hidePrompt(); UCMS.alert("세션을 복구하지 못했습니다.<br>잠시 후 다시 이용해 주세요.<br>감사합니다.") .then( function() { BaroProps.setUser(); BaroProps.setSessionParams(); reloadApp(); } ); }); } else { msg = "일정시간이 지나 안전을 위해 로그아웃 되었습니다.<br>다시 사용하시려면 로그인 해주세요."; UCMS.alert(msg).then ( function() { BaroProps.setUser(); BaroProps.setSessionParams(); reloadApp(); } ); } } else { Logger.error("[digestError] Error Code : "+jqXHR.status+", Message : "+jqXHR.responseText); UCMS.alert("서버와 통신 중 오류["+textStatus+","+jqXHR.status+"]가 발생하였습니다.<br>잠시 후 다시 시도해주세요.<br>이용에 불편을 드려 죄송합니다!") .then( function() { reloadApp(); }); } return true; } , onInitializeBefore: function(options) { UCMS.log("onInitializeBefore()"); this._appInfo = options.baangapp || {}; this._param = options; /** * PC 브라우저 백 버튼 으로 페이지 이동시 잠긴상태의 스크롤을 해제한다. 
* * */ if( UCMS.SPA.isAppOS() == false ) { window.onhashchange = function() { $("body").css("overflow", "auto"); }; }; // XXX 401 발생시 처리 테스트 코드 //setTimeout(TestSessionChecker, 10000); // this.initApplication ( options, { home_body : "modules/app/cooker/widgets/home/home-0.8.1.html", home : "modules/app/cooker/widgets/home/home-0.8.1", login_body : "modules/widgets/sign/login-0.8.1.html", login : "modules/widgets/sign/login-0.8.1", member_body : "modules/widgets/sign/member-0.8.1.html", member : "modules/widgets/sign/member-0.8.1", youtube_body : "modules/widgets/youtube/youtube-0.8.1.html", youtube : "modules/widgets/youtube/youtube-0.8.1", publicdata_body : "modules/widgets/opendata/publicData-0.8.1.html", publicdata : "modules/widgets/opendata/publicData-0.8.1", selectcity_body : "modules/widgets/opendata/selectCity-0.8.1.html", selectcity : "modules/widgets/opendata/selectCity-0.8.1", detailToiletInfo_body : "modules/widgets/opendata/detailToiletInfo-0.8.1.html", detailToiletInfo : "modules/widgets/opendata/detailToiletInfo-0.8.1", searchResult_body : "modules/widgets/youtube/searchResult-0.8.1.html", searchResult : "modules/widgets/youtube/searchResult-0.8.1", //modules을 어디에다가 정의해야하는지 몰라서 우선 여기에다 //youtube modules AuthYoutube : "modules/widgets/youtube/api/AuthYoutube", youtubeToken : "modules/widgets/youtube/models/youtubePagetokenModel", requestApi : "modules/widgets/youtube/api/requestApi", uploadApi : "modules/widgets/youtube/api/uploadApi", //toilet modules toilet : "modules/widgets/opendata/api/toiletInfo", toiletDetailInfo : "modules/widgets/opendata/model/toiletDetailInfo", } , UCMS.getRootPath() ); }, _initRoute: function( options ) { var self = this; this._route = new (Backbone.Marionette.AppRouter.extend( { routes: { "": "doHome", "home": "doHome", "!login" : "doLogin", "!join" : "doJoin", "up_join": "upJoin", "!member":"doMemberConfirm", "!youtube": "doYoutube", "!publicdata": "doPublic", "!selectCity": "doSelectcity", "!detailInfo": 
"doDetailInfo", "!likedVideolist": "doChkLiked", "!searchResult": "doSearch", }, onRoute: function( name, path, route ) { UCMSPlatform.log("Routing : "+name+", path: "+path+", route: "+route); var panelTag = Backbone.history.getFragment(); if( panelTag ) { Logger.debug("Tracking Tag : "+panelTag); Logger.debug("self._tracker : "+self._tracker); if(self._tracker != null) self._tracker.trackingView( panelTag ); } }, doHome: function() { UCMSPlatform.log("apps doHome()"); self._setPanel("doHome"); } , doLogin: function() { UCMSPlatform.log("apps doLogin()"); self._setPanel("doLogin"); } , doMemberConfirm : function() { UCMSPlatform.log("apps doMemberConfirm()"); self._setPanel("doMemberConfirm"); } , doYoutube: function() { UCMSPlatform.log("apps doYoutube()"); self._setPanel("doYoutube"); } , doPublic: function() { UCMSPlatform.log("apps doPublic()"); self._setPanel("doPublic"); } , doSelectcity: function(){ UCMSPlatform.log("apps doSelectcity()"); self._setPanel("doSelectcity"); } , doDetailInfo: function(){ UCMSPlatform.log("apps doSelectcity()"); self._setPanel("doDetailInfo"); }, doSearch: function(){ self._setPanel("doSearch"); } } )); }, _initUI: function( options ) { Logger.info("_initUI options " + JSON.stringify(options)); Logger.info(" UCMS.SPA.isDesktop() " + UCMS.SPA.isDesktop()); Logger.info("UCMS.SPA.isAppOS() " + UCMS.SPA.isAppOS()); if( UCMS.SPA.isDesktop() == false && UCMS.SPA.isAppOS() == false ) { // // 모바일에서 브라우저로 진입한 경우, // 앱으로 전환할 수 있는 영역을 확보한다. 
// $("body").append("<div class=switcher_region/><div class=body_region/>"); this.addRegions( { switcher: ".switcher_region", body: ".body_region" }); } else { this.addRegions( { body: options.bodyTag }); } // 웹뷰 높이 적용 UCMS.adjustViewHeight($("body")); // 웹 팝업 이벤트 가로채기 BaroAppBase.hookingHyperLink( options.bodyTag, "web:open" ); if( UCMS.SPA.isAppOS() == true && UCMS.SPA.isAndroid() == false ) { UCMS.initFixedHandler("input"); } Logger.info("_initUI options end " ); }, _setPanel: function( moduleName, p_type, container_id, title, item_id ) { /* var thePanel = UCMSPlatform.SPA.AppMain.createInstance( moduleName ); if( thePanel ) { this._showFrame( thePanel ); return; } */ UCMSPlatform.log("Loading a Panel!"); var self = this; if(p_type != undefined) self._param.type = p_type; if( moduleName === "doHome" ) { self._moduleLoading("home"); } else if( moduleName === "doHome" ) { self._moduleLoading("home"); } else if( moduleName === "doLogin" ) { self._moduleLoading("login"); } else if( moduleName === "doMemberConfirm" ) { self._moduleLoading("member"); } else if( moduleName === "doYoutube" ) { self._moduleLoading("youtube"); } else if( moduleName === "doPublic") { self._moduleLoading("publicdata"); } else if( moduleName === "doSelectcity") { self._moduleLoading("selectcity"); } else if( moduleName === "doDetailInfo") { self._moduleLoading("detailToiletInfo"); } else if( moduleName === "doSearch") { self._moduleLoading("searchResult"); } }, _showFrame: function(framePanel) { this.body.show( framePanel );
_moduleLoading : function(moduleName, action, id, title, item_id){ var self = this; require([ moduleName ], function(klass) { var selfPanel = new klass({parentView : self, mode: action, container_id : id, title : title, item_id : item_id}); self._showFrame( selfPanel ); }); } , onStart: function() { Logger.debug("Cooker.onStart() - begin"); Backbone.history.start({ silent: true }); this._route.navigate( "home", true ); Logger.debug("Cooker.onStart() - end"); } , goStartPage : function() { /**TODO * 리플레쉬시 현재페이지 유지 테스트코드. * * */ //if( this._param.startPage == undefined || location.href.indexOf("#home") > 0 ) if( location.href.indexOf("#home") > 0 ) { Backbone.history.start(); } else { var user = BaroProps.getUser(); var startPage = "#home"; } } , setBrowserComatible : function(){ UCMS.log("setBrowserCompablity==="); if(navigator.userAgent.indexOf("MSIE 9.0;") > 0){ $("head").append('<style type="text/css"> #iconCanvasPreview { width: 100% !important; height: 450px }</style>'); } } , openSwitcher : function() { var self = this; if(this.switcher != undefined) { // // 모바일에서 브라우저로 전근하는 경우 switcher region 이 추가된다. // 이런 경우 switcher 모듈을 활성화한다. // require([ "SwitcherPanel" ], function(SwitcherPanel) { self.switcher.show( new SwitcherPanel() ); // 시작파일에 담겨있는 초기 로딩 제거 // 앱이 생성되면서 대체하지 못하기 때문에 수동으로 제거 $("body > .loading_box").remove(); }); } } , closeSwitcher : function() { if( this.switcher != undefined ) { this.switcher.close(); } } , _initBadge: function() { if( this._unimeClient == null ) { Logger.info("_initBadge() - unime client is null."); return; } Logger.info("_initBadge() - Current User : "+this._unimeClient.get("user").id); // TODO 사용자가 바뀐경우 화면 전환시 항상 UniMe Token 이 다시 설정된다. 그러므로 _unimeClient 에는 언제나 최신 사용자 정보가 설정된 상태이다. 별도의 정보 갱신 절차 없이 API 를 호출한다. return this._unimeClient.getUnreadCount(); } }); return BaangApp; });
},
random_line_split
mod.rs
mod default_types; mod jsont; mod stats; use crate::cache::Digest; use crate::process::ShellCommand; use anyhow::Result; use once_cell::sync::Lazy; use std::borrow::Cow; use std::collections::HashMap; use std::convert::TryFrom; use std::ops::Range; use std::path::{Path, PathBuf}; use std::process::Command; use utils::display_width; pub use self::jsont::{Match, Message, SubMatch}; pub static RG_EXISTS: Lazy<bool> = Lazy::new(|| { std::process::Command::new("rg") .arg("--version") .stdout(std::process::Stdio::null()) .status() .map(|exit_status| exit_status.success()) .unwrap_or(false) }); /// Map of file extension to ripgrep language. /// /// https://github.com/BurntSushi/ripgrep/blob/20534fad04/crates/ignore/src/default_types.rs static RG_LANGUAGE_EXT_TABLE: Lazy<HashMap<&str, &str>> = Lazy::new(|| { default_types::DEFAULT_TYPES .iter() .flat_map(|(lang, values)| { values.iter().filter_map(|v| { v.split('.').last().and_then(|ext| { // Simply ignore the abnormal cases. if ext.contains('[') || ext.contains('*') { None } else { Some((ext, *lang)) } }) }) }) .collect() }); /// Finds the ripgrep language given the file extension `ext`. pub fn get_language(file_extension: &str) -> Option<&&str> { RG_LANGUAGE_EXT_TABLE.get(file_extension) } /// Word represents the input query around by word boundries. #[derive(Clone, Debug)] pub struct Word { pub raw: String, pub len: usize, pub re: regex::Regex, } impl Word { pub fn new(re_word: String, re: regex::Regex) -> Word { Self { len: re_word.len(), raw: re_word, re, } } pub fn find(&self, line: &str) -> Option<usize> { self.re.find(line).map(|mat| mat.start()) } } #[inline] fn range(start: usize, end: usize, offset: usize) -> Range<usize> { start + offset..end + offset } impl SubMatch { pub fn match_indices(&self, offset: usize) -> Range<usize> { range(self.start, self.end, offset) } // FIXME find the word in non-utf8? 
pub fn match_indices_for_dumb_jump(&self, offset: usize, search_word: &Word) -> Range<usize> { // The text in SubMatch is not exactly the search word itself in some cases, // we need to first find the offset of search word in the SubMatch text manually. match search_word.find(&self.m.text()) { Some(search_word_offset) => { let start = self.start + search_word_offset; range(start, start + search_word.len, offset) } None => Default::default(), } } } impl PartialEq for Match { fn eq(&self, other: &Match) -> bool { // Ignore the `submatches` field. // // Given a certain search word, if all the other fields are same, especially the // `absolute_offset` equals, these two Match can be considered the same. self.path == other.path && self.lines == other.lines && self.line_number == other.line_number && self.absolute_offset == other.absolute_offset } } impl Eq for Match {} impl Match { pub fn path(&self) -> Cow<str> { self.path.text() } pub fn line_number(&self) -> u64 { self.line_number.unwrap_or_default() } pub fn column(&self) -> usize { self.submatches.get(0).map(|x| x.start).unwrap_or_default() } /// Returns true if the text line starts with `pat`. 
pub fn line_starts_with(&self, pat: &str) -> bool { self.lines.text().trim_start().starts_with(pat) } pub fn match_indices(&self, offset: usize) -> Vec<usize> { self.submatches .iter() .flat_map(|s| s.match_indices(offset)) .collect() } pub fn match_indices_for_dumb_jump(&self, offset: usize, search_word: &Word) -> Vec<usize> { self.submatches .iter() .flat_map(|s| s.match_indices_for_dumb_jump(offset, search_word)) .collect() } } impl TryFrom<&[u8]> for Match { type Error = Cow<'static, str>; fn try_from(byte_line: &[u8]) -> Result<Self, Self::Error> { let msg = serde_json::from_slice::<Message>(byte_line) .map_err(|e| format!("deserialize error: {e:?}"))?; if let Message::Match(mat) = msg { Ok(mat) } else { Err("Not Message::Match type".into()) } } } impl TryFrom<&str> for Match { type Error = Cow<'static, str>; fn try_from(line: &str) -> Result<Self, Self::Error> { let msg = serde_json::from_str::<Message>(line) .map_err(|e| format!("deserialize error: {e:?}"))?; if let Message::Match(mat) = msg { Ok(mat) } else { Err("Not Message::Match type".into()) } } } impl Match { /// Returns a pair of the formatted `String` and the offset of origin match indices. /// /// The formatted String is same with the output line using rg's -vimgrep option. fn grep_line_format(&self, enable_icon: bool) -> (String, usize) { let path = self.path(); let line_number = self.line_number(); let column = self.column(); let pattern = self.pattern(); let pattern = pattern.trim_end(); // filepath:line_number:column:text, 3 extra `:` in the formatted String. 
let mut offset = path.len() + display_width(line_number as usize) + display_width(column) + 3; let formatted_line = if enable_icon { let icon = icon::file_icon(&path); offset += icon.len_utf8() + 1; format!("{icon} {path}:{line_number}:{column}:{pattern}") } else { format!("{path}:{line_number}:{column}:{pattern}") }; (formatted_line, offset) } pub fn build_grep_line(&self, enable_icon: bool) -> (String, Vec<usize>) { let (formatted, offset) = self.grep_line_format(enable_icon); let indices = self.match_indices(offset); (formatted, indices) } #[inline] pub fn pattern(&self) -> Cow<str> { self.lines.text() } pub fn pattern_priority(&self) -> dumb_analyzer::Priority { self.path() .rsplit_once('.') .and_then(|(_, file_ext)| { dumb_analyzer::calculate_pattern_priority(self.pattern(), file_ext) }) .unwrap_or_default() } /// Returns a pair of the formatted `String` and the offset of matches for dumb_jump provider. /// /// NOTE: [`pattern::DUMB_JUMP_LINE`] must be updated accordingly once the format is changed. 
fn jump_line_format(&self, kind: &str) -> (String, usize) { let path = self.path(); let line_number = self.line_number(); let column = self.column(); let pattern = self.pattern(); let pattern = pattern.trim_end(); let formatted_line = format!("[r{kind}]{path}:{line_number}:{column}:{pattern}",); let offset = kind.len() + path.len() + display_width(line_number as usize) + display_width(column) + 6; // `[r]` + 3 `:` (formatted_line, offset) } pub fn build_jump_line(&self, kind: &str, word: &Word) -> (String, Vec<usize>) { let (formatted, offset) = self.jump_line_format(kind); let indices = self.match_indices_for_dumb_jump(offset, word); (formatted, indices) } fn jump_line_format_bare(&self) -> (String, usize) { let line_number = self.line_number(); let column = self.column(); let pattern = self.pattern(); let pattern = pattern.trim_end(); let formatted_string = format!(" {line_number}:{column}:{pattern}"); let offset = display_width(line_number as usize) + display_width(column) + 2 + 2; (formatted_string, offset) } pub fn build_jump_line_bare(&self, word: &Word) -> (String, Vec<usize>) { let (formatted, offset) = self.jump_line_format_bare(); let indices = self.match_indices_for_dumb_jump(offset, word); (formatted, indices) } } const RG_ARGS: &[&str] = &[ "rg", "--column", "--line-number", "--no-heading", "--color=never", "--smart-case", "", ".", ]; // Ref https://github.com/liuchengxu/vim-clap/issues/533 // Now `.` is pushed to the end for all platforms due to https://github.com/liuchengxu/vim-clap/issues/711. pub const RG_EXEC_CMD: &str = "rg --column --line-number --no-heading --color=never --smart-case '' ."; // Used for creating the cache in async context. 
#[derive(Debug, Clone, Hash)] pub struct RgTokioCommand { shell_cmd: ShellCommand, } impl RgTokioCommand { pub fn new(dir: PathBuf) -> Self { let shell_cmd = ShellCommand::new(RG_EXEC_CMD.into(), dir); Self { shell_cmd } } pub fn cache_digest(&self) -> Option<Digest> { self.shell_cmd.cache_digest() } pub async fn create_cache(self) -> Result<Digest> { let cache_file = self.shell_cmd.cache_file_path()?; let std_cmd = rg_command(&self.shell_cmd.dir); let mut tokio_cmd = tokio::process::Command::from(std_cmd); crate::process::tokio::write_stdout_to_file(&mut tokio_cmd, &cache_file).await?; let digest = crate::cache::store_cache_digest(self.shell_cmd.clone(), cache_file)?; Ok(digest) } } pub fn rg_command<P: AsRef<Path>>(dir: P) -> Command { // Can not use StdCommand as it joins the args which does not work somehow. let mut cmd = Command::new(RG_ARGS[0]); // Do not use --vimgrep here. cmd.args(&RG_ARGS[1..]).current_dir(dir); cmd } pub fn refresh_cache(dir: impl AsRef<Path>) -> Result<Digest> { let shell_cmd = rg_shell_command(dir.as_ref()); let cache_file_path = shell_cmd.cache_file_path()?; let mut cmd = rg_command(dir.as_ref()); crate::process::write_stdout_to_file(&mut cmd, &cache_file_path)?; let digest = crate::cache::store_cache_digest(shell_cmd, cache_file_path)?; Ok(digest) } #[inline] pub fn rg_shell_command<P: AsRef<Path>>(dir: P) -> ShellCommand
{ ShellCommand::new(RG_EXEC_CMD.into(), PathBuf::from(dir.as_ref())) }
identifier_body
mod.rs
mod default_types; mod jsont; mod stats; use crate::cache::Digest; use crate::process::ShellCommand; use anyhow::Result; use once_cell::sync::Lazy; use std::borrow::Cow; use std::collections::HashMap; use std::convert::TryFrom; use std::ops::Range; use std::path::{Path, PathBuf}; use std::process::Command; use utils::display_width; pub use self::jsont::{Match, Message, SubMatch}; pub static RG_EXISTS: Lazy<bool> = Lazy::new(|| { std::process::Command::new("rg") .arg("--version") .stdout(std::process::Stdio::null()) .status() .map(|exit_status| exit_status.success()) .unwrap_or(false) }); /// Map of file extension to ripgrep language. /// /// https://github.com/BurntSushi/ripgrep/blob/20534fad04/crates/ignore/src/default_types.rs static RG_LANGUAGE_EXT_TABLE: Lazy<HashMap<&str, &str>> = Lazy::new(|| { default_types::DEFAULT_TYPES .iter() .flat_map(|(lang, values)| { values.iter().filter_map(|v| { v.split('.').last().and_then(|ext| { // Simply ignore the abnormal cases. if ext.contains('[') || ext.contains('*') { None } else { Some((ext, *lang)) } }) }) }) .collect() }); /// Finds the ripgrep language given the file extension `ext`. pub fn get_language(file_extension: &str) -> Option<&&str> { RG_LANGUAGE_EXT_TABLE.get(file_extension) } /// Word represents the input query around by word boundries. #[derive(Clone, Debug)] pub struct Word { pub raw: String, pub len: usize,
pub re: regex::Regex, } impl Word { pub fn new(re_word: String, re: regex::Regex) -> Word { Self { len: re_word.len(), raw: re_word, re, } } pub fn find(&self, line: &str) -> Option<usize> { self.re.find(line).map(|mat| mat.start()) } } #[inline] fn range(start: usize, end: usize, offset: usize) -> Range<usize> { start + offset..end + offset } impl SubMatch { pub fn match_indices(&self, offset: usize) -> Range<usize> { range(self.start, self.end, offset) } // FIXME find the word in non-utf8? pub fn match_indices_for_dumb_jump(&self, offset: usize, search_word: &Word) -> Range<usize> { // The text in SubMatch is not exactly the search word itself in some cases, // we need to first find the offset of search word in the SubMatch text manually. match search_word.find(&self.m.text()) { Some(search_word_offset) => { let start = self.start + search_word_offset; range(start, start + search_word.len, offset) } None => Default::default(), } } } impl PartialEq for Match { fn eq(&self, other: &Match) -> bool { // Ignore the `submatches` field. // // Given a certain search word, if all the other fields are same, especially the // `absolute_offset` equals, these two Match can be considered the same. self.path == other.path && self.lines == other.lines && self.line_number == other.line_number && self.absolute_offset == other.absolute_offset } } impl Eq for Match {} impl Match { pub fn path(&self) -> Cow<str> { self.path.text() } pub fn line_number(&self) -> u64 { self.line_number.unwrap_or_default() } pub fn column(&self) -> usize { self.submatches.get(0).map(|x| x.start).unwrap_or_default() } /// Returns true if the text line starts with `pat`. 
pub fn line_starts_with(&self, pat: &str) -> bool { self.lines.text().trim_start().starts_with(pat) } pub fn match_indices(&self, offset: usize) -> Vec<usize> { self.submatches .iter() .flat_map(|s| s.match_indices(offset)) .collect() } pub fn match_indices_for_dumb_jump(&self, offset: usize, search_word: &Word) -> Vec<usize> { self.submatches .iter() .flat_map(|s| s.match_indices_for_dumb_jump(offset, search_word)) .collect() } } impl TryFrom<&[u8]> for Match { type Error = Cow<'static, str>; fn try_from(byte_line: &[u8]) -> Result<Self, Self::Error> { let msg = serde_json::from_slice::<Message>(byte_line) .map_err(|e| format!("deserialize error: {e:?}"))?; if let Message::Match(mat) = msg { Ok(mat) } else { Err("Not Message::Match type".into()) } } } impl TryFrom<&str> for Match { type Error = Cow<'static, str>; fn try_from(line: &str) -> Result<Self, Self::Error> { let msg = serde_json::from_str::<Message>(line) .map_err(|e| format!("deserialize error: {e:?}"))?; if let Message::Match(mat) = msg { Ok(mat) } else { Err("Not Message::Match type".into()) } } } impl Match { /// Returns a pair of the formatted `String` and the offset of origin match indices. /// /// The formatted String is same with the output line using rg's -vimgrep option. fn grep_line_format(&self, enable_icon: bool) -> (String, usize) { let path = self.path(); let line_number = self.line_number(); let column = self.column(); let pattern = self.pattern(); let pattern = pattern.trim_end(); // filepath:line_number:column:text, 3 extra `:` in the formatted String. 
let mut offset = path.len() + display_width(line_number as usize) + display_width(column) + 3; let formatted_line = if enable_icon { let icon = icon::file_icon(&path); offset += icon.len_utf8() + 1; format!("{icon} {path}:{line_number}:{column}:{pattern}") } else { format!("{path}:{line_number}:{column}:{pattern}") }; (formatted_line, offset) } pub fn build_grep_line(&self, enable_icon: bool) -> (String, Vec<usize>) { let (formatted, offset) = self.grep_line_format(enable_icon); let indices = self.match_indices(offset); (formatted, indices) } #[inline] pub fn pattern(&self) -> Cow<str> { self.lines.text() } pub fn pattern_priority(&self) -> dumb_analyzer::Priority { self.path() .rsplit_once('.') .and_then(|(_, file_ext)| { dumb_analyzer::calculate_pattern_priority(self.pattern(), file_ext) }) .unwrap_or_default() } /// Returns a pair of the formatted `String` and the offset of matches for dumb_jump provider. /// /// NOTE: [`pattern::DUMB_JUMP_LINE`] must be updated accordingly once the format is changed. 
fn jump_line_format(&self, kind: &str) -> (String, usize) { let path = self.path(); let line_number = self.line_number(); let column = self.column(); let pattern = self.pattern(); let pattern = pattern.trim_end(); let formatted_line = format!("[r{kind}]{path}:{line_number}:{column}:{pattern}",); let offset = kind.len() + path.len() + display_width(line_number as usize) + display_width(column) + 6; // `[r]` + 3 `:` (formatted_line, offset) } pub fn build_jump_line(&self, kind: &str, word: &Word) -> (String, Vec<usize>) { let (formatted, offset) = self.jump_line_format(kind); let indices = self.match_indices_for_dumb_jump(offset, word); (formatted, indices) } fn jump_line_format_bare(&self) -> (String, usize) { let line_number = self.line_number(); let column = self.column(); let pattern = self.pattern(); let pattern = pattern.trim_end(); let formatted_string = format!(" {line_number}:{column}:{pattern}"); let offset = display_width(line_number as usize) + display_width(column) + 2 + 2; (formatted_string, offset) } pub fn build_jump_line_bare(&self, word: &Word) -> (String, Vec<usize>) { let (formatted, offset) = self.jump_line_format_bare(); let indices = self.match_indices_for_dumb_jump(offset, word); (formatted, indices) } } const RG_ARGS: &[&str] = &[ "rg", "--column", "--line-number", "--no-heading", "--color=never", "--smart-case", "", ".", ]; // Ref https://github.com/liuchengxu/vim-clap/issues/533 // Now `.` is pushed to the end for all platforms due to https://github.com/liuchengxu/vim-clap/issues/711. pub const RG_EXEC_CMD: &str = "rg --column --line-number --no-heading --color=never --smart-case '' ."; // Used for creating the cache in async context. 
#[derive(Debug, Clone, Hash)] pub struct RgTokioCommand { shell_cmd: ShellCommand, } impl RgTokioCommand { pub fn new(dir: PathBuf) -> Self { let shell_cmd = ShellCommand::new(RG_EXEC_CMD.into(), dir); Self { shell_cmd } } pub fn cache_digest(&self) -> Option<Digest> { self.shell_cmd.cache_digest() } pub async fn create_cache(self) -> Result<Digest> { let cache_file = self.shell_cmd.cache_file_path()?; let std_cmd = rg_command(&self.shell_cmd.dir); let mut tokio_cmd = tokio::process::Command::from(std_cmd); crate::process::tokio::write_stdout_to_file(&mut tokio_cmd, &cache_file).await?; let digest = crate::cache::store_cache_digest(self.shell_cmd.clone(), cache_file)?; Ok(digest) } } pub fn rg_command<P: AsRef<Path>>(dir: P) -> Command { // Can not use StdCommand as it joins the args which does not work somehow. let mut cmd = Command::new(RG_ARGS[0]); // Do not use --vimgrep here. cmd.args(&RG_ARGS[1..]).current_dir(dir); cmd } pub fn refresh_cache(dir: impl AsRef<Path>) -> Result<Digest> { let shell_cmd = rg_shell_command(dir.as_ref()); let cache_file_path = shell_cmd.cache_file_path()?; let mut cmd = rg_command(dir.as_ref()); crate::process::write_stdout_to_file(&mut cmd, &cache_file_path)?; let digest = crate::cache::store_cache_digest(shell_cmd, cache_file_path)?; Ok(digest) } #[inline] pub fn rg_shell_command<P: AsRef<Path>>(dir: P) -> ShellCommand { ShellCommand::new(RG_EXEC_CMD.into(), PathBuf::from(dir.as_ref())) }
random_line_split
mod.rs
mod default_types; mod jsont; mod stats; use crate::cache::Digest; use crate::process::ShellCommand; use anyhow::Result; use once_cell::sync::Lazy; use std::borrow::Cow; use std::collections::HashMap; use std::convert::TryFrom; use std::ops::Range; use std::path::{Path, PathBuf}; use std::process::Command; use utils::display_width; pub use self::jsont::{Match, Message, SubMatch}; pub static RG_EXISTS: Lazy<bool> = Lazy::new(|| { std::process::Command::new("rg") .arg("--version") .stdout(std::process::Stdio::null()) .status() .map(|exit_status| exit_status.success()) .unwrap_or(false) }); /// Map of file extension to ripgrep language. /// /// https://github.com/BurntSushi/ripgrep/blob/20534fad04/crates/ignore/src/default_types.rs static RG_LANGUAGE_EXT_TABLE: Lazy<HashMap<&str, &str>> = Lazy::new(|| { default_types::DEFAULT_TYPES .iter() .flat_map(|(lang, values)| { values.iter().filter_map(|v| { v.split('.').last().and_then(|ext| { // Simply ignore the abnormal cases. if ext.contains('[') || ext.contains('*') { None } else { Some((ext, *lang)) } }) }) }) .collect() }); /// Finds the ripgrep language given the file extension `ext`. pub fn get_language(file_extension: &str) -> Option<&&str> { RG_LANGUAGE_EXT_TABLE.get(file_extension) } /// Word represents the input query around by word boundries. #[derive(Clone, Debug)] pub struct Word { pub raw: String, pub len: usize, pub re: regex::Regex, } impl Word { pub fn new(re_word: String, re: regex::Regex) -> Word { Self { len: re_word.len(), raw: re_word, re, } } pub fn find(&self, line: &str) -> Option<usize> { self.re.find(line).map(|mat| mat.start()) } } #[inline] fn range(start: usize, end: usize, offset: usize) -> Range<usize> { start + offset..end + offset } impl SubMatch { pub fn match_indices(&self, offset: usize) -> Range<usize> { range(self.start, self.end, offset) } // FIXME find the word in non-utf8? 
pub fn match_indices_for_dumb_jump(&self, offset: usize, search_word: &Word) -> Range<usize> { // The text in SubMatch is not exactly the search word itself in some cases, // we need to first find the offset of search word in the SubMatch text manually. match search_word.find(&self.m.text()) { Some(search_word_offset) => { let start = self.start + search_word_offset; range(start, start + search_word.len, offset) } None => Default::default(), } } } impl PartialEq for Match { fn eq(&self, other: &Match) -> bool { // Ignore the `submatches` field. // // Given a certain search word, if all the other fields are same, especially the // `absolute_offset` equals, these two Match can be considered the same. self.path == other.path && self.lines == other.lines && self.line_number == other.line_number && self.absolute_offset == other.absolute_offset } } impl Eq for Match {} impl Match { pub fn path(&self) -> Cow<str> { self.path.text() } pub fn line_number(&self) -> u64 { self.line_number.unwrap_or_default() } pub fn column(&self) -> usize { self.submatches.get(0).map(|x| x.start).unwrap_or_default() } /// Returns true if the text line starts with `pat`. 
pub fn line_starts_with(&self, pat: &str) -> bool { self.lines.text().trim_start().starts_with(pat) } pub fn match_indices(&self, offset: usize) -> Vec<usize> { self.submatches .iter() .flat_map(|s| s.match_indices(offset)) .collect() } pub fn match_indices_for_dumb_jump(&self, offset: usize, search_word: &Word) -> Vec<usize> { self.submatches .iter() .flat_map(|s| s.match_indices_for_dumb_jump(offset, search_word)) .collect() } } impl TryFrom<&[u8]> for Match { type Error = Cow<'static, str>; fn try_from(byte_line: &[u8]) -> Result<Self, Self::Error> { let msg = serde_json::from_slice::<Message>(byte_line) .map_err(|e| format!("deserialize error: {e:?}"))?; if let Message::Match(mat) = msg { Ok(mat) } else { Err("Not Message::Match type".into()) } } } impl TryFrom<&str> for Match { type Error = Cow<'static, str>; fn try_from(line: &str) -> Result<Self, Self::Error> { let msg = serde_json::from_str::<Message>(line) .map_err(|e| format!("deserialize error: {e:?}"))?; if let Message::Match(mat) = msg { Ok(mat) } else
} } impl Match { /// Returns a pair of the formatted `String` and the offset of origin match indices. /// /// The formatted String is same with the output line using rg's -vimgrep option. fn grep_line_format(&self, enable_icon: bool) -> (String, usize) { let path = self.path(); let line_number = self.line_number(); let column = self.column(); let pattern = self.pattern(); let pattern = pattern.trim_end(); // filepath:line_number:column:text, 3 extra `:` in the formatted String. let mut offset = path.len() + display_width(line_number as usize) + display_width(column) + 3; let formatted_line = if enable_icon { let icon = icon::file_icon(&path); offset += icon.len_utf8() + 1; format!("{icon} {path}:{line_number}:{column}:{pattern}") } else { format!("{path}:{line_number}:{column}:{pattern}") }; (formatted_line, offset) } pub fn build_grep_line(&self, enable_icon: bool) -> (String, Vec<usize>) { let (formatted, offset) = self.grep_line_format(enable_icon); let indices = self.match_indices(offset); (formatted, indices) } #[inline] pub fn pattern(&self) -> Cow<str> { self.lines.text() } pub fn pattern_priority(&self) -> dumb_analyzer::Priority { self.path() .rsplit_once('.') .and_then(|(_, file_ext)| { dumb_analyzer::calculate_pattern_priority(self.pattern(), file_ext) }) .unwrap_or_default() } /// Returns a pair of the formatted `String` and the offset of matches for dumb_jump provider. /// /// NOTE: [`pattern::DUMB_JUMP_LINE`] must be updated accordingly once the format is changed. 
fn jump_line_format(&self, kind: &str) -> (String, usize) { let path = self.path(); let line_number = self.line_number(); let column = self.column(); let pattern = self.pattern(); let pattern = pattern.trim_end(); let formatted_line = format!("[r{kind}]{path}:{line_number}:{column}:{pattern}",); let offset = kind.len() + path.len() + display_width(line_number as usize) + display_width(column) + 6; // `[r]` + 3 `:` (formatted_line, offset) } pub fn build_jump_line(&self, kind: &str, word: &Word) -> (String, Vec<usize>) { let (formatted, offset) = self.jump_line_format(kind); let indices = self.match_indices_for_dumb_jump(offset, word); (formatted, indices) } fn jump_line_format_bare(&self) -> (String, usize) { let line_number = self.line_number(); let column = self.column(); let pattern = self.pattern(); let pattern = pattern.trim_end(); let formatted_string = format!(" {line_number}:{column}:{pattern}"); let offset = display_width(line_number as usize) + display_width(column) + 2 + 2; (formatted_string, offset) } pub fn build_jump_line_bare(&self, word: &Word) -> (String, Vec<usize>) { let (formatted, offset) = self.jump_line_format_bare(); let indices = self.match_indices_for_dumb_jump(offset, word); (formatted, indices) } } const RG_ARGS: &[&str] = &[ "rg", "--column", "--line-number", "--no-heading", "--color=never", "--smart-case", "", ".", ]; // Ref https://github.com/liuchengxu/vim-clap/issues/533 // Now `.` is pushed to the end for all platforms due to https://github.com/liuchengxu/vim-clap/issues/711. pub const RG_EXEC_CMD: &str = "rg --column --line-number --no-heading --color=never --smart-case '' ."; // Used for creating the cache in async context. 
#[derive(Debug, Clone, Hash)] pub struct RgTokioCommand { shell_cmd: ShellCommand, } impl RgTokioCommand { pub fn new(dir: PathBuf) -> Self { let shell_cmd = ShellCommand::new(RG_EXEC_CMD.into(), dir); Self { shell_cmd } } pub fn cache_digest(&self) -> Option<Digest> { self.shell_cmd.cache_digest() } pub async fn create_cache(self) -> Result<Digest> { let cache_file = self.shell_cmd.cache_file_path()?; let std_cmd = rg_command(&self.shell_cmd.dir); let mut tokio_cmd = tokio::process::Command::from(std_cmd); crate::process::tokio::write_stdout_to_file(&mut tokio_cmd, &cache_file).await?; let digest = crate::cache::store_cache_digest(self.shell_cmd.clone(), cache_file)?; Ok(digest) } } pub fn rg_command<P: AsRef<Path>>(dir: P) -> Command { // Can not use StdCommand as it joins the args which does not work somehow. let mut cmd = Command::new(RG_ARGS[0]); // Do not use --vimgrep here. cmd.args(&RG_ARGS[1..]).current_dir(dir); cmd } pub fn refresh_cache(dir: impl AsRef<Path>) -> Result<Digest> { let shell_cmd = rg_shell_command(dir.as_ref()); let cache_file_path = shell_cmd.cache_file_path()?; let mut cmd = rg_command(dir.as_ref()); crate::process::write_stdout_to_file(&mut cmd, &cache_file_path)?; let digest = crate::cache::store_cache_digest(shell_cmd, cache_file_path)?; Ok(digest) } #[inline] pub fn rg_shell_command<P: AsRef<Path>>(dir: P) -> ShellCommand { ShellCommand::new(RG_EXEC_CMD.into(), PathBuf::from(dir.as_ref())) }
{ Err("Not Message::Match type".into()) }
conditional_block
mod.rs
mod default_types; mod jsont; mod stats; use crate::cache::Digest; use crate::process::ShellCommand; use anyhow::Result; use once_cell::sync::Lazy; use std::borrow::Cow; use std::collections::HashMap; use std::convert::TryFrom; use std::ops::Range; use std::path::{Path, PathBuf}; use std::process::Command; use utils::display_width; pub use self::jsont::{Match, Message, SubMatch}; pub static RG_EXISTS: Lazy<bool> = Lazy::new(|| { std::process::Command::new("rg") .arg("--version") .stdout(std::process::Stdio::null()) .status() .map(|exit_status| exit_status.success()) .unwrap_or(false) }); /// Map of file extension to ripgrep language. /// /// https://github.com/BurntSushi/ripgrep/blob/20534fad04/crates/ignore/src/default_types.rs static RG_LANGUAGE_EXT_TABLE: Lazy<HashMap<&str, &str>> = Lazy::new(|| { default_types::DEFAULT_TYPES .iter() .flat_map(|(lang, values)| { values.iter().filter_map(|v| { v.split('.').last().and_then(|ext| { // Simply ignore the abnormal cases. if ext.contains('[') || ext.contains('*') { None } else { Some((ext, *lang)) } }) }) }) .collect() }); /// Finds the ripgrep language given the file extension `ext`. pub fn get_language(file_extension: &str) -> Option<&&str> { RG_LANGUAGE_EXT_TABLE.get(file_extension) } /// Word represents the input query around by word boundries. #[derive(Clone, Debug)] pub struct Word { pub raw: String, pub len: usize, pub re: regex::Regex, } impl Word { pub fn new(re_word: String, re: regex::Regex) -> Word { Self { len: re_word.len(), raw: re_word, re, } } pub fn find(&self, line: &str) -> Option<usize> { self.re.find(line).map(|mat| mat.start()) } } #[inline] fn range(start: usize, end: usize, offset: usize) -> Range<usize> { start + offset..end + offset } impl SubMatch { pub fn match_indices(&self, offset: usize) -> Range<usize> { range(self.start, self.end, offset) } // FIXME find the word in non-utf8? 
pub fn match_indices_for_dumb_jump(&self, offset: usize, search_word: &Word) -> Range<usize> { // The text in SubMatch is not exactly the search word itself in some cases, // we need to first find the offset of search word in the SubMatch text manually. match search_word.find(&self.m.text()) { Some(search_word_offset) => { let start = self.start + search_word_offset; range(start, start + search_word.len, offset) } None => Default::default(), } } } impl PartialEq for Match { fn eq(&self, other: &Match) -> bool { // Ignore the `submatches` field. // // Given a certain search word, if all the other fields are same, especially the // `absolute_offset` equals, these two Match can be considered the same. self.path == other.path && self.lines == other.lines && self.line_number == other.line_number && self.absolute_offset == other.absolute_offset } } impl Eq for Match {} impl Match { pub fn path(&self) -> Cow<str> { self.path.text() } pub fn line_number(&self) -> u64 { self.line_number.unwrap_or_default() } pub fn column(&self) -> usize { self.submatches.get(0).map(|x| x.start).unwrap_or_default() } /// Returns true if the text line starts with `pat`. 
pub fn line_starts_with(&self, pat: &str) -> bool { self.lines.text().trim_start().starts_with(pat) } pub fn match_indices(&self, offset: usize) -> Vec<usize> { self.submatches .iter() .flat_map(|s| s.match_indices(offset)) .collect() } pub fn match_indices_for_dumb_jump(&self, offset: usize, search_word: &Word) -> Vec<usize> { self.submatches .iter() .flat_map(|s| s.match_indices_for_dumb_jump(offset, search_word)) .collect() } } impl TryFrom<&[u8]> for Match { type Error = Cow<'static, str>; fn try_from(byte_line: &[u8]) -> Result<Self, Self::Error> { let msg = serde_json::from_slice::<Message>(byte_line) .map_err(|e| format!("deserialize error: {e:?}"))?; if let Message::Match(mat) = msg { Ok(mat) } else { Err("Not Message::Match type".into()) } } } impl TryFrom<&str> for Match { type Error = Cow<'static, str>; fn try_from(line: &str) -> Result<Self, Self::Error> { let msg = serde_json::from_str::<Message>(line) .map_err(|e| format!("deserialize error: {e:?}"))?; if let Message::Match(mat) = msg { Ok(mat) } else { Err("Not Message::Match type".into()) } } } impl Match { /// Returns a pair of the formatted `String` and the offset of origin match indices. /// /// The formatted String is same with the output line using rg's -vimgrep option. fn grep_line_format(&self, enable_icon: bool) -> (String, usize) { let path = self.path(); let line_number = self.line_number(); let column = self.column(); let pattern = self.pattern(); let pattern = pattern.trim_end(); // filepath:line_number:column:text, 3 extra `:` in the formatted String. 
let mut offset = path.len() + display_width(line_number as usize) + display_width(column) + 3; let formatted_line = if enable_icon { let icon = icon::file_icon(&path); offset += icon.len_utf8() + 1; format!("{icon} {path}:{line_number}:{column}:{pattern}") } else { format!("{path}:{line_number}:{column}:{pattern}") }; (formatted_line, offset) } pub fn build_grep_line(&self, enable_icon: bool) -> (String, Vec<usize>) { let (formatted, offset) = self.grep_line_format(enable_icon); let indices = self.match_indices(offset); (formatted, indices) } #[inline] pub fn pattern(&self) -> Cow<str> { self.lines.text() } pub fn
(&self) -> dumb_analyzer::Priority { self.path() .rsplit_once('.') .and_then(|(_, file_ext)| { dumb_analyzer::calculate_pattern_priority(self.pattern(), file_ext) }) .unwrap_or_default() } /// Returns a pair of the formatted `String` and the offset of matches for dumb_jump provider. /// /// NOTE: [`pattern::DUMB_JUMP_LINE`] must be updated accordingly once the format is changed. fn jump_line_format(&self, kind: &str) -> (String, usize) { let path = self.path(); let line_number = self.line_number(); let column = self.column(); let pattern = self.pattern(); let pattern = pattern.trim_end(); let formatted_line = format!("[r{kind}]{path}:{line_number}:{column}:{pattern}",); let offset = kind.len() + path.len() + display_width(line_number as usize) + display_width(column) + 6; // `[r]` + 3 `:` (formatted_line, offset) } pub fn build_jump_line(&self, kind: &str, word: &Word) -> (String, Vec<usize>) { let (formatted, offset) = self.jump_line_format(kind); let indices = self.match_indices_for_dumb_jump(offset, word); (formatted, indices) } fn jump_line_format_bare(&self) -> (String, usize) { let line_number = self.line_number(); let column = self.column(); let pattern = self.pattern(); let pattern = pattern.trim_end(); let formatted_string = format!(" {line_number}:{column}:{pattern}"); let offset = display_width(line_number as usize) + display_width(column) + 2 + 2; (formatted_string, offset) } pub fn build_jump_line_bare(&self, word: &Word) -> (String, Vec<usize>) { let (formatted, offset) = self.jump_line_format_bare(); let indices = self.match_indices_for_dumb_jump(offset, word); (formatted, indices) } } const RG_ARGS: &[&str] = &[ "rg", "--column", "--line-number", "--no-heading", "--color=never", "--smart-case", "", ".", ]; // Ref https://github.com/liuchengxu/vim-clap/issues/533 // Now `.` is pushed to the end for all platforms due to https://github.com/liuchengxu/vim-clap/issues/711. 
pub const RG_EXEC_CMD: &str = "rg --column --line-number --no-heading --color=never --smart-case '' ."; // Used for creating the cache in async context. #[derive(Debug, Clone, Hash)] pub struct RgTokioCommand { shell_cmd: ShellCommand, } impl RgTokioCommand { pub fn new(dir: PathBuf) -> Self { let shell_cmd = ShellCommand::new(RG_EXEC_CMD.into(), dir); Self { shell_cmd } } pub fn cache_digest(&self) -> Option<Digest> { self.shell_cmd.cache_digest() } pub async fn create_cache(self) -> Result<Digest> { let cache_file = self.shell_cmd.cache_file_path()?; let std_cmd = rg_command(&self.shell_cmd.dir); let mut tokio_cmd = tokio::process::Command::from(std_cmd); crate::process::tokio::write_stdout_to_file(&mut tokio_cmd, &cache_file).await?; let digest = crate::cache::store_cache_digest(self.shell_cmd.clone(), cache_file)?; Ok(digest) } } pub fn rg_command<P: AsRef<Path>>(dir: P) -> Command { // Can not use StdCommand as it joins the args which does not work somehow. let mut cmd = Command::new(RG_ARGS[0]); // Do not use --vimgrep here. cmd.args(&RG_ARGS[1..]).current_dir(dir); cmd } pub fn refresh_cache(dir: impl AsRef<Path>) -> Result<Digest> { let shell_cmd = rg_shell_command(dir.as_ref()); let cache_file_path = shell_cmd.cache_file_path()?; let mut cmd = rg_command(dir.as_ref()); crate::process::write_stdout_to_file(&mut cmd, &cache_file_path)?; let digest = crate::cache::store_cache_digest(shell_cmd, cache_file_path)?; Ok(digest) } #[inline] pub fn rg_shell_command<P: AsRef<Path>>(dir: P) -> ShellCommand { ShellCommand::new(RG_EXEC_CMD.into(), PathBuf::from(dir.as_ref())) }
pattern_priority
identifier_name
tls_accept.rs
#![cfg(test)] // These are basically integration tests for the `connection` submodule, but // they cannot be "real" integration tests because `connection` isn't a public // interface and because `connection` exposes a `#[cfg(test)]`-only API for use // by these tests. use linkerd2_error::Never; use linkerd2_identity::{test_util, CrtKey, Name}; use linkerd2_proxy_core::listen::{Accept, Bind as _Bind, Listen as CoreListen}; use linkerd2_proxy_transport::tls::{ self, accept::{AcceptTls, Connection as ServerConnection}, client::Connection as ClientConnection, Conditional, }; use linkerd2_proxy_transport::{connect, Bind, Listen}; use std::{net::SocketAddr, sync::mpsc}; use tokio::{self, io, prelude::*}; use tower::{layer::Layer, ServiceExt}; use tower_util::service_fn; #[test] fn plaintext() { let (client_result, server_result) = run_test( Conditional::None(tls::ReasonForNoIdentity::Disabled), |conn| write_then_read(conn, PING), Conditional::None(tls::ReasonForNoIdentity::Disabled), |(_, conn)| read_then_write(conn, PING.len(), PONG), ); assert_eq!(client_result.is_tls(), false); assert_eq!(&client_result.result.expect("pong")[..], PONG); assert_eq!(server_result.is_tls(), false); assert_eq!(&server_result.result.expect("ping")[..], PING); } #[test] fn proxy_to_proxy_tls_works() { let server_tls = test_util::FOO_NS1.validate().unwrap(); let client_tls = test_util::BAR_NS1.validate().unwrap(); let (client_result, server_result) = run_test( Conditional::Some((client_tls, server_tls.tls_server_name())), |conn| write_then_read(conn, PING), Conditional::Some(server_tls), |(_, conn)| read_then_write(conn, PING.len(), PONG), ); assert_eq!(client_result.is_tls(), true); assert_eq!(&client_result.result.expect("pong")[..], PONG); assert_eq!(server_result.is_tls(), true); assert_eq!(&server_result.result.expect("ping")[..], PING); } #[test] fn proxy_to_proxy_tls_pass_through_when_identity_does_not_match() { let server_tls = test_util::FOO_NS1.validate().unwrap(); // Misuse the 
client's identity instead of the server's identity. Any // identity other than `server_tls.server_identity` would work. let client_tls = test_util::BAR_NS1.validate().expect("valid client cert"); let client_target = test_util::BAR_NS1.crt().name().clone(); let (client_result, server_result) = run_test( Conditional::Some((client_tls, client_target)), |conn| write_then_read(conn, PING), Conditional::Some(server_tls), |(_, conn)| read_then_write(conn, START_OF_TLS.len(), PONG), ); // The server's connection will succeed with the TLS client hello passed // through, because the SNI doesn't match its identity. assert_eq!(client_result.is_tls(), false); assert!(client_result.result.is_err()); assert_eq!(server_result.is_tls(), false); assert_eq!(&server_result.result.unwrap()[..], START_OF_TLS); } struct Transported<R> { /// The value of `Connection::peer_identity()` for the established connection. /// /// This will be `None` if we never even get a `Connection`. peer_identity: Option<tls::PeerIdentity>, /// The connection's result. result: Result<R, io::Error>, } impl<R> Transported<R> { fn is_tls(&self) -> bool { self.peer_identity .as_ref() .map(|i| i.is_some()) .unwrap_or(false) } } /// Runs a test for a single TCP connection. `client` processes the connection /// on the client side and `server` processes the connection on the server /// side. 
fn run_test<C, CF, CR, S, SF, SR>( client_tls: tls::Conditional<(CrtKey, Name)>, client: C, server_tls: tls::Conditional<CrtKey>, server: S, ) -> (Transported<CR>, Transported<SR>) where // Client C: FnOnce(ClientConnection) -> CF + Clone + Send + 'static, CF: Future<Item = CR, Error = io::Error> + Send + 'static, CR: Send + 'static, // Server S: Fn(ServerConnection) -> SF + Clone + Send + 'static, SF: Future<Item = SR, Error = io::Error> + Send + 'static, SR: Send + 'static, { { use tracing_subscriber::{fmt, EnvFilter}; let sub = fmt::Subscriber::builder() .with_env_filter(EnvFilter::from_default_env()) .finish(); let _ = tracing::subscriber::set_global_default(sub); } let (client_tls, client_target_name) = match client_tls { Conditional::Some((crtkey, name)) => ( Conditional::Some(ClientTls(crtkey)), Conditional::Some(name), ), Conditional::None(reason) => (Conditional::None(reason.clone()), Conditional::None(reason)), }; // A future that will receive a single connection. let (server, server_addr, server_result) = { // Saves the result of every connection. let (sender, receiver) = mpsc::channel::<Transported<SR>>(); // Let the OS decide the port number and then return the resulting // `SocketAddr` so the client can connect to it. This allows multiple // tests to run at once, which wouldn't work if they all were bound on // a fixed port. 
let addr = "127.0.0.1:0".parse::<SocketAddr>().unwrap(); let listen = Bind::new(addr, None).bind().expect("must bind"); let listen_addr = listen.listen_addr(); let sender = service_fn(move |(meta, conn): ServerConnection| { let sender = sender.clone(); let peer_identity = Some(meta.peer_identity.clone()); let server = Box::new(server((meta, conn)).then(move |result| { sender .send(Transported { peer_identity, result, }) .expect("send result"); future::ok::<(), Never>(()) })); Box::new(future::ok::<_, Never>(server)) }); let accept = AcceptTls::new(server_tls, sender); let server = Server::Init { listen, accept }; (server, listen_addr, receiver) }; // A future that will open a single connection to the server. let (client, client_result) = { // Saves the result of the single connection. This could be a simpler // type, e.g. `Arc<Mutex>`, but using a channel simplifies the code and // parallels the server side. let (sender, receiver) = mpsc::channel::<Transported<CR>>(); let sender_clone = sender.clone(); let peer_identity = Some(client_target_name.clone()); let client = tls::ConnectLayer::new(client_tls) .layer(connect::Connect::new(None)) .oneshot(Target(server_addr, client_target_name)) .map_err(move |e| { sender_clone .send(Transported { peer_identity: None, result: Err(e), }) .expect("send result"); () }) .and_then(move |conn| { client(conn).then(move |result| { sender .send(Transported { peer_identity, result, }) .expect("send result"); Ok(()) }) }); (client, receiver) }; tokio::run(server.join(client).map(|_| ())); let client_result = client_result.try_recv().expect("client complete"); // XXX: This assumes that only one connection is accepted. TODO: allow the // caller to observe the results for every connection, once we have tests // that allow accepting multiple connections. 
let server_result = server_result.try_recv().expect("server complete"); (client_result, server_result) } /// Writes `to_write` and shuts down the write side, then reads until EOF, /// returning the bytes read. fn write_then_read( conn: impl AsyncRead + AsyncWrite, to_write: &'static [u8], ) -> impl Future<Item = Vec<u8>, Error = io::Error> { write_and_shutdown(conn, to_write) .and_then(|conn| io::read_to_end(conn, Vec::new())) .map(|(_conn, r)| r) } /// Reads until EOF then writes `to_write` and shuts down the write side, /// returning the bytes read. fn read_then_write( conn: impl AsyncRead + AsyncWrite, read_prefix_len: usize, to_write: &'static [u8], ) -> impl Future<Item = Vec<u8>, Error = io::Error> { io::read_exact(conn, vec![0; read_prefix_len]) .and_then(move |(conn, r)| write_and_shutdown(conn, to_write).map(|_conn| r)) } /// writes `to_write` to `conn` and then shuts down the write side of `conn`. fn write_and_shutdown<T: AsyncRead + AsyncWrite>( conn: T, to_write: &'static [u8], ) -> impl Future<Item = T, Error = io::Error> { io::write_all(conn, to_write).and_then(|(mut conn, _)| { conn.shutdown()?; Ok(conn) }) } const PING: &[u8] = b"ping"; const PONG: &[u8] = b"pong"; const START_OF_TLS: &[u8] = &[22, 3, 1]; // ContentType::handshake version 3.1 enum Server<A: Accept<ServerConnection>> where
}, Accepting(<AcceptTls<A, CrtKey> as Accept<<Listen as CoreListen>::Connection>>::Future), Serving(<AcceptTls<A, CrtKey> as Accept<<Listen as CoreListen>::Connection>>::ConnectionFuture), } #[derive(Clone)] struct Target(SocketAddr, Conditional<Name>); #[derive(Clone)] struct ClientTls(CrtKey); impl<A: Accept<ServerConnection> + Clone> Future for Server<A> { type Item = (); type Error = (); fn poll(&mut self) -> Poll<Self::Item, Self::Error> { loop { *self = match self { Server::Init { ref mut listen, ref mut accept, } => { match Accept::poll_ready(accept) { Ok(Async::Ready(())) => {} Ok(Async::NotReady) => return Ok(Async::NotReady), Err(_) => panic!("accept failed"), } let conn = match listen.poll_accept() { Ok(Async::Ready(conn)) => conn, Ok(Async::NotReady) => return Ok(Async::NotReady), Err(_) => panic!("listener failed"), }; Server::Accepting(accept.accept(conn)) } Server::Accepting(ref mut fut) => match fut.poll() { Ok(Async::Ready(conn_future)) => Server::Serving(conn_future), Ok(Async::NotReady) => return Ok(Async::NotReady), Err(_) => panic!("accepting failed"), }, Server::Serving(ref mut fut) => match fut.poll() { Ok(ready) => return Ok(ready), Err(_) => panic!("connection failed"), }, } } } } impl connect::ConnectAddr for Target { fn connect_addr(&self) -> SocketAddr { self.0 } } impl tls::HasPeerIdentity for Target { fn peer_identity(&self) -> Conditional<Name> { self.1.clone() } } impl tls::client::HasConfig for ClientTls { fn tls_client_config(&self) -> std::sync::Arc<tls::client::Config> { self.0.tls_client_config() } }
AcceptTls<A, CrtKey>: Accept<<Listen as CoreListen>::Connection>, { Init { listen: Listen, accept: AcceptTls<A, CrtKey>,
random_line_split
tls_accept.rs
#![cfg(test)] // These are basically integration tests for the `connection` submodule, but // they cannot be "real" integration tests because `connection` isn't a public // interface and because `connection` exposes a `#[cfg(test)]`-only API for use // by these tests. use linkerd2_error::Never; use linkerd2_identity::{test_util, CrtKey, Name}; use linkerd2_proxy_core::listen::{Accept, Bind as _Bind, Listen as CoreListen}; use linkerd2_proxy_transport::tls::{ self, accept::{AcceptTls, Connection as ServerConnection}, client::Connection as ClientConnection, Conditional, }; use linkerd2_proxy_transport::{connect, Bind, Listen}; use std::{net::SocketAddr, sync::mpsc}; use tokio::{self, io, prelude::*}; use tower::{layer::Layer, ServiceExt}; use tower_util::service_fn; #[test] fn plaintext() { let (client_result, server_result) = run_test( Conditional::None(tls::ReasonForNoIdentity::Disabled), |conn| write_then_read(conn, PING), Conditional::None(tls::ReasonForNoIdentity::Disabled), |(_, conn)| read_then_write(conn, PING.len(), PONG), ); assert_eq!(client_result.is_tls(), false); assert_eq!(&client_result.result.expect("pong")[..], PONG); assert_eq!(server_result.is_tls(), false); assert_eq!(&server_result.result.expect("ping")[..], PING); } #[test] fn proxy_to_proxy_tls_works() { let server_tls = test_util::FOO_NS1.validate().unwrap(); let client_tls = test_util::BAR_NS1.validate().unwrap(); let (client_result, server_result) = run_test( Conditional::Some((client_tls, server_tls.tls_server_name())), |conn| write_then_read(conn, PING), Conditional::Some(server_tls), |(_, conn)| read_then_write(conn, PING.len(), PONG), ); assert_eq!(client_result.is_tls(), true); assert_eq!(&client_result.result.expect("pong")[..], PONG); assert_eq!(server_result.is_tls(), true); assert_eq!(&server_result.result.expect("ping")[..], PING); } #[test] fn proxy_to_proxy_tls_pass_through_when_identity_does_not_match() { let server_tls = test_util::FOO_NS1.validate().unwrap(); // Misuse the 
client's identity instead of the server's identity. Any // identity other than `server_tls.server_identity` would work. let client_tls = test_util::BAR_NS1.validate().expect("valid client cert"); let client_target = test_util::BAR_NS1.crt().name().clone(); let (client_result, server_result) = run_test( Conditional::Some((client_tls, client_target)), |conn| write_then_read(conn, PING), Conditional::Some(server_tls), |(_, conn)| read_then_write(conn, START_OF_TLS.len(), PONG), ); // The server's connection will succeed with the TLS client hello passed // through, because the SNI doesn't match its identity. assert_eq!(client_result.is_tls(), false); assert!(client_result.result.is_err()); assert_eq!(server_result.is_tls(), false); assert_eq!(&server_result.result.unwrap()[..], START_OF_TLS); } struct Transported<R> { /// The value of `Connection::peer_identity()` for the established connection. /// /// This will be `None` if we never even get a `Connection`. peer_identity: Option<tls::PeerIdentity>, /// The connection's result. result: Result<R, io::Error>, } impl<R> Transported<R> { fn is_tls(&self) -> bool
} /// Runs a test for a single TCP connection. `client` processes the connection /// on the client side and `server` processes the connection on the server /// side. fn run_test<C, CF, CR, S, SF, SR>( client_tls: tls::Conditional<(CrtKey, Name)>, client: C, server_tls: tls::Conditional<CrtKey>, server: S, ) -> (Transported<CR>, Transported<SR>) where // Client C: FnOnce(ClientConnection) -> CF + Clone + Send + 'static, CF: Future<Item = CR, Error = io::Error> + Send + 'static, CR: Send + 'static, // Server S: Fn(ServerConnection) -> SF + Clone + Send + 'static, SF: Future<Item = SR, Error = io::Error> + Send + 'static, SR: Send + 'static, { { use tracing_subscriber::{fmt, EnvFilter}; let sub = fmt::Subscriber::builder() .with_env_filter(EnvFilter::from_default_env()) .finish(); let _ = tracing::subscriber::set_global_default(sub); } let (client_tls, client_target_name) = match client_tls { Conditional::Some((crtkey, name)) => ( Conditional::Some(ClientTls(crtkey)), Conditional::Some(name), ), Conditional::None(reason) => (Conditional::None(reason.clone()), Conditional::None(reason)), }; // A future that will receive a single connection. let (server, server_addr, server_result) = { // Saves the result of every connection. let (sender, receiver) = mpsc::channel::<Transported<SR>>(); // Let the OS decide the port number and then return the resulting // `SocketAddr` so the client can connect to it. This allows multiple // tests to run at once, which wouldn't work if they all were bound on // a fixed port. 
let addr = "127.0.0.1:0".parse::<SocketAddr>().unwrap(); let listen = Bind::new(addr, None).bind().expect("must bind"); let listen_addr = listen.listen_addr(); let sender = service_fn(move |(meta, conn): ServerConnection| { let sender = sender.clone(); let peer_identity = Some(meta.peer_identity.clone()); let server = Box::new(server((meta, conn)).then(move |result| { sender .send(Transported { peer_identity, result, }) .expect("send result"); future::ok::<(), Never>(()) })); Box::new(future::ok::<_, Never>(server)) }); let accept = AcceptTls::new(server_tls, sender); let server = Server::Init { listen, accept }; (server, listen_addr, receiver) }; // A future that will open a single connection to the server. let (client, client_result) = { // Saves the result of the single connection. This could be a simpler // type, e.g. `Arc<Mutex>`, but using a channel simplifies the code and // parallels the server side. let (sender, receiver) = mpsc::channel::<Transported<CR>>(); let sender_clone = sender.clone(); let peer_identity = Some(client_target_name.clone()); let client = tls::ConnectLayer::new(client_tls) .layer(connect::Connect::new(None)) .oneshot(Target(server_addr, client_target_name)) .map_err(move |e| { sender_clone .send(Transported { peer_identity: None, result: Err(e), }) .expect("send result"); () }) .and_then(move |conn| { client(conn).then(move |result| { sender .send(Transported { peer_identity, result, }) .expect("send result"); Ok(()) }) }); (client, receiver) }; tokio::run(server.join(client).map(|_| ())); let client_result = client_result.try_recv().expect("client complete"); // XXX: This assumes that only one connection is accepted. TODO: allow the // caller to observe the results for every connection, once we have tests // that allow accepting multiple connections. 
let server_result = server_result.try_recv().expect("server complete"); (client_result, server_result) } /// Writes `to_write` and shuts down the write side, then reads until EOF, /// returning the bytes read. fn write_then_read( conn: impl AsyncRead + AsyncWrite, to_write: &'static [u8], ) -> impl Future<Item = Vec<u8>, Error = io::Error> { write_and_shutdown(conn, to_write) .and_then(|conn| io::read_to_end(conn, Vec::new())) .map(|(_conn, r)| r) } /// Reads until EOF then writes `to_write` and shuts down the write side, /// returning the bytes read. fn read_then_write( conn: impl AsyncRead + AsyncWrite, read_prefix_len: usize, to_write: &'static [u8], ) -> impl Future<Item = Vec<u8>, Error = io::Error> { io::read_exact(conn, vec![0; read_prefix_len]) .and_then(move |(conn, r)| write_and_shutdown(conn, to_write).map(|_conn| r)) } /// writes `to_write` to `conn` and then shuts down the write side of `conn`. fn write_and_shutdown<T: AsyncRead + AsyncWrite>( conn: T, to_write: &'static [u8], ) -> impl Future<Item = T, Error = io::Error> { io::write_all(conn, to_write).and_then(|(mut conn, _)| { conn.shutdown()?; Ok(conn) }) } const PING: &[u8] = b"ping"; const PONG: &[u8] = b"pong"; const START_OF_TLS: &[u8] = &[22, 3, 1]; // ContentType::handshake version 3.1 enum Server<A: Accept<ServerConnection>> where AcceptTls<A, CrtKey>: Accept<<Listen as CoreListen>::Connection>, { Init { listen: Listen, accept: AcceptTls<A, CrtKey>, }, Accepting(<AcceptTls<A, CrtKey> as Accept<<Listen as CoreListen>::Connection>>::Future), Serving(<AcceptTls<A, CrtKey> as Accept<<Listen as CoreListen>::Connection>>::ConnectionFuture), } #[derive(Clone)] struct Target(SocketAddr, Conditional<Name>); #[derive(Clone)] struct ClientTls(CrtKey); impl<A: Accept<ServerConnection> + Clone> Future for Server<A> { type Item = (); type Error = (); fn poll(&mut self) -> Poll<Self::Item, Self::Error> { loop { *self = match self { Server::Init { ref mut listen, ref mut accept, } => { match 
Accept::poll_ready(accept) { Ok(Async::Ready(())) => {} Ok(Async::NotReady) => return Ok(Async::NotReady), Err(_) => panic!("accept failed"), } let conn = match listen.poll_accept() { Ok(Async::Ready(conn)) => conn, Ok(Async::NotReady) => return Ok(Async::NotReady), Err(_) => panic!("listener failed"), }; Server::Accepting(accept.accept(conn)) } Server::Accepting(ref mut fut) => match fut.poll() { Ok(Async::Ready(conn_future)) => Server::Serving(conn_future), Ok(Async::NotReady) => return Ok(Async::NotReady), Err(_) => panic!("accepting failed"), }, Server::Serving(ref mut fut) => match fut.poll() { Ok(ready) => return Ok(ready), Err(_) => panic!("connection failed"), }, } } } } impl connect::ConnectAddr for Target { fn connect_addr(&self) -> SocketAddr { self.0 } } impl tls::HasPeerIdentity for Target { fn peer_identity(&self) -> Conditional<Name> { self.1.clone() } } impl tls::client::HasConfig for ClientTls { fn tls_client_config(&self) -> std::sync::Arc<tls::client::Config> { self.0.tls_client_config() } }
{ self.peer_identity .as_ref() .map(|i| i.is_some()) .unwrap_or(false) }
identifier_body
tls_accept.rs
#![cfg(test)] // These are basically integration tests for the `connection` submodule, but // they cannot be "real" integration tests because `connection` isn't a public // interface and because `connection` exposes a `#[cfg(test)]`-only API for use // by these tests. use linkerd2_error::Never; use linkerd2_identity::{test_util, CrtKey, Name}; use linkerd2_proxy_core::listen::{Accept, Bind as _Bind, Listen as CoreListen}; use linkerd2_proxy_transport::tls::{ self, accept::{AcceptTls, Connection as ServerConnection}, client::Connection as ClientConnection, Conditional, }; use linkerd2_proxy_transport::{connect, Bind, Listen}; use std::{net::SocketAddr, sync::mpsc}; use tokio::{self, io, prelude::*}; use tower::{layer::Layer, ServiceExt}; use tower_util::service_fn; #[test] fn plaintext() { let (client_result, server_result) = run_test( Conditional::None(tls::ReasonForNoIdentity::Disabled), |conn| write_then_read(conn, PING), Conditional::None(tls::ReasonForNoIdentity::Disabled), |(_, conn)| read_then_write(conn, PING.len(), PONG), ); assert_eq!(client_result.is_tls(), false); assert_eq!(&client_result.result.expect("pong")[..], PONG); assert_eq!(server_result.is_tls(), false); assert_eq!(&server_result.result.expect("ping")[..], PING); } #[test] fn proxy_to_proxy_tls_works() { let server_tls = test_util::FOO_NS1.validate().unwrap(); let client_tls = test_util::BAR_NS1.validate().unwrap(); let (client_result, server_result) = run_test( Conditional::Some((client_tls, server_tls.tls_server_name())), |conn| write_then_read(conn, PING), Conditional::Some(server_tls), |(_, conn)| read_then_write(conn, PING.len(), PONG), ); assert_eq!(client_result.is_tls(), true); assert_eq!(&client_result.result.expect("pong")[..], PONG); assert_eq!(server_result.is_tls(), true); assert_eq!(&server_result.result.expect("ping")[..], PING); } #[test] fn proxy_to_proxy_tls_pass_through_when_identity_does_not_match() { let server_tls = test_util::FOO_NS1.validate().unwrap(); // Misuse the 
client's identity instead of the server's identity. Any // identity other than `server_tls.server_identity` would work. let client_tls = test_util::BAR_NS1.validate().expect("valid client cert"); let client_target = test_util::BAR_NS1.crt().name().clone(); let (client_result, server_result) = run_test( Conditional::Some((client_tls, client_target)), |conn| write_then_read(conn, PING), Conditional::Some(server_tls), |(_, conn)| read_then_write(conn, START_OF_TLS.len(), PONG), ); // The server's connection will succeed with the TLS client hello passed // through, because the SNI doesn't match its identity. assert_eq!(client_result.is_tls(), false); assert!(client_result.result.is_err()); assert_eq!(server_result.is_tls(), false); assert_eq!(&server_result.result.unwrap()[..], START_OF_TLS); } struct Transported<R> { /// The value of `Connection::peer_identity()` for the established connection. /// /// This will be `None` if we never even get a `Connection`. peer_identity: Option<tls::PeerIdentity>, /// The connection's result. result: Result<R, io::Error>, } impl<R> Transported<R> { fn is_tls(&self) -> bool { self.peer_identity .as_ref() .map(|i| i.is_some()) .unwrap_or(false) } } /// Runs a test for a single TCP connection. `client` processes the connection /// on the client side and `server` processes the connection on the server /// side. 
fn run_test<C, CF, CR, S, SF, SR>( client_tls: tls::Conditional<(CrtKey, Name)>, client: C, server_tls: tls::Conditional<CrtKey>, server: S, ) -> (Transported<CR>, Transported<SR>) where // Client C: FnOnce(ClientConnection) -> CF + Clone + Send + 'static, CF: Future<Item = CR, Error = io::Error> + Send + 'static, CR: Send + 'static, // Server S: Fn(ServerConnection) -> SF + Clone + Send + 'static, SF: Future<Item = SR, Error = io::Error> + Send + 'static, SR: Send + 'static, { { use tracing_subscriber::{fmt, EnvFilter}; let sub = fmt::Subscriber::builder() .with_env_filter(EnvFilter::from_default_env()) .finish(); let _ = tracing::subscriber::set_global_default(sub); } let (client_tls, client_target_name) = match client_tls { Conditional::Some((crtkey, name)) => ( Conditional::Some(ClientTls(crtkey)), Conditional::Some(name), ), Conditional::None(reason) => (Conditional::None(reason.clone()), Conditional::None(reason)), }; // A future that will receive a single connection. let (server, server_addr, server_result) = { // Saves the result of every connection. let (sender, receiver) = mpsc::channel::<Transported<SR>>(); // Let the OS decide the port number and then return the resulting // `SocketAddr` so the client can connect to it. This allows multiple // tests to run at once, which wouldn't work if they all were bound on // a fixed port. 
let addr = "127.0.0.1:0".parse::<SocketAddr>().unwrap(); let listen = Bind::new(addr, None).bind().expect("must bind"); let listen_addr = listen.listen_addr(); let sender = service_fn(move |(meta, conn): ServerConnection| { let sender = sender.clone(); let peer_identity = Some(meta.peer_identity.clone()); let server = Box::new(server((meta, conn)).then(move |result| { sender .send(Transported { peer_identity, result, }) .expect("send result"); future::ok::<(), Never>(()) })); Box::new(future::ok::<_, Never>(server)) }); let accept = AcceptTls::new(server_tls, sender); let server = Server::Init { listen, accept }; (server, listen_addr, receiver) }; // A future that will open a single connection to the server. let (client, client_result) = { // Saves the result of the single connection. This could be a simpler // type, e.g. `Arc<Mutex>`, but using a channel simplifies the code and // parallels the server side. let (sender, receiver) = mpsc::channel::<Transported<CR>>(); let sender_clone = sender.clone(); let peer_identity = Some(client_target_name.clone()); let client = tls::ConnectLayer::new(client_tls) .layer(connect::Connect::new(None)) .oneshot(Target(server_addr, client_target_name)) .map_err(move |e| { sender_clone .send(Transported { peer_identity: None, result: Err(e), }) .expect("send result"); () }) .and_then(move |conn| { client(conn).then(move |result| { sender .send(Transported { peer_identity, result, }) .expect("send result"); Ok(()) }) }); (client, receiver) }; tokio::run(server.join(client).map(|_| ())); let client_result = client_result.try_recv().expect("client complete"); // XXX: This assumes that only one connection is accepted. TODO: allow the // caller to observe the results for every connection, once we have tests // that allow accepting multiple connections. 
let server_result = server_result.try_recv().expect("server complete"); (client_result, server_result) } /// Writes `to_write` and shuts down the write side, then reads until EOF, /// returning the bytes read. fn write_then_read( conn: impl AsyncRead + AsyncWrite, to_write: &'static [u8], ) -> impl Future<Item = Vec<u8>, Error = io::Error> { write_and_shutdown(conn, to_write) .and_then(|conn| io::read_to_end(conn, Vec::new())) .map(|(_conn, r)| r) } /// Reads until EOF then writes `to_write` and shuts down the write side, /// returning the bytes read. fn read_then_write( conn: impl AsyncRead + AsyncWrite, read_prefix_len: usize, to_write: &'static [u8], ) -> impl Future<Item = Vec<u8>, Error = io::Error> { io::read_exact(conn, vec![0; read_prefix_len]) .and_then(move |(conn, r)| write_and_shutdown(conn, to_write).map(|_conn| r)) } /// writes `to_write` to `conn` and then shuts down the write side of `conn`. fn write_and_shutdown<T: AsyncRead + AsyncWrite>( conn: T, to_write: &'static [u8], ) -> impl Future<Item = T, Error = io::Error> { io::write_all(conn, to_write).and_then(|(mut conn, _)| { conn.shutdown()?; Ok(conn) }) } const PING: &[u8] = b"ping"; const PONG: &[u8] = b"pong"; const START_OF_TLS: &[u8] = &[22, 3, 1]; // ContentType::handshake version 3.1 enum Server<A: Accept<ServerConnection>> where AcceptTls<A, CrtKey>: Accept<<Listen as CoreListen>::Connection>, { Init { listen: Listen, accept: AcceptTls<A, CrtKey>, }, Accepting(<AcceptTls<A, CrtKey> as Accept<<Listen as CoreListen>::Connection>>::Future), Serving(<AcceptTls<A, CrtKey> as Accept<<Listen as CoreListen>::Connection>>::ConnectionFuture), } #[derive(Clone)] struct Target(SocketAddr, Conditional<Name>); #[derive(Clone)] struct
(CrtKey); impl<A: Accept<ServerConnection> + Clone> Future for Server<A> { type Item = (); type Error = (); fn poll(&mut self) -> Poll<Self::Item, Self::Error> { loop { *self = match self { Server::Init { ref mut listen, ref mut accept, } => { match Accept::poll_ready(accept) { Ok(Async::Ready(())) => {} Ok(Async::NotReady) => return Ok(Async::NotReady), Err(_) => panic!("accept failed"), } let conn = match listen.poll_accept() { Ok(Async::Ready(conn)) => conn, Ok(Async::NotReady) => return Ok(Async::NotReady), Err(_) => panic!("listener failed"), }; Server::Accepting(accept.accept(conn)) } Server::Accepting(ref mut fut) => match fut.poll() { Ok(Async::Ready(conn_future)) => Server::Serving(conn_future), Ok(Async::NotReady) => return Ok(Async::NotReady), Err(_) => panic!("accepting failed"), }, Server::Serving(ref mut fut) => match fut.poll() { Ok(ready) => return Ok(ready), Err(_) => panic!("connection failed"), }, } } } } impl connect::ConnectAddr for Target { fn connect_addr(&self) -> SocketAddr { self.0 } } impl tls::HasPeerIdentity for Target { fn peer_identity(&self) -> Conditional<Name> { self.1.clone() } } impl tls::client::HasConfig for ClientTls { fn tls_client_config(&self) -> std::sync::Arc<tls::client::Config> { self.0.tls_client_config() } }
ClientTls
identifier_name
AutomobilesOnSale.py
# Importing Necessary Library import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from scipy.stats import chi2_contingency from datetime import datetime from ipywidgets import interact, widgets get_ipython().run_line_magic('matplotlib', 'inline') sns.set_style('darkgrid') # Importing Data from autos.csv file df_auto = pd.read_csv(r'C:\Users\Ad\Desktop\bros f proj\Automobiles_Capstone_Project-master\Dataset\autos.csv', encoding='latin-1') # Investigating the Dimension of Extracted Observations df_auto.shape # Checking null values in Columns df_auto.isnull().sum() # Defining a function to fill the null values with their max value counts of individual's column def impute_missing_values(parameter): df_auto[parameter] = df_auto[parameter].fillna(df_auto[parameter].value_counts().index[0]) # Missing value imputation using "impute_missing_values" function impute_missing_values('vehicleType') impute_missing_values('gearbox') impute_missing_values('model') impute_missing_values('fuelType') impute_missing_values('notRepairedDamage') # Checking all the columns of dataset df_auto.columns # Dropping duplicate observation from dataset df_auto = df_auto.drop_duplicates() # Checking shape of datset df_auto.shape # Converting German word "ja" to 'yes' & "nein" to 'no' in English for better understanding df_auto['notRepairedDamage'] = df_auto['notRepairedDamage'].map({'ja':'yes', 'nein':'no'}) # Investigating number of cars available for sale print('Number of cars available in dataset : ', df_auto['name'].nunique()) # Dropping the feature : name as it is unnecessary while building model df_auto = df_auto.drop(['name'], axis=1) # Investigating overall structure of feature : monthOfRegistration df_auto.monthOfRegistration.describe() # As there are 12 months in Calendar, 13 months can't be right. 
Removing observations of month = 12, contains ~ 12k observation df_auto = df_auto[df_auto.monthOfRegistration != 12] # Univariate Analysis of : Sellers sns.barplot(df_auto.seller.value_counts().index, df_auto.seller.value_counts().values, alpha=0.9) plt.xlabel('Sellers') plt.ylabel('Count') plt.title('Distribution Of Car Sellers'); # As almost all of the Sellers are from private we can drop this feature df_auto = df_auto.drop(['seller'], axis=1) # Univariate Analysis of : Offer Type sns.barplot(df_auto.offerType.value_counts().index, df_auto.offerType.value_counts().values, alpha=0.9) plt.xlabel('Offer Type') plt.ylabel('Count') plt.title('Distribution Of Car Offers'); # As almost all of the Offers are from Angebot we can drop this feature df_auto = df_auto.drop(['offerType'], axis=1) print('Number of observation where price is 0 : ', df_auto[df_auto.price == 0]['price'].count()) # Number of observation where price is > 200000 df_auto[df_auto.price > 200000]['price'].count() # Number of observation where price is < 200 df_auto[df_auto.price < 200]['price'].count() # Considering outlier, selecting observations in between $200 & $200000 df_auto = df_auto[(df_auto.price > 200) & (df_auto.price < 200000)] # Distribution of Price sns.distplot(df_auto.price) plt.xlabel("Price") plt.ylabel('Frequency') plt.title("Distribution of Car's Price"); # Logarithm of Price Distribution sns.distplot(np.log(df_auto.price)) plt.xlabel("Logarithm of Car's Price") plt.ylabel('Frequency') plt.title("Distribution Log of Car's Price"); # Univariate Analysis of : AB Testing sns.barplot(df_auto.abtest.value_counts().index, df_auto.abtest.value_counts().values, alpha=0.9) plt.xlabel('Type of Testing') plt.ylabel('Count') plt.title('Distribution Of Car Testing'); # Univariate Analysis of : Vehicle Type plt.figure(figsize=(12,6)) sns.barplot(df_auto.vehicleType.value_counts().index, df_auto.vehicleType.value_counts().values, alpha=0.9) plt.xlabel('Type of Vehicle') plt.ylabel('Count') 
plt.title('Distribution Of Vehicle Types'); # Univariate Analysis of : Gear Type sns.barplot(df_auto.gearbox.value_counts().index, df_auto.gearbox.value_counts().values, alpha=0.9) plt.xlabel('Type of Gears') plt.ylabel('Count') plt.title('Distribution Of Types of Gears'); print('No of PowerPS is having value of 0 : ', df_auto[df_auto.powerPS == 0]['powerPS'].count()) print('No of PowerPS is having value of more than 662 is : ', df_auto[df_auto.powerPS > 662]['powerPS'].count()) # Removng cars having HP of 662 as the latest technology doesn't have HP > 662 # Removing observations having HP of 0 - as its meaningless df_auto = df_auto[(df_auto.powerPS > 0) & (df_auto.powerPS < 663)] # Distribution of Top 10 Horse Powered car sold plt.figure(figsize=(16,6)) sns.lineplot(df_auto[df_auto.powerPS > 0].powerPS.value_counts()[:10].index, df_auto[df_auto.powerPS > 0].powerPS.value_counts()[:10].values) plt.xticks(df_auto[df_auto.powerPS > 0].powerPS.value_counts()[:10].index) plt.xlabel('Horse Power') plt.ylabel('No. of Car Sold With Available Horse Power') plt.title('Top 10 Car Sold with Horse Power Variation'); # Distribution of Top 10 car's moel sold sns.lineplot(df_auto.model.value_counts()[:10].index, df_auto.model.value_counts()[:10].values) plt.xticks(df_auto.model.value_counts()[:10].index) plt.xlabel('Cars Model') plt.ylabel('Frequency') plt.title('Top 10 Cars Model Sold'); # Ditribution of Mesurement of KM a car ran before coming for sale plt.figure(figsize=(12,6)) sns.distplot(df_auto.kilometer) plt.xlabel("KM's Car Ran") plt.ylabel('Frequency') plt.title('Car was Driven in KM'); # No. 
of car registerd in a month for sale plt.figure(figsize=(12,6)) sns.lineplot(df_auto.monthOfRegistration.value_counts().index, df_auto.monthOfRegistration.value_counts().values) plt.xticks(df_auto.monthOfRegistration.value_counts().index.sort_values(), ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'June', 'July', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']) plt.xlabel("Month Of Registration") plt.ylabel('Frequency') plt.title('No. Of Cars Sold In Month'); # Univariate Analysis of : fuel Type plt.figure(figsize=(12,6)) sns.barplot(df_auto.fuelType.value_counts().index, df_auto.fuelType.value_counts().values, alpha=0.9) plt.xlabel('Types of Fuel') plt.ylabel('Frequency') plt.title('Distribution Of Car with Types of Fuel'); # Univariate Analysis of : Top 10 Car's Brand plt.figure(figsize=(12,6)) sns.barplot(df_auto.brand.value_counts()[:10].index, df_auto.brand.value_counts()[:10].values, alpha=0.9) plt.xlabel("Car's Brand") plt.ylabel('Frequency') plt.title("Top 10 Car's Brand Sold"); # Univariate Analysis of : Car was Repaired: yes/no before sale sns.barplot(df_auto.notRepairedDamage.value_counts().index, df_auto.notRepairedDamage.value_counts().values, alpha=0.9) plt.xlabel('Repaired Post Damage') plt.ylabel('Frequency') plt.title('Distribution Of Car Not Repaired Damaged'); # Investigating overall structure of feature : yearOfRegistration df_auto.yearOfRegistration.describe() # Observation which is older than 1989 df_auto[df_auto.yearOfRegistration < 1989]['yearOfRegistration'].count() # Observation which is more than 2019 df_auto[df_auto.yearOfRegistration > 2019]['yearOfRegistration'].count() # Taking into considearion which is in the year of between 1989 & 2019 df_auto = df_auto[(df_auto.yearOfRegistration >= 1989) & (df_auto.yearOfRegistration <= 2019)] # No of car was registered for sale throughout the year
plt.xlabel('Years of Registration') plt.ylabel('Price') plt.title('Variation Of Price with Year'); # No of days it took to sold while purchasing from E-bay days = [] for time1, time2 in zip(df_auto['dateCrawled'], df_auto['lastSeen']): time = datetime.strptime(time2, '%Y-%m-%d %H:%M:%S') - datetime.strptime(time1, '%Y-%m-%d %H:%M:%S') days.append(time.days) df_auto['Sold_In_Days'] = days # Investigating the feature : Sold_In_Days df_auto.Sold_In_Days.describe() # Removing the observations having negative values as it doesn't make any sense df_auto = df_auto[df_auto.Sold_In_Days >= 0] # Distribution of no. of cars sold in days plt.figure(figsize=(12,6)) sns.barplot(df_auto.Sold_In_Days.value_counts().index, df_auto.Sold_In_Days.value_counts().values, alpha=0.9) plt.xlabel('Sold In Days') plt.ylabel('Frequency') plt.title('No. Of Cars Sold in Days'); # Dropping the below mentioned features as they are unnecesary now while building models # All the postal code is from Germany only df_auto = df_auto.drop(['dateCrawled', 'lastSeen', 'dateCreated', 'nrOfPictures', 'model', 'abtest', 'postalCode'], axis=1) # Corelation matrix with Heatmap annotation sns.heatmap(df_auto.corr(), annot=True); # Function to get the Chi Square value & P value def chi_p_value(cat1, cat2): table = pd.crosstab(df_auto[cat1], df_auto[cat2]) chi2, p, dof, expected = chi2_contingency(table.values) if p < 0.05: print("Chi Square Statistics and p value of {} and {} is {}, {}".format(cat1, cat2, chi2, p)) # Extracting Chi Square value & p value for i in range(len(df_auto.select_dtypes(include=['object']).columns)): for cat2 in df_auto.select_dtypes(include=['object']).columns[df_auto.select_dtypes(include=['object']).columns != df_auto.select_dtypes(include=['object']).columns[i]]: chi_p_value(df_auto.select_dtypes(include=['object']).columns[i], cat2) # Taking into consideration of Sold_In_Dyas which is <= 5 days for bi-variate analysis # It will give us the top most sold cars in first consecutive 5 
days df_auto_sold = df_auto[df_auto.Sold_In_Days < 5] # Function to visualize bivariate analysis def bivariate_analysis(param, xlabel): df_auto_sold.groupby([param, 'Sold_In_Days'])['price'].count().unstack().plot(kind='bar') plt.xticks(rotation=360) plt.xlabel(xlabel) plt.ylabel('Price') plt.title('Price Distribution of ' + xlabel + ' Sold within 0-4 days'); bivariate_analysis('vehicleType', 'Types Of Vehicle') bivariate_analysis('gearbox', 'Types Of Gear') bivariate_analysis('fuelType', 'Types Of Fuel') print("No. Of cars sold on the day the ad was published : ", df_auto[df_auto.Sold_In_Days == 0].count()[0]) print("No. Of cars sold on the 1st day the ad was published : ", df_auto[df_auto.Sold_In_Days == 1].count()[0]) print("No. Of cars sold on the 2nd day the ad was published : ", df_auto[df_auto.Sold_In_Days == 2].count()[0]) df_auto.head() # Investigating the count of individual Categorical Features for col in df_auto.select_dtypes(include=['object']).columns: print(col, len(df_auto[col].unique())) # Interactive Distribution of Horsepower with Price # Visualization possible among year/month/days/gearbox/damage def plot_year(year, month, days, gearbox, damage): data = df_auto[(df_auto.yearOfRegistration == year) & (df_auto.monthOfRegistration == month) & (df_auto.Sold_In_Days == days) & (df_auto.gearbox == gearbox) & (df_auto.notRepairedDamage == damage)] area = 2 * df_auto.powerPS data.plot.scatter('powerPS', 'price', s = area, linewidth = 1, edgecolor='k', figsize=(12,8), alpha=0.7) plt.xlabel('Horse Power') plt.ylabel('Price') title = 'Variation of Price with Horse Power in ' + str(year) plt.title(title) interact(plot_year, year=widgets.IntSlider(min=1989, max=2019, step=1, value=2003, description='Year '), month=widgets.IntSlider(min=1, max=12, step=1, value=2, description='Month '), days=widgets.IntSlider(min=0, max=10, step=1, value=0, description='Day '), gearbox = widgets.RadioButtons(value='manuell', options=list(df_auto.gearbox.unique()), 
description="Gear Type "), damage = widgets.RadioButtons(value='no', options=list(df_auto.notRepairedDamage.unique()), description="Repaired ")) df_auto.head() # Importing Necessary Libraries from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import train_test_split X = df_auto.drop(['price'], axis=1) y = df_auto.price #X = pd.get_dummies(data=X,columns= ['vehicleType','yearOfRegistration','gearbox','monthOfRegistration','fuelType','brand','notRepairedDamage'],drop_first=True) X = pd.get_dummies(data=X,columns= ['vehicleType','gearbox','fuelType', 'brand','notRepairedDamage']) X.to_csv(r'C:\Users\Ad\Desktop\bros f proj\Automobiles_Capstone_Project-master\X_dummy.csv') X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42) rf = RandomForestRegressor(n_estimators = 50) rf.fit(X_train, y_train) score = rf.score(X_test, y_test) print('Accuracy Of Random Forest: ', score) pred=rf.predict(X_test) print(pred) from sklearn.metrics import mean_squared_error from math import sqrt rmse = sqrt(mean_squared_error(pred,y_test)) print("RMSE of model is : ",rmse) import joblib filename = r'C:\Users\Ad\Desktop\bros f proj\Automobiles_Capstone_Project-master\persis model\finalized_model.sav' joblib.dump(rf, filename) from IPython.display import Image from sklearn import tree import pydotplus # Visualize data data_feature_names=['vehicleType','price','yearOfRegistration','gearbox','powerPS','kilometer','Sold_In_Days','monthOfRegistration','fuelType','brand','notRepairedDamage'] rf_small = RandomForestRegressor(n_estimators=10, max_depth = 3) rf_small.fit(X_train, y_train) tree_small = rf_small.estimators_[5] dot_data = tree.export_graphviz(tree_small, feature_names=data_feature_names, out_file=None, filled=True, rounded=True) graph = pydotplus.graph_from_dot_data(dot_data) Image(graph.create_png())
sns.lineplot(df_auto.groupby('yearOfRegistration')['price'].count().index, df_auto.groupby('yearOfRegistration')['price'].count().values, data=df_auto)
random_line_split
AutomobilesOnSale.py
# Importing Necessary Library import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from scipy.stats import chi2_contingency from datetime import datetime from ipywidgets import interact, widgets get_ipython().run_line_magic('matplotlib', 'inline') sns.set_style('darkgrid') # Importing Data from autos.csv file df_auto = pd.read_csv(r'C:\Users\Ad\Desktop\bros f proj\Automobiles_Capstone_Project-master\Dataset\autos.csv', encoding='latin-1') # Investigating the Dimension of Extracted Observations df_auto.shape # Checking null values in Columns df_auto.isnull().sum() # Defining a function to fill the null values with their max value counts of individual's column def impute_missing_values(parameter): df_auto[parameter] = df_auto[parameter].fillna(df_auto[parameter].value_counts().index[0]) # Missing value imputation using "impute_missing_values" function impute_missing_values('vehicleType') impute_missing_values('gearbox') impute_missing_values('model') impute_missing_values('fuelType') impute_missing_values('notRepairedDamage') # Checking all the columns of dataset df_auto.columns # Dropping duplicate observation from dataset df_auto = df_auto.drop_duplicates() # Checking shape of datset df_auto.shape # Converting German word "ja" to 'yes' & "nein" to 'no' in English for better understanding df_auto['notRepairedDamage'] = df_auto['notRepairedDamage'].map({'ja':'yes', 'nein':'no'}) # Investigating number of cars available for sale print('Number of cars available in dataset : ', df_auto['name'].nunique()) # Dropping the feature : name as it is unnecessary while building model df_auto = df_auto.drop(['name'], axis=1) # Investigating overall structure of feature : monthOfRegistration df_auto.monthOfRegistration.describe() # As there are 12 months in Calendar, 13 months can't be right. 
Removing observations of month = 12, contains ~ 12k observation df_auto = df_auto[df_auto.monthOfRegistration != 12] # Univariate Analysis of : Sellers sns.barplot(df_auto.seller.value_counts().index, df_auto.seller.value_counts().values, alpha=0.9) plt.xlabel('Sellers') plt.ylabel('Count') plt.title('Distribution Of Car Sellers'); # As almost all of the Sellers are from private we can drop this feature df_auto = df_auto.drop(['seller'], axis=1) # Univariate Analysis of : Offer Type sns.barplot(df_auto.offerType.value_counts().index, df_auto.offerType.value_counts().values, alpha=0.9) plt.xlabel('Offer Type') plt.ylabel('Count') plt.title('Distribution Of Car Offers'); # As almost all of the Offers are from Angebot we can drop this feature df_auto = df_auto.drop(['offerType'], axis=1) print('Number of observation where price is 0 : ', df_auto[df_auto.price == 0]['price'].count()) # Number of observation where price is > 200000 df_auto[df_auto.price > 200000]['price'].count() # Number of observation where price is < 200 df_auto[df_auto.price < 200]['price'].count() # Considering outlier, selecting observations in between $200 & $200000 df_auto = df_auto[(df_auto.price > 200) & (df_auto.price < 200000)] # Distribution of Price sns.distplot(df_auto.price) plt.xlabel("Price") plt.ylabel('Frequency') plt.title("Distribution of Car's Price"); # Logarithm of Price Distribution sns.distplot(np.log(df_auto.price)) plt.xlabel("Logarithm of Car's Price") plt.ylabel('Frequency') plt.title("Distribution Log of Car's Price"); # Univariate Analysis of : AB Testing sns.barplot(df_auto.abtest.value_counts().index, df_auto.abtest.value_counts().values, alpha=0.9) plt.xlabel('Type of Testing') plt.ylabel('Count') plt.title('Distribution Of Car Testing'); # Univariate Analysis of : Vehicle Type plt.figure(figsize=(12,6)) sns.barplot(df_auto.vehicleType.value_counts().index, df_auto.vehicleType.value_counts().values, alpha=0.9) plt.xlabel('Type of Vehicle') plt.ylabel('Count') 
plt.title('Distribution Of Vehicle Types'); # Univariate Analysis of : Gear Type sns.barplot(df_auto.gearbox.value_counts().index, df_auto.gearbox.value_counts().values, alpha=0.9) plt.xlabel('Type of Gears') plt.ylabel('Count') plt.title('Distribution Of Types of Gears'); print('No of PowerPS is having value of 0 : ', df_auto[df_auto.powerPS == 0]['powerPS'].count()) print('No of PowerPS is having value of more than 662 is : ', df_auto[df_auto.powerPS > 662]['powerPS'].count()) # Removng cars having HP of 662 as the latest technology doesn't have HP > 662 # Removing observations having HP of 0 - as its meaningless df_auto = df_auto[(df_auto.powerPS > 0) & (df_auto.powerPS < 663)] # Distribution of Top 10 Horse Powered car sold plt.figure(figsize=(16,6)) sns.lineplot(df_auto[df_auto.powerPS > 0].powerPS.value_counts()[:10].index, df_auto[df_auto.powerPS > 0].powerPS.value_counts()[:10].values) plt.xticks(df_auto[df_auto.powerPS > 0].powerPS.value_counts()[:10].index) plt.xlabel('Horse Power') plt.ylabel('No. of Car Sold With Available Horse Power') plt.title('Top 10 Car Sold with Horse Power Variation'); # Distribution of Top 10 car's moel sold sns.lineplot(df_auto.model.value_counts()[:10].index, df_auto.model.value_counts()[:10].values) plt.xticks(df_auto.model.value_counts()[:10].index) plt.xlabel('Cars Model') plt.ylabel('Frequency') plt.title('Top 10 Cars Model Sold'); # Ditribution of Mesurement of KM a car ran before coming for sale plt.figure(figsize=(12,6)) sns.distplot(df_auto.kilometer) plt.xlabel("KM's Car Ran") plt.ylabel('Frequency') plt.title('Car was Driven in KM'); # No. 
of car registerd in a month for sale plt.figure(figsize=(12,6)) sns.lineplot(df_auto.monthOfRegistration.value_counts().index, df_auto.monthOfRegistration.value_counts().values) plt.xticks(df_auto.monthOfRegistration.value_counts().index.sort_values(), ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'June', 'July', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']) plt.xlabel("Month Of Registration") plt.ylabel('Frequency') plt.title('No. Of Cars Sold In Month'); # Univariate Analysis of : fuel Type plt.figure(figsize=(12,6)) sns.barplot(df_auto.fuelType.value_counts().index, df_auto.fuelType.value_counts().values, alpha=0.9) plt.xlabel('Types of Fuel') plt.ylabel('Frequency') plt.title('Distribution Of Car with Types of Fuel'); # Univariate Analysis of : Top 10 Car's Brand plt.figure(figsize=(12,6)) sns.barplot(df_auto.brand.value_counts()[:10].index, df_auto.brand.value_counts()[:10].values, alpha=0.9) plt.xlabel("Car's Brand") plt.ylabel('Frequency') plt.title("Top 10 Car's Brand Sold"); # Univariate Analysis of : Car was Repaired: yes/no before sale sns.barplot(df_auto.notRepairedDamage.value_counts().index, df_auto.notRepairedDamage.value_counts().values, alpha=0.9) plt.xlabel('Repaired Post Damage') plt.ylabel('Frequency') plt.title('Distribution Of Car Not Repaired Damaged'); # Investigating overall structure of feature : yearOfRegistration df_auto.yearOfRegistration.describe() # Observation which is older than 1989 df_auto[df_auto.yearOfRegistration < 1989]['yearOfRegistration'].count() # Observation which is more than 2019 df_auto[df_auto.yearOfRegistration > 2019]['yearOfRegistration'].count() # Taking into considearion which is in the year of between 1989 & 2019 df_auto = df_auto[(df_auto.yearOfRegistration >= 1989) & (df_auto.yearOfRegistration <= 2019)] # No of car was registered for sale throughout the year sns.lineplot(df_auto.groupby('yearOfRegistration')['price'].count().index, df_auto.groupby('yearOfRegistration')['price'].count().values, data=df_auto) plt.xlabel('Years of 
Registration') plt.ylabel('Price') plt.title('Variation Of Price with Year'); # No of days it took to sold while purchasing from E-bay days = [] for time1, time2 in zip(df_auto['dateCrawled'], df_auto['lastSeen']): time = datetime.strptime(time2, '%Y-%m-%d %H:%M:%S') - datetime.strptime(time1, '%Y-%m-%d %H:%M:%S') days.append(time.days) df_auto['Sold_In_Days'] = days # Investigating the feature : Sold_In_Days df_auto.Sold_In_Days.describe() # Removing the observations having negative values as it doesn't make any sense df_auto = df_auto[df_auto.Sold_In_Days >= 0] # Distribution of no. of cars sold in days plt.figure(figsize=(12,6)) sns.barplot(df_auto.Sold_In_Days.value_counts().index, df_auto.Sold_In_Days.value_counts().values, alpha=0.9) plt.xlabel('Sold In Days') plt.ylabel('Frequency') plt.title('No. Of Cars Sold in Days'); # Dropping the below mentioned features as they are unnecesary now while building models # All the postal code is from Germany only df_auto = df_auto.drop(['dateCrawled', 'lastSeen', 'dateCreated', 'nrOfPictures', 'model', 'abtest', 'postalCode'], axis=1) # Corelation matrix with Heatmap annotation sns.heatmap(df_auto.corr(), annot=True); # Function to get the Chi Square value & P value def chi_p_value(cat1, cat2): table = pd.crosstab(df_auto[cat1], df_auto[cat2]) chi2, p, dof, expected = chi2_contingency(table.values) if p < 0.05: print("Chi Square Statistics and p value of {} and {} is {}, {}".format(cat1, cat2, chi2, p)) # Extracting Chi Square value & p value for i in range(len(df_auto.select_dtypes(include=['object']).columns)): for cat2 in df_auto.select_dtypes(include=['object']).columns[df_auto.select_dtypes(include=['object']).columns != df_auto.select_dtypes(include=['object']).columns[i]]: chi_p_value(df_auto.select_dtypes(include=['object']).columns[i], cat2) # Taking into consideration of Sold_In_Dyas which is <= 5 days for bi-variate analysis # It will give us the top most sold cars in first consecutive 5 days df_auto_sold = 
df_auto[df_auto.Sold_In_Days < 5] # Function to visualize bivariate analysis def bivariate_analysis(param, xlabel): df_auto_sold.groupby([param, 'Sold_In_Days'])['price'].count().unstack().plot(kind='bar') plt.xticks(rotation=360) plt.xlabel(xlabel) plt.ylabel('Price') plt.title('Price Distribution of ' + xlabel + ' Sold within 0-4 days'); bivariate_analysis('vehicleType', 'Types Of Vehicle') bivariate_analysis('gearbox', 'Types Of Gear') bivariate_analysis('fuelType', 'Types Of Fuel') print("No. Of cars sold on the day the ad was published : ", df_auto[df_auto.Sold_In_Days == 0].count()[0]) print("No. Of cars sold on the 1st day the ad was published : ", df_auto[df_auto.Sold_In_Days == 1].count()[0]) print("No. Of cars sold on the 2nd day the ad was published : ", df_auto[df_auto.Sold_In_Days == 2].count()[0]) df_auto.head() # Investigating the count of individual Categorical Features for col in df_auto.select_dtypes(include=['object']).columns: print(col, len(df_auto[col].unique())) # Interactive Distribution of Horsepower with Price # Visualization possible among year/month/days/gearbox/damage def plot_year(year, month, days, gearbox, damage):
interact(plot_year, year=widgets.IntSlider(min=1989, max=2019, step=1, value=2003, description='Year '), month=widgets.IntSlider(min=1, max=12, step=1, value=2, description='Month '), days=widgets.IntSlider(min=0, max=10, step=1, value=0, description='Day '), gearbox = widgets.RadioButtons(value='manuell', options=list(df_auto.gearbox.unique()), description="Gear Type "), damage = widgets.RadioButtons(value='no', options=list(df_auto.notRepairedDamage.unique()), description="Repaired ")) df_auto.head() # Importing Necessary Libraries from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import train_test_split X = df_auto.drop(['price'], axis=1) y = df_auto.price #X = pd.get_dummies(data=X,columns= ['vehicleType','yearOfRegistration','gearbox','monthOfRegistration','fuelType','brand','notRepairedDamage'],drop_first=True) X = pd.get_dummies(data=X,columns= ['vehicleType','gearbox','fuelType', 'brand','notRepairedDamage']) X.to_csv(r'C:\Users\Ad\Desktop\bros f proj\Automobiles_Capstone_Project-master\X_dummy.csv') X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42) rf = RandomForestRegressor(n_estimators = 50) rf.fit(X_train, y_train) score = rf.score(X_test, y_test) print('Accuracy Of Random Forest: ', score) pred=rf.predict(X_test) print(pred) from sklearn.metrics import mean_squared_error from math import sqrt rmse = sqrt(mean_squared_error(pred,y_test)) print("RMSE of model is : ",rmse) import joblib filename = r'C:\Users\Ad\Desktop\bros f proj\Automobiles_Capstone_Project-master\persis model\finalized_model.sav' joblib.dump(rf, filename) from IPython.display import Image from sklearn import tree import pydotplus # Visualize data data_feature_names=['vehicleType','price','yearOfRegistration','gearbox','powerPS','kilometer','Sold_In_Days','monthOfRegistration','fuelType','brand','notRepairedDamage'] rf_small = RandomForestRegressor(n_estimators=10, max_depth = 3) rf_small.fit(X_train, y_train) 
tree_small = rf_small.estimators_[5] dot_data = tree.export_graphviz(tree_small, feature_names=data_feature_names, out_file=None, filled=True, rounded=True) graph = pydotplus.graph_from_dot_data(dot_data) Image(graph.create_png())
data = df_auto[(df_auto.yearOfRegistration == year) & (df_auto.monthOfRegistration == month) & (df_auto.Sold_In_Days == days) & (df_auto.gearbox == gearbox) & (df_auto.notRepairedDamage == damage)] area = 2 * df_auto.powerPS data.plot.scatter('powerPS', 'price', s = area, linewidth = 1, edgecolor='k', figsize=(12,8), alpha=0.7) plt.xlabel('Horse Power') plt.ylabel('Price') title = 'Variation of Price with Horse Power in ' + str(year) plt.title(title)
identifier_body
AutomobilesOnSale.py
# Importing Necessary Library import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from scipy.stats import chi2_contingency from datetime import datetime from ipywidgets import interact, widgets get_ipython().run_line_magic('matplotlib', 'inline') sns.set_style('darkgrid') # Importing Data from autos.csv file df_auto = pd.read_csv(r'C:\Users\Ad\Desktop\bros f proj\Automobiles_Capstone_Project-master\Dataset\autos.csv', encoding='latin-1') # Investigating the Dimension of Extracted Observations df_auto.shape # Checking null values in Columns df_auto.isnull().sum() # Defining a function to fill the null values with their max value counts of individual's column def impute_missing_values(parameter): df_auto[parameter] = df_auto[parameter].fillna(df_auto[parameter].value_counts().index[0]) # Missing value imputation using "impute_missing_values" function impute_missing_values('vehicleType') impute_missing_values('gearbox') impute_missing_values('model') impute_missing_values('fuelType') impute_missing_values('notRepairedDamage') # Checking all the columns of dataset df_auto.columns # Dropping duplicate observation from dataset df_auto = df_auto.drop_duplicates() # Checking shape of datset df_auto.shape # Converting German word "ja" to 'yes' & "nein" to 'no' in English for better understanding df_auto['notRepairedDamage'] = df_auto['notRepairedDamage'].map({'ja':'yes', 'nein':'no'}) # Investigating number of cars available for sale print('Number of cars available in dataset : ', df_auto['name'].nunique()) # Dropping the feature : name as it is unnecessary while building model df_auto = df_auto.drop(['name'], axis=1) # Investigating overall structure of feature : monthOfRegistration df_auto.monthOfRegistration.describe() # As there are 12 months in Calendar, 13 months can't be right. 
Removing observations of month = 12, contains ~ 12k observation df_auto = df_auto[df_auto.monthOfRegistration != 12] # Univariate Analysis of : Sellers sns.barplot(df_auto.seller.value_counts().index, df_auto.seller.value_counts().values, alpha=0.9) plt.xlabel('Sellers') plt.ylabel('Count') plt.title('Distribution Of Car Sellers'); # As almost all of the Sellers are from private we can drop this feature df_auto = df_auto.drop(['seller'], axis=1) # Univariate Analysis of : Offer Type sns.barplot(df_auto.offerType.value_counts().index, df_auto.offerType.value_counts().values, alpha=0.9) plt.xlabel('Offer Type') plt.ylabel('Count') plt.title('Distribution Of Car Offers'); # As almost all of the Offers are from Angebot we can drop this feature df_auto = df_auto.drop(['offerType'], axis=1) print('Number of observation where price is 0 : ', df_auto[df_auto.price == 0]['price'].count()) # Number of observation where price is > 200000 df_auto[df_auto.price > 200000]['price'].count() # Number of observation where price is < 200 df_auto[df_auto.price < 200]['price'].count() # Considering outlier, selecting observations in between $200 & $200000 df_auto = df_auto[(df_auto.price > 200) & (df_auto.price < 200000)] # Distribution of Price sns.distplot(df_auto.price) plt.xlabel("Price") plt.ylabel('Frequency') plt.title("Distribution of Car's Price"); # Logarithm of Price Distribution sns.distplot(np.log(df_auto.price)) plt.xlabel("Logarithm of Car's Price") plt.ylabel('Frequency') plt.title("Distribution Log of Car's Price"); # Univariate Analysis of : AB Testing sns.barplot(df_auto.abtest.value_counts().index, df_auto.abtest.value_counts().values, alpha=0.9) plt.xlabel('Type of Testing') plt.ylabel('Count') plt.title('Distribution Of Car Testing'); # Univariate Analysis of : Vehicle Type plt.figure(figsize=(12,6)) sns.barplot(df_auto.vehicleType.value_counts().index, df_auto.vehicleType.value_counts().values, alpha=0.9) plt.xlabel('Type of Vehicle') plt.ylabel('Count') 
plt.title('Distribution Of Vehicle Types'); # Univariate Analysis of : Gear Type sns.barplot(df_auto.gearbox.value_counts().index, df_auto.gearbox.value_counts().values, alpha=0.9) plt.xlabel('Type of Gears') plt.ylabel('Count') plt.title('Distribution Of Types of Gears'); print('No of PowerPS is having value of 0 : ', df_auto[df_auto.powerPS == 0]['powerPS'].count()) print('No of PowerPS is having value of more than 662 is : ', df_auto[df_auto.powerPS > 662]['powerPS'].count()) # Removng cars having HP of 662 as the latest technology doesn't have HP > 662 # Removing observations having HP of 0 - as its meaningless df_auto = df_auto[(df_auto.powerPS > 0) & (df_auto.powerPS < 663)] # Distribution of Top 10 Horse Powered car sold plt.figure(figsize=(16,6)) sns.lineplot(df_auto[df_auto.powerPS > 0].powerPS.value_counts()[:10].index, df_auto[df_auto.powerPS > 0].powerPS.value_counts()[:10].values) plt.xticks(df_auto[df_auto.powerPS > 0].powerPS.value_counts()[:10].index) plt.xlabel('Horse Power') plt.ylabel('No. of Car Sold With Available Horse Power') plt.title('Top 10 Car Sold with Horse Power Variation'); # Distribution of Top 10 car's moel sold sns.lineplot(df_auto.model.value_counts()[:10].index, df_auto.model.value_counts()[:10].values) plt.xticks(df_auto.model.value_counts()[:10].index) plt.xlabel('Cars Model') plt.ylabel('Frequency') plt.title('Top 10 Cars Model Sold'); # Ditribution of Mesurement of KM a car ran before coming for sale plt.figure(figsize=(12,6)) sns.distplot(df_auto.kilometer) plt.xlabel("KM's Car Ran") plt.ylabel('Frequency') plt.title('Car was Driven in KM'); # No. 
of car registerd in a month for sale plt.figure(figsize=(12,6)) sns.lineplot(df_auto.monthOfRegistration.value_counts().index, df_auto.monthOfRegistration.value_counts().values) plt.xticks(df_auto.monthOfRegistration.value_counts().index.sort_values(), ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'June', 'July', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']) plt.xlabel("Month Of Registration") plt.ylabel('Frequency') plt.title('No. Of Cars Sold In Month'); # Univariate Analysis of : fuel Type plt.figure(figsize=(12,6)) sns.barplot(df_auto.fuelType.value_counts().index, df_auto.fuelType.value_counts().values, alpha=0.9) plt.xlabel('Types of Fuel') plt.ylabel('Frequency') plt.title('Distribution Of Car with Types of Fuel'); # Univariate Analysis of : Top 10 Car's Brand plt.figure(figsize=(12,6)) sns.barplot(df_auto.brand.value_counts()[:10].index, df_auto.brand.value_counts()[:10].values, alpha=0.9) plt.xlabel("Car's Brand") plt.ylabel('Frequency') plt.title("Top 10 Car's Brand Sold"); # Univariate Analysis of : Car was Repaired: yes/no before sale sns.barplot(df_auto.notRepairedDamage.value_counts().index, df_auto.notRepairedDamage.value_counts().values, alpha=0.9) plt.xlabel('Repaired Post Damage') plt.ylabel('Frequency') plt.title('Distribution Of Car Not Repaired Damaged'); # Investigating overall structure of feature : yearOfRegistration df_auto.yearOfRegistration.describe() # Observation which is older than 1989 df_auto[df_auto.yearOfRegistration < 1989]['yearOfRegistration'].count() # Observation which is more than 2019 df_auto[df_auto.yearOfRegistration > 2019]['yearOfRegistration'].count() # Taking into considearion which is in the year of between 1989 & 2019 df_auto = df_auto[(df_auto.yearOfRegistration >= 1989) & (df_auto.yearOfRegistration <= 2019)] # No of car was registered for sale throughout the year sns.lineplot(df_auto.groupby('yearOfRegistration')['price'].count().index, df_auto.groupby('yearOfRegistration')['price'].count().values, data=df_auto) plt.xlabel('Years of 
Registration') plt.ylabel('Price') plt.title('Variation Of Price with Year'); # No of days it took to sold while purchasing from E-bay days = [] for time1, time2 in zip(df_auto['dateCrawled'], df_auto['lastSeen']): time = datetime.strptime(time2, '%Y-%m-%d %H:%M:%S') - datetime.strptime(time1, '%Y-%m-%d %H:%M:%S') days.append(time.days) df_auto['Sold_In_Days'] = days # Investigating the feature : Sold_In_Days df_auto.Sold_In_Days.describe() # Removing the observations having negative values as it doesn't make any sense df_auto = df_auto[df_auto.Sold_In_Days >= 0] # Distribution of no. of cars sold in days plt.figure(figsize=(12,6)) sns.barplot(df_auto.Sold_In_Days.value_counts().index, df_auto.Sold_In_Days.value_counts().values, alpha=0.9) plt.xlabel('Sold In Days') plt.ylabel('Frequency') plt.title('No. Of Cars Sold in Days'); # Dropping the below mentioned features as they are unnecesary now while building models # All the postal code is from Germany only df_auto = df_auto.drop(['dateCrawled', 'lastSeen', 'dateCreated', 'nrOfPictures', 'model', 'abtest', 'postalCode'], axis=1) # Corelation matrix with Heatmap annotation sns.heatmap(df_auto.corr(), annot=True); # Function to get the Chi Square value & P value def chi_p_value(cat1, cat2): table = pd.crosstab(df_auto[cat1], df_auto[cat2]) chi2, p, dof, expected = chi2_contingency(table.values) if p < 0.05: print("Chi Square Statistics and p value of {} and {} is {}, {}".format(cat1, cat2, chi2, p)) # Extracting Chi Square value & p value for i in range(len(df_auto.select_dtypes(include=['object']).columns)): for cat2 in df_auto.select_dtypes(include=['object']).columns[df_auto.select_dtypes(include=['object']).columns != df_auto.select_dtypes(include=['object']).columns[i]]: chi_p_value(df_auto.select_dtypes(include=['object']).columns[i], cat2) # Taking into consideration of Sold_In_Dyas which is <= 5 days for bi-variate analysis # It will give us the top most sold cars in first consecutive 5 days df_auto_sold = 
df_auto[df_auto.Sold_In_Days < 5] # Function to visualize bivariate analysis def bivariate_analysis(param, xlabel): df_auto_sold.groupby([param, 'Sold_In_Days'])['price'].count().unstack().plot(kind='bar') plt.xticks(rotation=360) plt.xlabel(xlabel) plt.ylabel('Price') plt.title('Price Distribution of ' + xlabel + ' Sold within 0-4 days'); bivariate_analysis('vehicleType', 'Types Of Vehicle') bivariate_analysis('gearbox', 'Types Of Gear') bivariate_analysis('fuelType', 'Types Of Fuel') print("No. Of cars sold on the day the ad was published : ", df_auto[df_auto.Sold_In_Days == 0].count()[0]) print("No. Of cars sold on the 1st day the ad was published : ", df_auto[df_auto.Sold_In_Days == 1].count()[0]) print("No. Of cars sold on the 2nd day the ad was published : ", df_auto[df_auto.Sold_In_Days == 2].count()[0]) df_auto.head() # Investigating the count of individual Categorical Features for col in df_auto.select_dtypes(include=['object']).columns: print(col, len(df_auto[col].unique())) # Interactive Distribution of Horsepower with Price # Visualization possible among year/month/days/gearbox/damage def
(year, month, days, gearbox, damage): data = df_auto[(df_auto.yearOfRegistration == year) & (df_auto.monthOfRegistration == month) & (df_auto.Sold_In_Days == days) & (df_auto.gearbox == gearbox) & (df_auto.notRepairedDamage == damage)] area = 2 * df_auto.powerPS data.plot.scatter('powerPS', 'price', s = area, linewidth = 1, edgecolor='k', figsize=(12,8), alpha=0.7) plt.xlabel('Horse Power') plt.ylabel('Price') title = 'Variation of Price with Horse Power in ' + str(year) plt.title(title) interact(plot_year, year=widgets.IntSlider(min=1989, max=2019, step=1, value=2003, description='Year '), month=widgets.IntSlider(min=1, max=12, step=1, value=2, description='Month '), days=widgets.IntSlider(min=0, max=10, step=1, value=0, description='Day '), gearbox = widgets.RadioButtons(value='manuell', options=list(df_auto.gearbox.unique()), description="Gear Type "), damage = widgets.RadioButtons(value='no', options=list(df_auto.notRepairedDamage.unique()), description="Repaired ")) df_auto.head() # Importing Necessary Libraries from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import train_test_split X = df_auto.drop(['price'], axis=1) y = df_auto.price #X = pd.get_dummies(data=X,columns= ['vehicleType','yearOfRegistration','gearbox','monthOfRegistration','fuelType','brand','notRepairedDamage'],drop_first=True) X = pd.get_dummies(data=X,columns= ['vehicleType','gearbox','fuelType', 'brand','notRepairedDamage']) X.to_csv(r'C:\Users\Ad\Desktop\bros f proj\Automobiles_Capstone_Project-master\X_dummy.csv') X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42) rf = RandomForestRegressor(n_estimators = 50) rf.fit(X_train, y_train) score = rf.score(X_test, y_test) print('Accuracy Of Random Forest: ', score) pred=rf.predict(X_test) print(pred) from sklearn.metrics import mean_squared_error from math import sqrt rmse = sqrt(mean_squared_error(pred,y_test)) print("RMSE of model is : ",rmse) import joblib filename = 
r'C:\Users\Ad\Desktop\bros f proj\Automobiles_Capstone_Project-master\persis model\finalized_model.sav' joblib.dump(rf, filename) from IPython.display import Image from sklearn import tree import pydotplus # Visualize data data_feature_names=['vehicleType','price','yearOfRegistration','gearbox','powerPS','kilometer','Sold_In_Days','monthOfRegistration','fuelType','brand','notRepairedDamage'] rf_small = RandomForestRegressor(n_estimators=10, max_depth = 3) rf_small.fit(X_train, y_train) tree_small = rf_small.estimators_[5] dot_data = tree.export_graphviz(tree_small, feature_names=data_feature_names, out_file=None, filled=True, rounded=True) graph = pydotplus.graph_from_dot_data(dot_data) Image(graph.create_png())
plot_year
identifier_name
AutomobilesOnSale.py
# Importing Necessary Library import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from scipy.stats import chi2_contingency from datetime import datetime from ipywidgets import interact, widgets get_ipython().run_line_magic('matplotlib', 'inline') sns.set_style('darkgrid') # Importing Data from autos.csv file df_auto = pd.read_csv(r'C:\Users\Ad\Desktop\bros f proj\Automobiles_Capstone_Project-master\Dataset\autos.csv', encoding='latin-1') # Investigating the Dimension of Extracted Observations df_auto.shape # Checking null values in Columns df_auto.isnull().sum() # Defining a function to fill the null values with their max value counts of individual's column def impute_missing_values(parameter): df_auto[parameter] = df_auto[parameter].fillna(df_auto[parameter].value_counts().index[0]) # Missing value imputation using "impute_missing_values" function impute_missing_values('vehicleType') impute_missing_values('gearbox') impute_missing_values('model') impute_missing_values('fuelType') impute_missing_values('notRepairedDamage') # Checking all the columns of dataset df_auto.columns # Dropping duplicate observation from dataset df_auto = df_auto.drop_duplicates() # Checking shape of datset df_auto.shape # Converting German word "ja" to 'yes' & "nein" to 'no' in English for better understanding df_auto['notRepairedDamage'] = df_auto['notRepairedDamage'].map({'ja':'yes', 'nein':'no'}) # Investigating number of cars available for sale print('Number of cars available in dataset : ', df_auto['name'].nunique()) # Dropping the feature : name as it is unnecessary while building model df_auto = df_auto.drop(['name'], axis=1) # Investigating overall structure of feature : monthOfRegistration df_auto.monthOfRegistration.describe() # As there are 12 months in Calendar, 13 months can't be right. 
Removing observations of month = 12, contains ~ 12k observation df_auto = df_auto[df_auto.monthOfRegistration != 12] # Univariate Analysis of : Sellers sns.barplot(df_auto.seller.value_counts().index, df_auto.seller.value_counts().values, alpha=0.9) plt.xlabel('Sellers') plt.ylabel('Count') plt.title('Distribution Of Car Sellers'); # As almost all of the Sellers are from private we can drop this feature df_auto = df_auto.drop(['seller'], axis=1) # Univariate Analysis of : Offer Type sns.barplot(df_auto.offerType.value_counts().index, df_auto.offerType.value_counts().values, alpha=0.9) plt.xlabel('Offer Type') plt.ylabel('Count') plt.title('Distribution Of Car Offers'); # As almost all of the Offers are from Angebot we can drop this feature df_auto = df_auto.drop(['offerType'], axis=1) print('Number of observation where price is 0 : ', df_auto[df_auto.price == 0]['price'].count()) # Number of observation where price is > 200000 df_auto[df_auto.price > 200000]['price'].count() # Number of observation where price is < 200 df_auto[df_auto.price < 200]['price'].count() # Considering outlier, selecting observations in between $200 & $200000 df_auto = df_auto[(df_auto.price > 200) & (df_auto.price < 200000)] # Distribution of Price sns.distplot(df_auto.price) plt.xlabel("Price") plt.ylabel('Frequency') plt.title("Distribution of Car's Price"); # Logarithm of Price Distribution sns.distplot(np.log(df_auto.price)) plt.xlabel("Logarithm of Car's Price") plt.ylabel('Frequency') plt.title("Distribution Log of Car's Price"); # Univariate Analysis of : AB Testing sns.barplot(df_auto.abtest.value_counts().index, df_auto.abtest.value_counts().values, alpha=0.9) plt.xlabel('Type of Testing') plt.ylabel('Count') plt.title('Distribution Of Car Testing'); # Univariate Analysis of : Vehicle Type plt.figure(figsize=(12,6)) sns.barplot(df_auto.vehicleType.value_counts().index, df_auto.vehicleType.value_counts().values, alpha=0.9) plt.xlabel('Type of Vehicle') plt.ylabel('Count') 
plt.title('Distribution Of Vehicle Types'); # Univariate Analysis of : Gear Type sns.barplot(df_auto.gearbox.value_counts().index, df_auto.gearbox.value_counts().values, alpha=0.9) plt.xlabel('Type of Gears') plt.ylabel('Count') plt.title('Distribution Of Types of Gears'); print('No of PowerPS is having value of 0 : ', df_auto[df_auto.powerPS == 0]['powerPS'].count()) print('No of PowerPS is having value of more than 662 is : ', df_auto[df_auto.powerPS > 662]['powerPS'].count()) # Removng cars having HP of 662 as the latest technology doesn't have HP > 662 # Removing observations having HP of 0 - as its meaningless df_auto = df_auto[(df_auto.powerPS > 0) & (df_auto.powerPS < 663)] # Distribution of Top 10 Horse Powered car sold plt.figure(figsize=(16,6)) sns.lineplot(df_auto[df_auto.powerPS > 0].powerPS.value_counts()[:10].index, df_auto[df_auto.powerPS > 0].powerPS.value_counts()[:10].values) plt.xticks(df_auto[df_auto.powerPS > 0].powerPS.value_counts()[:10].index) plt.xlabel('Horse Power') plt.ylabel('No. of Car Sold With Available Horse Power') plt.title('Top 10 Car Sold with Horse Power Variation'); # Distribution of Top 10 car's moel sold sns.lineplot(df_auto.model.value_counts()[:10].index, df_auto.model.value_counts()[:10].values) plt.xticks(df_auto.model.value_counts()[:10].index) plt.xlabel('Cars Model') plt.ylabel('Frequency') plt.title('Top 10 Cars Model Sold'); # Ditribution of Mesurement of KM a car ran before coming for sale plt.figure(figsize=(12,6)) sns.distplot(df_auto.kilometer) plt.xlabel("KM's Car Ran") plt.ylabel('Frequency') plt.title('Car was Driven in KM'); # No. 
of car registerd in a month for sale plt.figure(figsize=(12,6)) sns.lineplot(df_auto.monthOfRegistration.value_counts().index, df_auto.monthOfRegistration.value_counts().values) plt.xticks(df_auto.monthOfRegistration.value_counts().index.sort_values(), ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'June', 'July', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']) plt.xlabel("Month Of Registration") plt.ylabel('Frequency') plt.title('No. Of Cars Sold In Month'); # Univariate Analysis of : fuel Type plt.figure(figsize=(12,6)) sns.barplot(df_auto.fuelType.value_counts().index, df_auto.fuelType.value_counts().values, alpha=0.9) plt.xlabel('Types of Fuel') plt.ylabel('Frequency') plt.title('Distribution Of Car with Types of Fuel'); # Univariate Analysis of : Top 10 Car's Brand plt.figure(figsize=(12,6)) sns.barplot(df_auto.brand.value_counts()[:10].index, df_auto.brand.value_counts()[:10].values, alpha=0.9) plt.xlabel("Car's Brand") plt.ylabel('Frequency') plt.title("Top 10 Car's Brand Sold"); # Univariate Analysis of : Car was Repaired: yes/no before sale sns.barplot(df_auto.notRepairedDamage.value_counts().index, df_auto.notRepairedDamage.value_counts().values, alpha=0.9) plt.xlabel('Repaired Post Damage') plt.ylabel('Frequency') plt.title('Distribution Of Car Not Repaired Damaged'); # Investigating overall structure of feature : yearOfRegistration df_auto.yearOfRegistration.describe() # Observation which is older than 1989 df_auto[df_auto.yearOfRegistration < 1989]['yearOfRegistration'].count() # Observation which is more than 2019 df_auto[df_auto.yearOfRegistration > 2019]['yearOfRegistration'].count() # Taking into considearion which is in the year of between 1989 & 2019 df_auto = df_auto[(df_auto.yearOfRegistration >= 1989) & (df_auto.yearOfRegistration <= 2019)] # No of car was registered for sale throughout the year sns.lineplot(df_auto.groupby('yearOfRegistration')['price'].count().index, df_auto.groupby('yearOfRegistration')['price'].count().values, data=df_auto) plt.xlabel('Years of 
Registration') plt.ylabel('Price') plt.title('Variation Of Price with Year'); # No of days it took to sold while purchasing from E-bay days = [] for time1, time2 in zip(df_auto['dateCrawled'], df_auto['lastSeen']): time = datetime.strptime(time2, '%Y-%m-%d %H:%M:%S') - datetime.strptime(time1, '%Y-%m-%d %H:%M:%S') days.append(time.days) df_auto['Sold_In_Days'] = days # Investigating the feature : Sold_In_Days df_auto.Sold_In_Days.describe() # Removing the observations having negative values as it doesn't make any sense df_auto = df_auto[df_auto.Sold_In_Days >= 0] # Distribution of no. of cars sold in days plt.figure(figsize=(12,6)) sns.barplot(df_auto.Sold_In_Days.value_counts().index, df_auto.Sold_In_Days.value_counts().values, alpha=0.9) plt.xlabel('Sold In Days') plt.ylabel('Frequency') plt.title('No. Of Cars Sold in Days'); # Dropping the below mentioned features as they are unnecesary now while building models # All the postal code is from Germany only df_auto = df_auto.drop(['dateCrawled', 'lastSeen', 'dateCreated', 'nrOfPictures', 'model', 'abtest', 'postalCode'], axis=1) # Corelation matrix with Heatmap annotation sns.heatmap(df_auto.corr(), annot=True); # Function to get the Chi Square value & P value def chi_p_value(cat1, cat2): table = pd.crosstab(df_auto[cat1], df_auto[cat2]) chi2, p, dof, expected = chi2_contingency(table.values) if p < 0.05: print("Chi Square Statistics and p value of {} and {} is {}, {}".format(cat1, cat2, chi2, p)) # Extracting Chi Square value & p value for i in range(len(df_auto.select_dtypes(include=['object']).columns)): for cat2 in df_auto.select_dtypes(include=['object']).columns[df_auto.select_dtypes(include=['object']).columns != df_auto.select_dtypes(include=['object']).columns[i]]: chi_p_value(df_auto.select_dtypes(include=['object']).columns[i], cat2) # Taking into consideration of Sold_In_Dyas which is <= 5 days for bi-variate analysis # It will give us the top most sold cars in first consecutive 5 days df_auto_sold = 
df_auto[df_auto.Sold_In_Days < 5] # Function to visualize bivariate analysis def bivariate_analysis(param, xlabel): df_auto_sold.groupby([param, 'Sold_In_Days'])['price'].count().unstack().plot(kind='bar') plt.xticks(rotation=360) plt.xlabel(xlabel) plt.ylabel('Price') plt.title('Price Distribution of ' + xlabel + ' Sold within 0-4 days'); bivariate_analysis('vehicleType', 'Types Of Vehicle') bivariate_analysis('gearbox', 'Types Of Gear') bivariate_analysis('fuelType', 'Types Of Fuel') print("No. Of cars sold on the day the ad was published : ", df_auto[df_auto.Sold_In_Days == 0].count()[0]) print("No. Of cars sold on the 1st day the ad was published : ", df_auto[df_auto.Sold_In_Days == 1].count()[0]) print("No. Of cars sold on the 2nd day the ad was published : ", df_auto[df_auto.Sold_In_Days == 2].count()[0]) df_auto.head() # Investigating the count of individual Categorical Features for col in df_auto.select_dtypes(include=['object']).columns:
# Interactive Distribution of Horsepower with Price # Visualization possible among year/month/days/gearbox/damage def plot_year(year, month, days, gearbox, damage): data = df_auto[(df_auto.yearOfRegistration == year) & (df_auto.monthOfRegistration == month) & (df_auto.Sold_In_Days == days) & (df_auto.gearbox == gearbox) & (df_auto.notRepairedDamage == damage)] area = 2 * df_auto.powerPS data.plot.scatter('powerPS', 'price', s = area, linewidth = 1, edgecolor='k', figsize=(12,8), alpha=0.7) plt.xlabel('Horse Power') plt.ylabel('Price') title = 'Variation of Price with Horse Power in ' + str(year) plt.title(title) interact(plot_year, year=widgets.IntSlider(min=1989, max=2019, step=1, value=2003, description='Year '), month=widgets.IntSlider(min=1, max=12, step=1, value=2, description='Month '), days=widgets.IntSlider(min=0, max=10, step=1, value=0, description='Day '), gearbox = widgets.RadioButtons(value='manuell', options=list(df_auto.gearbox.unique()), description="Gear Type "), damage = widgets.RadioButtons(value='no', options=list(df_auto.notRepairedDamage.unique()), description="Repaired ")) df_auto.head() # Importing Necessary Libraries from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import train_test_split X = df_auto.drop(['price'], axis=1) y = df_auto.price #X = pd.get_dummies(data=X,columns= ['vehicleType','yearOfRegistration','gearbox','monthOfRegistration','fuelType','brand','notRepairedDamage'],drop_first=True) X = pd.get_dummies(data=X,columns= ['vehicleType','gearbox','fuelType', 'brand','notRepairedDamage']) X.to_csv(r'C:\Users\Ad\Desktop\bros f proj\Automobiles_Capstone_Project-master\X_dummy.csv') X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42) rf = RandomForestRegressor(n_estimators = 50) rf.fit(X_train, y_train) score = rf.score(X_test, y_test) print('Accuracy Of Random Forest: ', score) pred=rf.predict(X_test) print(pred) from sklearn.metrics import mean_squared_error from 
math import sqrt rmse = sqrt(mean_squared_error(pred,y_test)) print("RMSE of model is : ",rmse) import joblib filename = r'C:\Users\Ad\Desktop\bros f proj\Automobiles_Capstone_Project-master\persis model\finalized_model.sav' joblib.dump(rf, filename) from IPython.display import Image from sklearn import tree import pydotplus # Visualize data data_feature_names=['vehicleType','price','yearOfRegistration','gearbox','powerPS','kilometer','Sold_In_Days','monthOfRegistration','fuelType','brand','notRepairedDamage'] rf_small = RandomForestRegressor(n_estimators=10, max_depth = 3) rf_small.fit(X_train, y_train) tree_small = rf_small.estimators_[5] dot_data = tree.export_graphviz(tree_small, feature_names=data_feature_names, out_file=None, filled=True, rounded=True) graph = pydotplus.graph_from_dot_data(dot_data) Image(graph.create_png())
print(col, len(df_auto[col].unique()))
conditional_block
metapipeline.go
package metapipeline import ( "fmt" "path/filepath" jenkinsv1 "github.com/jenkins-x/jx/pkg/apis/jenkins.io/v1" "github.com/jenkins-x/jx/pkg/apps" "github.com/jenkins-x/jx/pkg/client/clientset/versioned" "github.com/jenkins-x/jx/pkg/gits" "github.com/jenkins-x/jx/pkg/kube" "github.com/jenkins-x/jx/pkg/prow" "github.com/jenkins-x/jx/pkg/tekton" "github.com/jenkins-x/jx/pkg/tekton/syntax" "github.com/jenkins-x/jx/pkg/util" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/jenkins-x/jx/pkg/log" "github.com/pkg/errors" pipelineapi "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" corev1 "k8s.io/api/core/v1" ) const ( appExtensionStageName = "app-extension" // mergePullRefsStepName is the meta pipeline step name for merging all pull refs into the workspace mergePullRefsStepName = "merge-pull-refs" // createEffectivePipelineStepName is the meta pipeline step name for the generation of the effective jenkins-x pipeline config createEffectivePipelineStepName = "create-effective-pipeline" // createTektonCRDsStepName is the meta pipeline step name for the Tekton CRD creation createTektonCRDsStepName = "create-tekton-crds" tektonBaseDir = "/workspace" ) // CRDCreationParameters are the parameters needed to create the Tekton CRDs type CRDCreationParameters struct { Namespace string Context string PipelineName string ResourceName string PipelineKind string BuildNumber string GitInfo gits.GitRepository BranchIdentifier string PullRef prow.PullRefs SourceDir string PodTemplates map[string]*corev1.Pod ServiceAccount string Labels []string EnvVars []string DefaultImage string Apps []jenkinsv1.App VersionsDir string } // CreateMetaPipelineCRDs creates the Tekton CRDs needed to execute the meta pipeline. // The meta pipeline is responsible to checkout the source repository at the right revision, allows Jenkins-X Apps // to modify the pipeline (via modifying the configuration on the file system) and finally triggering the actual // pipeline build. 
// An error is returned in case the creation of the Tekton CRDs fails. func CreateMetaPipelineCRDs(params CRDCreationParameters) (*tekton.CRDWrapper, error) { parsedPipeline, err := createPipeline(params) if err != nil { return nil, err } labels, err := buildLabels(params) if err != nil { return nil, err } pipeline, tasks, structure, err := parsedPipeline.GenerateCRDs(params.PipelineName, params.BuildNumber, params.ResourceName, params.Namespace, params.PodTemplates, params.VersionsDir, nil, params.SourceDir, labels, params.DefaultImage) if err != nil { return nil, err } revision := params.PullRef.BaseSha if revision == "" { revision = params.PullRef.BaseBranch } resources := []*pipelineapi.PipelineResource{tekton.GenerateSourceRepoResource(params.ResourceName, &params.GitInfo, revision)} run := tekton.CreatePipelineRun(resources, pipeline.Name, pipeline.APIVersion, labels, params.ServiceAccount, nil, nil, nil) tektonCRDs, err := tekton.NewCRDWrapper(pipeline, tasks, resources, structure, run) if err != nil { return nil, err } return tektonCRDs, nil } // GetExtendingApps returns the list of apps which are installed in the cluster registered for extending the pipeline. // An app registers its interest in extending the pipeline by having the 'pipeline-extension' label set. func GetExtendingApps(jxClient versioned.Interface, namespace string) ([]jenkinsv1.App, error) { listOptions := metav1.ListOptions{} listOptions.LabelSelector = fmt.Sprintf(apps.AppTypeLabel+" in (%s)", apps.PipelineExtension) appsList, err := jxClient.JenkinsV1().Apps(namespace).List(listOptions) if err != nil { return nil, errors.Wrap(err, "error retrieving pipeline contributor apps") } return appsList.Items, nil } // createPipeline builds the parsed/typed pipeline which servers as input for the Tekton CRD creation. 
func createPipeline(params CRDCreationParameters) (*syntax.ParsedPipeline, error) { steps, err := buildSteps(params) if err != nil { return nil, errors.Wrap(err, "unable to create app extending pipeline steps") } stage := syntax.Stage{ Name: appExtensionStageName, Steps: steps, Agent: &syntax.Agent{ Image: determineDefaultStepImage(params.DefaultImage), }, } parsedPipeline := &syntax.ParsedPipeline{ Stages: []syntax.Stage{stage}, } env := buildEnvParams(params) parsedPipeline.AddContainerEnvVarsToPipeline(env) return parsedPipeline, nil } // buildSteps builds the meta pipeline steps. // The tasks of the meta pipeline are: // 1) make sure the right commits are merged // 2) create the effective pipeline and write it to disk // 3) one step for each extending app // 4) create Tekton CRDs for the meta pipeline func
(params CRDCreationParameters) ([]syntax.Step, error) { var steps []syntax.Step // 1) step := stepMergePullRefs(params.PullRef) steps = append(steps, step) // 2) step = stepEffectivePipeline(params) steps = append(steps, step) log.Logger().Debugf("creating pipeline steps for extending apps") // 3) for _, app := range params.Apps { if app.Spec.PipelineExtension == nil { log.Logger().Warnf("Skipping app %s in meta pipeline. It contains label %s with value %s, but does not contain PipelineExtension fields.", app.Name, apps.AppTypeLabel, apps.PipelineExtension) continue } extension := app.Spec.PipelineExtension step := syntax.Step{ Name: extension.Name, Image: extension.Image, Command: extension.Command, Arguments: extension.Args, } log.Logger().Debugf("App %s contributes with step %s", app.Name, util.PrettyPrint(step)) steps = append(steps, step) } // 4) step = stepCreateTektonCRDs(params) steps = append(steps, step) return steps, nil } func stepMergePullRefs(pullRefs prow.PullRefs) syntax.Step { // we only need to run the merge step in case there is anything to merge // Tekton has at this stage the base branch already checked out if len(pullRefs.ToMerge) == 0 { return stepSkip(mergePullRefsStepName, "Nothing to merge") } args := []string{"--verbose", "--baseBranch", pullRefs.BaseBranch, "--baseSHA", pullRefs.BaseSha} for _, mergeSha := range pullRefs.ToMerge { args = append(args, "--sha", mergeSha) } step := syntax.Step{ Name: mergePullRefsStepName, Comment: "Pipeline step merging pull refs", Command: "jx step git merge", Arguments: args, } return step } func stepEffectivePipeline(params CRDCreationParameters) syntax.Step { args := []string{"--output-dir", "."} if params.Context != "" { args = append(args, "--context", params.Context) } step := syntax.Step{ Name: createEffectivePipelineStepName, Comment: "Pipeline step creating the effective pipeline configuration", Command: "jx step syntax effective", Arguments: args, } return step } func stepCreateTektonCRDs(params 
CRDCreationParameters) syntax.Step { args := []string{"--clone-dir", filepath.Join(tektonBaseDir, params.SourceDir)} args = append(args, "--kind", params.PipelineKind) for prID := range params.PullRef.ToMerge { args = append(args, "--pr-number", prID) // there might be a batch build building multiple PRs, in which case we just use the first in this case break } args = append(args, "--service-account", params.ServiceAccount) args = append(args, "--source", params.SourceDir) args = append(args, "--branch", params.BranchIdentifier) args = append(args, "--build-number", params.BuildNumber) if params.Context != "" { args = append(args, "--context", params.Context) } for _, l := range params.Labels { args = append(args, "--label", l) } for _, e := range params.EnvVars { args = append(args, "--env", e) } step := syntax.Step{ Name: createTektonCRDsStepName, Comment: "Pipeline step to create the Tekton CRDs for the actual pipeline run", Command: "jx step create task", Arguments: args, } return step } func stepSkip(stepName string, msg string) syntax.Step { skipMsg := fmt.Sprintf("SKIP %s: %s", stepName, msg) step := syntax.Step{ Name: stepName, Comment: skipMsg, Command: "echo", Arguments: []string{fmt.Sprintf("'%s'", skipMsg)}, } return step } func determineDefaultStepImage(defaultImage string) string { if defaultImage != "" { return defaultImage } return syntax.DefaultContainerImage } func buildEnvParams(params CRDCreationParameters) []corev1.EnvVar { var envVars []corev1.EnvVar envVars = append(envVars, corev1.EnvVar{ Name: "JX_LOG_FORMAT", Value: "json", }) envVars = append(envVars, corev1.EnvVar{ Name: "BUILD_NUMBER", Value: params.BuildNumber, }) envVars = append(envVars, corev1.EnvVar{ Name: "PIPELINE_KIND", Value: params.PipelineKind, }) envVars = append(envVars, corev1.EnvVar{ Name: "PULL_REFS", Value: params.PullRef.String(), }) context := params.Context if context != "" { envVars = append(envVars, corev1.EnvVar{ Name: "PIPELINE_CONTEXT", Value: context, }) } 
gitInfo := params.GitInfo envVars = append(envVars, corev1.EnvVar{ Name: "SOURCE_URL", Value: gitInfo.URL, }) owner := gitInfo.Organisation if owner != "" { envVars = append(envVars, corev1.EnvVar{ Name: "REPO_OWNER", Value: owner, }) } repo := gitInfo.Name if repo != "" { envVars = append(envVars, corev1.EnvVar{ Name: "REPO_NAME", Value: repo, }) // lets keep the APP_NAME environment variable we need for previews envVars = append(envVars, corev1.EnvVar{ Name: "APP_NAME", Value: repo, }) } branch := params.BranchIdentifier if branch != "" { if kube.GetSliceEnvVar(envVars, "BRANCH_NAME") == nil { envVars = append(envVars, corev1.EnvVar{ Name: "BRANCH_NAME", Value: branch, }) } } if owner != "" && repo != "" && branch != "" { jobName := fmt.Sprintf("%s/%s/%s", owner, repo, branch) if kube.GetSliceEnvVar(envVars, "JOB_NAME") == nil { envVars = append(envVars, corev1.EnvVar{ Name: "JOB_NAME", Value: jobName, }) } } envVars = append(envVars, buildEnvVars(params)...) log.Logger().Debugf("step environment variables: %s", util.PrettyPrint(envVars)) return envVars } // TODO: Merge this with step_create_task's setBuildValues equivalent somewhere. func buildLabels(params CRDCreationParameters) (map[string]string, error) { labels := map[string]string{} labels[tekton.LabelOwner] = params.GitInfo.Organisation labels[tekton.LabelRepo] = params.GitInfo.Name labels[tekton.LabelBranch] = params.BranchIdentifier if params.Context != "" { labels[tekton.LabelContext] = params.Context } labels[tekton.LabelBuild] = params.BuildNumber // add any custom labels customLabels, err := util.ExtractKeyValuePairs(params.Labels, "=") if err != nil { return nil, err } return util.MergeMaps(labels, customLabels), nil } func buildEnvVars(params CRDCreationParameters) []corev1.EnvVar { var envVars []corev1.EnvVar vars, _ := util.ExtractKeyValuePairs(params.EnvVars, "=") for key, value := range vars { envVars = append(envVars, corev1.EnvVar{ Name: key, Value: value, }) } return envVars }
buildSteps
identifier_name
metapipeline.go
package metapipeline import ( "fmt" "path/filepath" jenkinsv1 "github.com/jenkins-x/jx/pkg/apis/jenkins.io/v1" "github.com/jenkins-x/jx/pkg/apps" "github.com/jenkins-x/jx/pkg/client/clientset/versioned" "github.com/jenkins-x/jx/pkg/gits" "github.com/jenkins-x/jx/pkg/kube" "github.com/jenkins-x/jx/pkg/prow" "github.com/jenkins-x/jx/pkg/tekton" "github.com/jenkins-x/jx/pkg/tekton/syntax" "github.com/jenkins-x/jx/pkg/util" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/jenkins-x/jx/pkg/log" "github.com/pkg/errors" pipelineapi "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" corev1 "k8s.io/api/core/v1" ) const ( appExtensionStageName = "app-extension" // mergePullRefsStepName is the meta pipeline step name for merging all pull refs into the workspace mergePullRefsStepName = "merge-pull-refs" // createEffectivePipelineStepName is the meta pipeline step name for the generation of the effective jenkins-x pipeline config createEffectivePipelineStepName = "create-effective-pipeline" // createTektonCRDsStepName is the meta pipeline step name for the Tekton CRD creation createTektonCRDsStepName = "create-tekton-crds" tektonBaseDir = "/workspace" ) // CRDCreationParameters are the parameters needed to create the Tekton CRDs type CRDCreationParameters struct { Namespace string Context string PipelineName string ResourceName string PipelineKind string BuildNumber string GitInfo gits.GitRepository BranchIdentifier string PullRef prow.PullRefs SourceDir string PodTemplates map[string]*corev1.Pod ServiceAccount string Labels []string EnvVars []string DefaultImage string Apps []jenkinsv1.App VersionsDir string } // CreateMetaPipelineCRDs creates the Tekton CRDs needed to execute the meta pipeline. // The meta pipeline is responsible to checkout the source repository at the right revision, allows Jenkins-X Apps // to modify the pipeline (via modifying the configuration on the file system) and finally triggering the actual // pipeline build. 
// An error is returned in case the creation of the Tekton CRDs fails. func CreateMetaPipelineCRDs(params CRDCreationParameters) (*tekton.CRDWrapper, error) { parsedPipeline, err := createPipeline(params) if err != nil { return nil, err } labels, err := buildLabels(params) if err != nil { return nil, err } pipeline, tasks, structure, err := parsedPipeline.GenerateCRDs(params.PipelineName, params.BuildNumber, params.ResourceName, params.Namespace, params.PodTemplates, params.VersionsDir, nil, params.SourceDir, labels, params.DefaultImage) if err != nil { return nil, err } revision := params.PullRef.BaseSha if revision == "" { revision = params.PullRef.BaseBranch } resources := []*pipelineapi.PipelineResource{tekton.GenerateSourceRepoResource(params.ResourceName, &params.GitInfo, revision)} run := tekton.CreatePipelineRun(resources, pipeline.Name, pipeline.APIVersion, labels, params.ServiceAccount, nil, nil, nil) tektonCRDs, err := tekton.NewCRDWrapper(pipeline, tasks, resources, structure, run) if err != nil { return nil, err } return tektonCRDs, nil } // GetExtendingApps returns the list of apps which are installed in the cluster registered for extending the pipeline. // An app registers its interest in extending the pipeline by having the 'pipeline-extension' label set. func GetExtendingApps(jxClient versioned.Interface, namespace string) ([]jenkinsv1.App, error) { listOptions := metav1.ListOptions{} listOptions.LabelSelector = fmt.Sprintf(apps.AppTypeLabel+" in (%s)", apps.PipelineExtension) appsList, err := jxClient.JenkinsV1().Apps(namespace).List(listOptions) if err != nil { return nil, errors.Wrap(err, "error retrieving pipeline contributor apps") } return appsList.Items, nil } // createPipeline builds the parsed/typed pipeline which servers as input for the Tekton CRD creation. func createPipeline(params CRDCreationParameters) (*syntax.ParsedPipeline, error)
// buildSteps builds the meta pipeline steps. // The tasks of the meta pipeline are: // 1) make sure the right commits are merged // 2) create the effective pipeline and write it to disk // 3) one step for each extending app // 4) create Tekton CRDs for the meta pipeline func buildSteps(params CRDCreationParameters) ([]syntax.Step, error) { var steps []syntax.Step // 1) step := stepMergePullRefs(params.PullRef) steps = append(steps, step) // 2) step = stepEffectivePipeline(params) steps = append(steps, step) log.Logger().Debugf("creating pipeline steps for extending apps") // 3) for _, app := range params.Apps { if app.Spec.PipelineExtension == nil { log.Logger().Warnf("Skipping app %s in meta pipeline. It contains label %s with value %s, but does not contain PipelineExtension fields.", app.Name, apps.AppTypeLabel, apps.PipelineExtension) continue } extension := app.Spec.PipelineExtension step := syntax.Step{ Name: extension.Name, Image: extension.Image, Command: extension.Command, Arguments: extension.Args, } log.Logger().Debugf("App %s contributes with step %s", app.Name, util.PrettyPrint(step)) steps = append(steps, step) } // 4) step = stepCreateTektonCRDs(params) steps = append(steps, step) return steps, nil } func stepMergePullRefs(pullRefs prow.PullRefs) syntax.Step { // we only need to run the merge step in case there is anything to merge // Tekton has at this stage the base branch already checked out if len(pullRefs.ToMerge) == 0 { return stepSkip(mergePullRefsStepName, "Nothing to merge") } args := []string{"--verbose", "--baseBranch", pullRefs.BaseBranch, "--baseSHA", pullRefs.BaseSha} for _, mergeSha := range pullRefs.ToMerge { args = append(args, "--sha", mergeSha) } step := syntax.Step{ Name: mergePullRefsStepName, Comment: "Pipeline step merging pull refs", Command: "jx step git merge", Arguments: args, } return step } func stepEffectivePipeline(params CRDCreationParameters) syntax.Step { args := []string{"--output-dir", "."} if params.Context != "" 
{ args = append(args, "--context", params.Context) } step := syntax.Step{ Name: createEffectivePipelineStepName, Comment: "Pipeline step creating the effective pipeline configuration", Command: "jx step syntax effective", Arguments: args, } return step } func stepCreateTektonCRDs(params CRDCreationParameters) syntax.Step { args := []string{"--clone-dir", filepath.Join(tektonBaseDir, params.SourceDir)} args = append(args, "--kind", params.PipelineKind) for prID := range params.PullRef.ToMerge { args = append(args, "--pr-number", prID) // there might be a batch build building multiple PRs, in which case we just use the first in this case break } args = append(args, "--service-account", params.ServiceAccount) args = append(args, "--source", params.SourceDir) args = append(args, "--branch", params.BranchIdentifier) args = append(args, "--build-number", params.BuildNumber) if params.Context != "" { args = append(args, "--context", params.Context) } for _, l := range params.Labels { args = append(args, "--label", l) } for _, e := range params.EnvVars { args = append(args, "--env", e) } step := syntax.Step{ Name: createTektonCRDsStepName, Comment: "Pipeline step to create the Tekton CRDs for the actual pipeline run", Command: "jx step create task", Arguments: args, } return step } func stepSkip(stepName string, msg string) syntax.Step { skipMsg := fmt.Sprintf("SKIP %s: %s", stepName, msg) step := syntax.Step{ Name: stepName, Comment: skipMsg, Command: "echo", Arguments: []string{fmt.Sprintf("'%s'", skipMsg)}, } return step } func determineDefaultStepImage(defaultImage string) string { if defaultImage != "" { return defaultImage } return syntax.DefaultContainerImage } func buildEnvParams(params CRDCreationParameters) []corev1.EnvVar { var envVars []corev1.EnvVar envVars = append(envVars, corev1.EnvVar{ Name: "JX_LOG_FORMAT", Value: "json", }) envVars = append(envVars, corev1.EnvVar{ Name: "BUILD_NUMBER", Value: params.BuildNumber, }) envVars = append(envVars, 
corev1.EnvVar{ Name: "PIPELINE_KIND", Value: params.PipelineKind, }) envVars = append(envVars, corev1.EnvVar{ Name: "PULL_REFS", Value: params.PullRef.String(), }) context := params.Context if context != "" { envVars = append(envVars, corev1.EnvVar{ Name: "PIPELINE_CONTEXT", Value: context, }) } gitInfo := params.GitInfo envVars = append(envVars, corev1.EnvVar{ Name: "SOURCE_URL", Value: gitInfo.URL, }) owner := gitInfo.Organisation if owner != "" { envVars = append(envVars, corev1.EnvVar{ Name: "REPO_OWNER", Value: owner, }) } repo := gitInfo.Name if repo != "" { envVars = append(envVars, corev1.EnvVar{ Name: "REPO_NAME", Value: repo, }) // lets keep the APP_NAME environment variable we need for previews envVars = append(envVars, corev1.EnvVar{ Name: "APP_NAME", Value: repo, }) } branch := params.BranchIdentifier if branch != "" { if kube.GetSliceEnvVar(envVars, "BRANCH_NAME") == nil { envVars = append(envVars, corev1.EnvVar{ Name: "BRANCH_NAME", Value: branch, }) } } if owner != "" && repo != "" && branch != "" { jobName := fmt.Sprintf("%s/%s/%s", owner, repo, branch) if kube.GetSliceEnvVar(envVars, "JOB_NAME") == nil { envVars = append(envVars, corev1.EnvVar{ Name: "JOB_NAME", Value: jobName, }) } } envVars = append(envVars, buildEnvVars(params)...) log.Logger().Debugf("step environment variables: %s", util.PrettyPrint(envVars)) return envVars } // TODO: Merge this with step_create_task's setBuildValues equivalent somewhere. 
func buildLabels(params CRDCreationParameters) (map[string]string, error) { labels := map[string]string{} labels[tekton.LabelOwner] = params.GitInfo.Organisation labels[tekton.LabelRepo] = params.GitInfo.Name labels[tekton.LabelBranch] = params.BranchIdentifier if params.Context != "" { labels[tekton.LabelContext] = params.Context } labels[tekton.LabelBuild] = params.BuildNumber // add any custom labels customLabels, err := util.ExtractKeyValuePairs(params.Labels, "=") if err != nil { return nil, err } return util.MergeMaps(labels, customLabels), nil } func buildEnvVars(params CRDCreationParameters) []corev1.EnvVar { var envVars []corev1.EnvVar vars, _ := util.ExtractKeyValuePairs(params.EnvVars, "=") for key, value := range vars { envVars = append(envVars, corev1.EnvVar{ Name: key, Value: value, }) } return envVars }
{ steps, err := buildSteps(params) if err != nil { return nil, errors.Wrap(err, "unable to create app extending pipeline steps") } stage := syntax.Stage{ Name: appExtensionStageName, Steps: steps, Agent: &syntax.Agent{ Image: determineDefaultStepImage(params.DefaultImage), }, } parsedPipeline := &syntax.ParsedPipeline{ Stages: []syntax.Stage{stage}, } env := buildEnvParams(params) parsedPipeline.AddContainerEnvVarsToPipeline(env) return parsedPipeline, nil }
identifier_body
metapipeline.go
package metapipeline import ( "fmt" "path/filepath" jenkinsv1 "github.com/jenkins-x/jx/pkg/apis/jenkins.io/v1" "github.com/jenkins-x/jx/pkg/apps" "github.com/jenkins-x/jx/pkg/client/clientset/versioned" "github.com/jenkins-x/jx/pkg/gits" "github.com/jenkins-x/jx/pkg/kube" "github.com/jenkins-x/jx/pkg/prow" "github.com/jenkins-x/jx/pkg/tekton" "github.com/jenkins-x/jx/pkg/tekton/syntax" "github.com/jenkins-x/jx/pkg/util" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/jenkins-x/jx/pkg/log" "github.com/pkg/errors" pipelineapi "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" corev1 "k8s.io/api/core/v1" ) const ( appExtensionStageName = "app-extension" // mergePullRefsStepName is the meta pipeline step name for merging all pull refs into the workspace mergePullRefsStepName = "merge-pull-refs" // createEffectivePipelineStepName is the meta pipeline step name for the generation of the effective jenkins-x pipeline config createEffectivePipelineStepName = "create-effective-pipeline" // createTektonCRDsStepName is the meta pipeline step name for the Tekton CRD creation createTektonCRDsStepName = "create-tekton-crds" tektonBaseDir = "/workspace" ) // CRDCreationParameters are the parameters needed to create the Tekton CRDs type CRDCreationParameters struct { Namespace string Context string PipelineName string
BranchIdentifier string PullRef prow.PullRefs SourceDir string PodTemplates map[string]*corev1.Pod ServiceAccount string Labels []string EnvVars []string DefaultImage string Apps []jenkinsv1.App VersionsDir string } // CreateMetaPipelineCRDs creates the Tekton CRDs needed to execute the meta pipeline. // The meta pipeline is responsible to checkout the source repository at the right revision, allows Jenkins-X Apps // to modify the pipeline (via modifying the configuration on the file system) and finally triggering the actual // pipeline build. // An error is returned in case the creation of the Tekton CRDs fails. func CreateMetaPipelineCRDs(params CRDCreationParameters) (*tekton.CRDWrapper, error) { parsedPipeline, err := createPipeline(params) if err != nil { return nil, err } labels, err := buildLabels(params) if err != nil { return nil, err } pipeline, tasks, structure, err := parsedPipeline.GenerateCRDs(params.PipelineName, params.BuildNumber, params.ResourceName, params.Namespace, params.PodTemplates, params.VersionsDir, nil, params.SourceDir, labels, params.DefaultImage) if err != nil { return nil, err } revision := params.PullRef.BaseSha if revision == "" { revision = params.PullRef.BaseBranch } resources := []*pipelineapi.PipelineResource{tekton.GenerateSourceRepoResource(params.ResourceName, &params.GitInfo, revision)} run := tekton.CreatePipelineRun(resources, pipeline.Name, pipeline.APIVersion, labels, params.ServiceAccount, nil, nil, nil) tektonCRDs, err := tekton.NewCRDWrapper(pipeline, tasks, resources, structure, run) if err != nil { return nil, err } return tektonCRDs, nil } // GetExtendingApps returns the list of apps which are installed in the cluster registered for extending the pipeline. // An app registers its interest in extending the pipeline by having the 'pipeline-extension' label set. 
func GetExtendingApps(jxClient versioned.Interface, namespace string) ([]jenkinsv1.App, error) { listOptions := metav1.ListOptions{} listOptions.LabelSelector = fmt.Sprintf(apps.AppTypeLabel+" in (%s)", apps.PipelineExtension) appsList, err := jxClient.JenkinsV1().Apps(namespace).List(listOptions) if err != nil { return nil, errors.Wrap(err, "error retrieving pipeline contributor apps") } return appsList.Items, nil } // createPipeline builds the parsed/typed pipeline which servers as input for the Tekton CRD creation. func createPipeline(params CRDCreationParameters) (*syntax.ParsedPipeline, error) { steps, err := buildSteps(params) if err != nil { return nil, errors.Wrap(err, "unable to create app extending pipeline steps") } stage := syntax.Stage{ Name: appExtensionStageName, Steps: steps, Agent: &syntax.Agent{ Image: determineDefaultStepImage(params.DefaultImage), }, } parsedPipeline := &syntax.ParsedPipeline{ Stages: []syntax.Stage{stage}, } env := buildEnvParams(params) parsedPipeline.AddContainerEnvVarsToPipeline(env) return parsedPipeline, nil } // buildSteps builds the meta pipeline steps. // The tasks of the meta pipeline are: // 1) make sure the right commits are merged // 2) create the effective pipeline and write it to disk // 3) one step for each extending app // 4) create Tekton CRDs for the meta pipeline func buildSteps(params CRDCreationParameters) ([]syntax.Step, error) { var steps []syntax.Step // 1) step := stepMergePullRefs(params.PullRef) steps = append(steps, step) // 2) step = stepEffectivePipeline(params) steps = append(steps, step) log.Logger().Debugf("creating pipeline steps for extending apps") // 3) for _, app := range params.Apps { if app.Spec.PipelineExtension == nil { log.Logger().Warnf("Skipping app %s in meta pipeline. 
It contains label %s with value %s, but does not contain PipelineExtension fields.", app.Name, apps.AppTypeLabel, apps.PipelineExtension) continue } extension := app.Spec.PipelineExtension step := syntax.Step{ Name: extension.Name, Image: extension.Image, Command: extension.Command, Arguments: extension.Args, } log.Logger().Debugf("App %s contributes with step %s", app.Name, util.PrettyPrint(step)) steps = append(steps, step) } // 4) step = stepCreateTektonCRDs(params) steps = append(steps, step) return steps, nil } func stepMergePullRefs(pullRefs prow.PullRefs) syntax.Step { // we only need to run the merge step in case there is anything to merge // Tekton has at this stage the base branch already checked out if len(pullRefs.ToMerge) == 0 { return stepSkip(mergePullRefsStepName, "Nothing to merge") } args := []string{"--verbose", "--baseBranch", pullRefs.BaseBranch, "--baseSHA", pullRefs.BaseSha} for _, mergeSha := range pullRefs.ToMerge { args = append(args, "--sha", mergeSha) } step := syntax.Step{ Name: mergePullRefsStepName, Comment: "Pipeline step merging pull refs", Command: "jx step git merge", Arguments: args, } return step } func stepEffectivePipeline(params CRDCreationParameters) syntax.Step { args := []string{"--output-dir", "."} if params.Context != "" { args = append(args, "--context", params.Context) } step := syntax.Step{ Name: createEffectivePipelineStepName, Comment: "Pipeline step creating the effective pipeline configuration", Command: "jx step syntax effective", Arguments: args, } return step } func stepCreateTektonCRDs(params CRDCreationParameters) syntax.Step { args := []string{"--clone-dir", filepath.Join(tektonBaseDir, params.SourceDir)} args = append(args, "--kind", params.PipelineKind) for prID := range params.PullRef.ToMerge { args = append(args, "--pr-number", prID) // there might be a batch build building multiple PRs, in which case we just use the first in this case break } args = append(args, "--service-account", 
params.ServiceAccount) args = append(args, "--source", params.SourceDir) args = append(args, "--branch", params.BranchIdentifier) args = append(args, "--build-number", params.BuildNumber) if params.Context != "" { args = append(args, "--context", params.Context) } for _, l := range params.Labels { args = append(args, "--label", l) } for _, e := range params.EnvVars { args = append(args, "--env", e) } step := syntax.Step{ Name: createTektonCRDsStepName, Comment: "Pipeline step to create the Tekton CRDs for the actual pipeline run", Command: "jx step create task", Arguments: args, } return step } func stepSkip(stepName string, msg string) syntax.Step { skipMsg := fmt.Sprintf("SKIP %s: %s", stepName, msg) step := syntax.Step{ Name: stepName, Comment: skipMsg, Command: "echo", Arguments: []string{fmt.Sprintf("'%s'", skipMsg)}, } return step } func determineDefaultStepImage(defaultImage string) string { if defaultImage != "" { return defaultImage } return syntax.DefaultContainerImage } func buildEnvParams(params CRDCreationParameters) []corev1.EnvVar { var envVars []corev1.EnvVar envVars = append(envVars, corev1.EnvVar{ Name: "JX_LOG_FORMAT", Value: "json", }) envVars = append(envVars, corev1.EnvVar{ Name: "BUILD_NUMBER", Value: params.BuildNumber, }) envVars = append(envVars, corev1.EnvVar{ Name: "PIPELINE_KIND", Value: params.PipelineKind, }) envVars = append(envVars, corev1.EnvVar{ Name: "PULL_REFS", Value: params.PullRef.String(), }) context := params.Context if context != "" { envVars = append(envVars, corev1.EnvVar{ Name: "PIPELINE_CONTEXT", Value: context, }) } gitInfo := params.GitInfo envVars = append(envVars, corev1.EnvVar{ Name: "SOURCE_URL", Value: gitInfo.URL, }) owner := gitInfo.Organisation if owner != "" { envVars = append(envVars, corev1.EnvVar{ Name: "REPO_OWNER", Value: owner, }) } repo := gitInfo.Name if repo != "" { envVars = append(envVars, corev1.EnvVar{ Name: "REPO_NAME", Value: repo, }) // lets keep the APP_NAME environment variable we need for 
previews envVars = append(envVars, corev1.EnvVar{ Name: "APP_NAME", Value: repo, }) } branch := params.BranchIdentifier if branch != "" { if kube.GetSliceEnvVar(envVars, "BRANCH_NAME") == nil { envVars = append(envVars, corev1.EnvVar{ Name: "BRANCH_NAME", Value: branch, }) } } if owner != "" && repo != "" && branch != "" { jobName := fmt.Sprintf("%s/%s/%s", owner, repo, branch) if kube.GetSliceEnvVar(envVars, "JOB_NAME") == nil { envVars = append(envVars, corev1.EnvVar{ Name: "JOB_NAME", Value: jobName, }) } } envVars = append(envVars, buildEnvVars(params)...) log.Logger().Debugf("step environment variables: %s", util.PrettyPrint(envVars)) return envVars } // TODO: Merge this with step_create_task's setBuildValues equivalent somewhere. func buildLabels(params CRDCreationParameters) (map[string]string, error) { labels := map[string]string{} labels[tekton.LabelOwner] = params.GitInfo.Organisation labels[tekton.LabelRepo] = params.GitInfo.Name labels[tekton.LabelBranch] = params.BranchIdentifier if params.Context != "" { labels[tekton.LabelContext] = params.Context } labels[tekton.LabelBuild] = params.BuildNumber // add any custom labels customLabels, err := util.ExtractKeyValuePairs(params.Labels, "=") if err != nil { return nil, err } return util.MergeMaps(labels, customLabels), nil } func buildEnvVars(params CRDCreationParameters) []corev1.EnvVar { var envVars []corev1.EnvVar vars, _ := util.ExtractKeyValuePairs(params.EnvVars, "=") for key, value := range vars { envVars = append(envVars, corev1.EnvVar{ Name: key, Value: value, }) } return envVars }
ResourceName string PipelineKind string BuildNumber string GitInfo gits.GitRepository
random_line_split
metapipeline.go
package metapipeline import ( "fmt" "path/filepath" jenkinsv1 "github.com/jenkins-x/jx/pkg/apis/jenkins.io/v1" "github.com/jenkins-x/jx/pkg/apps" "github.com/jenkins-x/jx/pkg/client/clientset/versioned" "github.com/jenkins-x/jx/pkg/gits" "github.com/jenkins-x/jx/pkg/kube" "github.com/jenkins-x/jx/pkg/prow" "github.com/jenkins-x/jx/pkg/tekton" "github.com/jenkins-x/jx/pkg/tekton/syntax" "github.com/jenkins-x/jx/pkg/util" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/jenkins-x/jx/pkg/log" "github.com/pkg/errors" pipelineapi "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" corev1 "k8s.io/api/core/v1" ) const ( appExtensionStageName = "app-extension" // mergePullRefsStepName is the meta pipeline step name for merging all pull refs into the workspace mergePullRefsStepName = "merge-pull-refs" // createEffectivePipelineStepName is the meta pipeline step name for the generation of the effective jenkins-x pipeline config createEffectivePipelineStepName = "create-effective-pipeline" // createTektonCRDsStepName is the meta pipeline step name for the Tekton CRD creation createTektonCRDsStepName = "create-tekton-crds" tektonBaseDir = "/workspace" ) // CRDCreationParameters are the parameters needed to create the Tekton CRDs type CRDCreationParameters struct { Namespace string Context string PipelineName string ResourceName string PipelineKind string BuildNumber string GitInfo gits.GitRepository BranchIdentifier string PullRef prow.PullRefs SourceDir string PodTemplates map[string]*corev1.Pod ServiceAccount string Labels []string EnvVars []string DefaultImage string Apps []jenkinsv1.App VersionsDir string } // CreateMetaPipelineCRDs creates the Tekton CRDs needed to execute the meta pipeline. // The meta pipeline is responsible to checkout the source repository at the right revision, allows Jenkins-X Apps // to modify the pipeline (via modifying the configuration on the file system) and finally triggering the actual // pipeline build. 
// An error is returned in case the creation of the Tekton CRDs fails. func CreateMetaPipelineCRDs(params CRDCreationParameters) (*tekton.CRDWrapper, error) { parsedPipeline, err := createPipeline(params) if err != nil { return nil, err } labels, err := buildLabels(params) if err != nil { return nil, err } pipeline, tasks, structure, err := parsedPipeline.GenerateCRDs(params.PipelineName, params.BuildNumber, params.ResourceName, params.Namespace, params.PodTemplates, params.VersionsDir, nil, params.SourceDir, labels, params.DefaultImage) if err != nil { return nil, err } revision := params.PullRef.BaseSha if revision == "" { revision = params.PullRef.BaseBranch } resources := []*pipelineapi.PipelineResource{tekton.GenerateSourceRepoResource(params.ResourceName, &params.GitInfo, revision)} run := tekton.CreatePipelineRun(resources, pipeline.Name, pipeline.APIVersion, labels, params.ServiceAccount, nil, nil, nil) tektonCRDs, err := tekton.NewCRDWrapper(pipeline, tasks, resources, structure, run) if err != nil { return nil, err } return tektonCRDs, nil } // GetExtendingApps returns the list of apps which are installed in the cluster registered for extending the pipeline. // An app registers its interest in extending the pipeline by having the 'pipeline-extension' label set. func GetExtendingApps(jxClient versioned.Interface, namespace string) ([]jenkinsv1.App, error) { listOptions := metav1.ListOptions{} listOptions.LabelSelector = fmt.Sprintf(apps.AppTypeLabel+" in (%s)", apps.PipelineExtension) appsList, err := jxClient.JenkinsV1().Apps(namespace).List(listOptions) if err != nil { return nil, errors.Wrap(err, "error retrieving pipeline contributor apps") } return appsList.Items, nil } // createPipeline builds the parsed/typed pipeline which servers as input for the Tekton CRD creation. 
func createPipeline(params CRDCreationParameters) (*syntax.ParsedPipeline, error) {
	steps, err := buildSteps(params)
	if err != nil {
		return nil, errors.Wrap(err, "unable to create app extending pipeline steps")
	}

	// The meta pipeline consists of exactly one stage holding all steps;
	// the stage-level agent image is the default for steps without their own image.
	stage := syntax.Stage{
		Name:  appExtensionStageName,
		Steps: steps,
		Agent: &syntax.Agent{
			Image: determineDefaultStepImage(params.DefaultImage),
		},
	}

	parsedPipeline := &syntax.ParsedPipeline{
		Stages: []syntax.Stage{stage},
	}

	// Propagate the common environment variables to all pipeline containers.
	env := buildEnvParams(params)
	parsedPipeline.AddContainerEnvVarsToPipeline(env)

	return parsedPipeline, nil
}

// buildSteps builds the meta pipeline steps.
// The tasks of the meta pipeline are:
// 1) make sure the right commits are merged
// 2) create the effective pipeline and write it to disk
// 3) one step for each extending app
// 4) create Tekton CRDs for the meta pipeline
func buildSteps(params CRDCreationParameters) ([]syntax.Step, error) {
	var steps []syntax.Step

	// 1)
	step := stepMergePullRefs(params.PullRef)
	steps = append(steps, step)

	// 2)
	step = stepEffectivePipeline(params)
	steps = append(steps, step)

	log.Logger().Debugf("creating pipeline steps for extending apps")
	// 3)
	for _, app := range params.Apps
	// 4)
	step = stepCreateTektonCRDs(params)
	steps = append(steps, step)

	return steps, nil
}

// stepMergePullRefs returns the step which merges the pull refs into the
// checked-out workspace, or a skip step when there is nothing to merge.
func stepMergePullRefs(pullRefs prow.PullRefs) syntax.Step {
	// we only need to run the merge step in case there is anything to merge
	// Tekton has at this stage the base branch already checked out
	if len(pullRefs.ToMerge) == 0 {
		return stepSkip(mergePullRefsStepName, "Nothing to merge")
	}

	args := []string{"--verbose", "--baseBranch", pullRefs.BaseBranch, "--baseSHA", pullRefs.BaseSha}
	for _, mergeSha := range pullRefs.ToMerge {
		args = append(args, "--sha", mergeSha)
	}

	step := syntax.Step{
		Name:      mergePullRefsStepName,
		Comment:   "Pipeline step merging pull refs",
		Command:   "jx step git merge",
		Arguments: args,
	}
	return step
}

// stepEffectivePipeline returns the step which writes the effective
// jenkins-x pipeline configuration into the current directory.
func stepEffectivePipeline(params CRDCreationParameters) syntax.Step {
	args := []string{"--output-dir", "."}
	if params.Context != "" {
		args = append(args, "--context", params.Context)
	}

	step := syntax.Step{
		Name:      createEffectivePipelineStepName,
		Comment:   "Pipeline step creating the effective pipeline configuration",
		Command:   "jx step syntax effective",
		Arguments: args,
	}
	return step
}

// stepCreateTektonCRDs returns the step which creates the Tekton CRDs for the
// actual pipeline run via 'jx step create task'.
func stepCreateTektonCRDs(params CRDCreationParameters) syntax.Step {
	args := []string{"--clone-dir", filepath.Join(tektonBaseDir, params.SourceDir)}
	args = append(args, "--kind", params.PipelineKind)
	for prID := range params.PullRef.ToMerge {
		args = append(args, "--pr-number", prID)
		// there might be a batch build building multiple PRs, in which case we just use the first in this case
		// NOTE(review): ranging over a map yields a nondeterministic order, so "first" is arbitrary here — confirm this is acceptable for batch builds
		break
	}
	args = append(args, "--service-account", params.ServiceAccount)
	args = append(args, "--source", params.SourceDir)
	args = append(args, "--branch", params.BranchIdentifier)
	args = append(args, "--build-number", params.BuildNumber)
	if params.Context != "" {
		args = append(args, "--context", params.Context)
	}
	// pass custom labels and env vars through to the generated CRDs
	for _, l := range params.Labels {
		args = append(args, "--label", l)
	}
	for _, e := range params.EnvVars {
		args = append(args, "--env", e)
	}

	step := syntax.Step{
		Name:
	createTektonCRDsStepName,
		Comment:   "Pipeline step to create the Tekton CRDs for the actual pipeline run",
		Command:   "jx step create task",
		Arguments: args,
	}
	return step
}

// stepSkip returns a no-op placeholder step which just echoes a SKIP message
// for the given step name.
func stepSkip(stepName string, msg string) syntax.Step {
	skipMsg := fmt.Sprintf("SKIP %s: %s", stepName, msg)
	step := syntax.Step{
		Name:      stepName,
		Comment:   skipMsg,
		Command:   "echo",
		Arguments: []string{fmt.Sprintf("'%s'", skipMsg)},
	}
	return step
}

// determineDefaultStepImage returns the given default image, falling back to
// the syntax package's default container image when none is configured.
func determineDefaultStepImage(defaultImage string) string {
	if defaultImage != "" {
		return defaultImage
	}

	return syntax.DefaultContainerImage
}

// buildEnvParams assembles the common environment variables (build metadata,
// repository coordinates, branch/job names) injected into every pipeline container.
func buildEnvParams(params CRDCreationParameters) []corev1.EnvVar {
	var envVars []corev1.EnvVar

	envVars = append(envVars, corev1.EnvVar{
		Name:  "JX_LOG_FORMAT",
		Value: "json",
	})

	envVars = append(envVars, corev1.EnvVar{
		Name:  "BUILD_NUMBER",
		Value: params.BuildNumber,
	})

	envVars = append(envVars, corev1.EnvVar{
		Name:  "PIPELINE_KIND",
		Value: params.PipelineKind,
	})

	envVars = append(envVars, corev1.EnvVar{
		Name:  "PULL_REFS",
		Value: params.PullRef.String(),
	})

	// optional variables are only set when the corresponding parameter is non-empty
	context := params.Context
	if context != "" {
		envVars = append(envVars, corev1.EnvVar{
			Name:  "PIPELINE_CONTEXT",
			Value: context,
		})
	}

	gitInfo := params.GitInfo
	envVars = append(envVars, corev1.EnvVar{
		Name:  "SOURCE_URL",
		Value: gitInfo.URL,
	})

	owner := gitInfo.Organisation
	if owner != "" {
		envVars = append(envVars, corev1.EnvVar{
			Name:  "REPO_OWNER",
			Value: owner,
		})
	}

	repo := gitInfo.Name
	if repo != "" {
		envVars = append(envVars, corev1.EnvVar{
			Name:  "REPO_NAME",
			Value: repo,
		})

		// lets keep the APP_NAME environment variable we need for previews
		envVars = append(envVars, corev1.EnvVar{
			Name:  "APP_NAME",
			Value: repo,
		})
	}

	branch := params.BranchIdentifier
	if branch != "" {
		// don't clobber a BRANCH_NAME that is already present
		if kube.GetSliceEnvVar(envVars, "BRANCH_NAME") == nil {
			envVars = append(envVars, corev1.EnvVar{
				Name:  "BRANCH_NAME",
				Value: branch,
			})
		}
	}

	if owner != "" && repo != "" && branch != "" {
		jobName := fmt.Sprintf("%s/%s/%s", owner, repo, branch)
		if kube.GetSliceEnvVar(envVars, "JOB_NAME") == nil
	{
			envVars = append(envVars, corev1.EnvVar{
				Name:  "JOB_NAME",
				Value: jobName,
			})
		}
	}

	// append any user-supplied key=value environment variables last
	envVars = append(envVars, buildEnvVars(params)...)

	log.Logger().Debugf("step environment variables: %s", util.PrettyPrint(envVars))
	return envVars
}

// buildLabels builds the label map applied to the generated CRDs from the
// repository coordinates, build metadata and any custom key=value labels.
// TODO: Merge this with step_create_task's setBuildValues equivalent somewhere.
func buildLabels(params CRDCreationParameters) (map[string]string, error) {
	labels := map[string]string{}
	labels[tekton.LabelOwner] = params.GitInfo.Organisation
	labels[tekton.LabelRepo] = params.GitInfo.Name
	labels[tekton.LabelBranch] = params.BranchIdentifier
	if params.Context != "" {
		labels[tekton.LabelContext] = params.Context
	}
	labels[tekton.LabelBuild] = params.BuildNumber

	// add any custom labels
	customLabels, err := util.ExtractKeyValuePairs(params.Labels, "=")
	if err != nil {
		return nil, err
	}
	// custom labels win over the built-in ones on key collision — presumably
	// intentional; confirm against util.MergeMaps semantics
	return util.MergeMaps(labels, customLabels), nil
}

// buildEnvVars converts the key=value pairs in params.EnvVars into EnvVar entries.
// NOTE(review): the error from ExtractKeyValuePairs is silently discarded here,
// unlike in buildLabels — malformed entries are dropped without any warning.
func buildEnvVars(params CRDCreationParameters) []corev1.EnvVar {
	var envVars []corev1.EnvVar

	vars, _ := util.ExtractKeyValuePairs(params.EnvVars, "=")
	for key, value := range vars {
		envVars = append(envVars, corev1.EnvVar{
			Name:  key,
			Value: value,
		})
	}

	return envVars
}
	{
		// apps carrying the pipeline-extension label are expected to declare
		// PipelineExtension fields; skip (with a warning) those that don't
		if app.Spec.PipelineExtension == nil {
			log.Logger().Warnf("Skipping app %s in meta pipeline. It contains label %s with value %s, but does not contain PipelineExtension fields.", app.Name, apps.AppTypeLabel, apps.PipelineExtension)
			continue
		}

		// each extending app contributes exactly one step built from its extension spec
		extension := app.Spec.PipelineExtension
		step := syntax.Step{
			Name:      extension.Name,
			Image:     extension.Image,
			Command:   extension.Command,
			Arguments: extension.Args,
		}

		log.Logger().Debugf("App %s contributes with step %s", app.Name, util.PrettyPrint(step))
		steps = append(steps, step)
	}
conditional_block