file_name
large_stringlengths
4
140
prefix
large_stringlengths
0
39k
suffix
large_stringlengths
0
36.1k
middle
large_stringlengths
0
29.4k
fim_type
large_stringclasses
4 values
execution.rs
// Copyright 2018-2021 Parity Technologies (UK) Ltd. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::reflect::{ ContractEnv, DispatchError, }; use core::{ any::TypeId, convert::Infallible, mem::ManuallyDrop, }; use ink_env::{ Environment, ReturnFlags, }; use ink_primitives::{ Key, KeyPtr, }; use ink_storage::{ alloc, alloc::ContractPhase, traits::{ pull_spread_root, push_spread_root, SpreadAllocate, SpreadLayout, }, }; /// The root key of the ink! smart contract. /// /// # Note /// /// - This is the key where storage allocation, pushing and pulling is rooted /// using the `SpreadLayout` and `SpreadAllocate` traits primarily. /// - This trait is automatically implemented by the ink! codegen. /// - The existence of this trait allows to customize the root key in future /// versions of ink! if needed. pub trait ContractRootKey { const ROOT_KEY: Key; } /// Returns `Ok` if the caller did not transfer additional value to the callee. /// /// # Errors /// /// If the caller did send some amount of transferred value to the callee. #[inline] pub fn deny_payment<E>() -> Result<(), DispatchError> where E: Environment, { let transferred = ink_env::transferred_balance::<E>(); if transferred != <E as Environment>::Balance::from(0_u32) { return Err(DispatchError::PaidUnpayableMessage) } Ok(()) } /// Configuration for execution of ink! constructor. 
#[derive(Debug, Copy, Clone)] pub struct ExecuteConstructorConfig { /// Yields `true` if the dynamic storage allocator has been enabled. /// /// # Note /// /// Authors can enable it via `#[ink::contract(dynamic_storage_allocator = true)]`. pub dynamic_storage_alloc: bool, } /// Executes the given ink! constructor. /// /// # Note /// /// The closure is supposed to already contain all the arguments that the real /// constructor message requires and forwards them. #[inline] pub fn execute_constructor<Contract, F, R>( config: ExecuteConstructorConfig, f: F, ) -> Result<(), DispatchError> where Contract: SpreadLayout + ContractRootKey, F: FnOnce() -> R, <private::Seal<R> as ConstructorReturnType<Contract>>::ReturnValue: scale::Encode, private::Seal<R>: ConstructorReturnType<Contract>, { if config.dynamic_storage_alloc { alloc::initialize(ContractPhase::Deploy); } let result = ManuallyDrop::new(private::Seal(f())); match result.as_result() { Ok(contract) => { // Constructor is infallible or is fallible but succeeded. // // This requires us to sync back the changes of the contract storage. let root_key = <Contract as ContractRootKey>::ROOT_KEY; push_spread_root::<Contract>(contract, &root_key); if config.dynamic_storage_alloc { alloc::finalize(); } Ok(()) } Err(_) => { // Constructor is fallible and failed. // // We need to revert the state of the transaction. ink_env::return_value::< <private::Seal<R> as ConstructorReturnType<Contract>>::ReturnValue, >( ReturnFlags::default().set_reverted(true), result.return_value(), ) } } } /// Initializes the ink! contract using the given initialization routine. /// /// # Note /// /// - This uses `SpreadAllocate` trait in order to default initialize the /// ink! smart contract before calling the initialization routine. /// - This either returns `Contract` or `Result<Contract, E>` depending /// on the return type `R` of the initializer closure `F`. 
/// If `R` is `()` then `Contract` is returned and if `R` is any type of /// `Result<(), E>` then `Result<Contract, E>` is returned. /// Other return types for `F` than the ones listed above are not allowed. #[inline] pub fn initialize_contract<Contract, F, R>( initializer: F, ) -> <R as InitializerReturnType<Contract>>::Wrapped where Contract: ContractRootKey + SpreadAllocate, F: FnOnce(&mut Contract) -> R, R: InitializerReturnType<Contract>, { let mut key_ptr = KeyPtr::from(<Contract as ContractRootKey>::ROOT_KEY); let mut instance = <Contract as SpreadAllocate>::allocate_spread(&mut key_ptr); let result = initializer(&mut instance); result.into_wrapped(instance) } mod private { /// Seals the implementation of `ContractInitializerReturnType`. pub trait Sealed {} impl Sealed for () {} impl<T, E> Sealed for Result<T, E> {} /// A thin-wrapper type that automatically seals its inner type. /// /// Since it is private it can only be used from within this crate. /// We need this type in order to properly seal the `ConstructorReturnType` /// trait from unwanted external trait implementations. #[repr(transparent)] pub struct Seal<T>(pub T); impl<T> Sealed for Seal<T> {} } /// Guards against using invalid contract initializer types. /// /// # Note /// /// Currently the only allowed types are `()` and `Result<(), E>`
/// where `E` is some unspecified error type. /// If the contract initializer returns `Result::Err` the utility /// method that is used to initialize an ink! smart contract will /// revert the state of the contract instantiation. pub trait ConstructorReturnType<C>: private::Sealed { /// Is `true` if `Self` is `Result<C, E>`. const IS_RESULT: bool = false; /// The error type of the constructor return type. /// /// # Note /// /// For infallible constructors this is `core::convert::Infallible`. type Error; /// The type of the return value of the constructor. /// /// # Note /// /// For infallible constructors this is `()` whereas for fallible /// constructors this is the actual return value. Since we only ever /// return a value in case of `Result::Err` the `Result::Ok` value /// does not matter. type ReturnValue; /// Converts the return value into a `Result` instance. /// /// # Note /// /// For infallible constructor returns this always yields `Ok`. fn as_result(&self) -> Result<&C, &Self::Error>; /// Returns the actual return value of the constructor. /// /// # Note /// /// For infallible constructor returns this always yields `()` /// and is basically ignored since this does not get called /// if the constructor did not fail. fn return_value(&self) -> &Self::ReturnValue; } impl<C> ConstructorReturnType<C> for private::Seal<C> { type Error = Infallible; type ReturnValue = (); #[inline] fn as_result(&self) -> Result<&C, &Self::Error> { Ok(&self.0) } #[inline] fn return_value(&self) -> &Self::ReturnValue { &() } } impl<C, E> ConstructorReturnType<C> for private::Seal<Result<C, E>> { const IS_RESULT: bool = true; type Error = E; type ReturnValue = Result<C, E>; #[inline] fn as_result(&self) -> Result<&C, &Self::Error> { self.0.as_ref() } #[inline] fn return_value(&self) -> &Self::ReturnValue { &self.0 } } /// Trait used to convert return types of contract initializer routines. /// /// Only `()` and `Result<(), E>` are allowed contract initializer return types. 
/// For `WrapReturnType<C>` where `C` is the contract type the trait converts /// `()` into `C` and `Result<(), E>` into `Result<C, E>`. pub trait InitializerReturnType<C>: private::Sealed { type Wrapped; /// Performs the type conversion of the initialization routine return type. fn into_wrapped(self, wrapped: C) -> Self::Wrapped; } impl<C> InitializerReturnType<C> for () { type Wrapped = C; #[inline] fn into_wrapped(self, wrapped: C) -> C { wrapped } } impl<C, E> InitializerReturnType<C> for Result<(), E> { type Wrapped = Result<C, E>; #[inline] fn into_wrapped(self, wrapped: C) -> Self::Wrapped { self.map(|_| wrapped) } } /// Configuration for execution of ink! messages. #[derive(Debug, Copy, Clone)] pub struct ExecuteMessageConfig { /// Yields `true` if the ink! message accepts payment. /// /// # Note /// /// If no ink! message within the same ink! smart contract /// is payable then this flag will be `true` since the check /// then is moved before the message dispatch as an optimization. pub payable: bool, /// Yields `true` if the ink! message might mutate contract storage. /// /// # Note /// /// This is usually true for `&mut self` ink! messages. pub mutates: bool, /// Yields `true` if the dynamic storage allocator has been enabled. /// /// # Note /// /// Authors can enable it via `#[ink::contract(dynamic_storage_allocator = true)]`. pub dynamic_storage_alloc: bool, } /// Initiates an ink! message call with the given configuration. /// /// Returns the contract state pulled from the root storage region upon success. /// /// # Note /// /// This work around that splits executing an ink! message into initiate /// and finalize phases was needed due to the fact that `is_result_type` /// and `is_result_err` macros do not work in generic contexts. 
#[inline] pub fn initiate_message<Contract>( config: ExecuteMessageConfig, ) -> Result<Contract, DispatchError> where Contract: SpreadLayout + ContractEnv, { if !config.payable { deny_payment::<<Contract as ContractEnv>::Env>()?; } if config.dynamic_storage_alloc { alloc::initialize(ContractPhase::Call); } let root_key = Key::from([0x00; 32]); let contract = pull_spread_root::<Contract>(&root_key); Ok(contract) } /// Finalizes an ink! message call with the given configuration. /// /// This dispatches into fallible and infallible message finalization /// depending on the given `success` state. /// /// - If the message call was successful the return value is simply returned /// and cached storage is pushed back to the contract storage. /// - If the message call failed the return value result is returned instead /// and the transaction is signalled to be reverted. /// /// # Note /// /// This work around that splits executing an ink! message into initiate /// and finalize phases was needed due to the fact that `is_result_type` /// and `is_result_err` macros do not work in generic contexts. #[inline] pub fn finalize_message<Contract, R>( success: bool, contract: &Contract, config: ExecuteMessageConfig, result: &R, ) -> Result<(), DispatchError> where Contract: SpreadLayout, R: scale::Encode + 'static, { if success { finalize_infallible_message(contract, config, result) } else { finalize_fallible_message(result) } } #[inline] fn finalize_infallible_message<Contract, R>( contract: &Contract, config: ExecuteMessageConfig, result: &R, ) -> Result<(), DispatchError> where Contract: SpreadLayout, R: scale::Encode + 'static, { if config.mutates { let root_key = Key::from([0x00; 32]); push_spread_root::<Contract>(contract, &root_key); } if config.dynamic_storage_alloc { alloc::finalize(); } if TypeId::of::<R>() != TypeId::of::<()>() { // In case the return type is `()` we do not return a value. 
ink_env::return_value::<R>(ReturnFlags::default(), result) } Ok(()) } #[inline] fn finalize_fallible_message<R>(result: &R) -> ! where R: scale::Encode + 'static, { // There is no need to push back the intermediate results of the // contract since the transaction is going to be reverted. ink_env::return_value::<R>(ReturnFlags::default().set_reverted(true), result) }
random_line_split
fate_of_ashborne.py
#======================================== #Title: The Fate of Ashborne #Creator: Adam Majmudar #Date Created: Friday, January 11th, 2019 #======================================== import text_adventure_engine as txt import text_adventure_gui as gui """ Add intial phrase that establishes goal to 'find out what happened' Can make it so that the names of objects don't reveal much but the descriptions when looking at them give hints Add events which result from incorrect actions (ie. using the harpoon on the dog, axe to open chest) Events that are hidden, and you can only activate them once other events are completed (events reveal other events) - This can apply to conversations
#Ease of Use for True and False T = True F = False towering_cliffs = txt.Location("The Towering Cliffs", -1, 0, 0, T, [T, T, F, F, F, F]) towering_cliffs.initial_description = "A few hundred feet below, a river rushes by and beats the side of the precipitous cliff. You look out in the distance and can see what looks to be a large town burning with embers. Smoke rises from the ruins. You worry as you try to think what could have caused this..." towering_cliffs.description = "A few hundred feet below, a river rushes by and beats the side of the precipitous cliff. You can see a burning town in the distance." river_bank = txt.Location("The River Bank", -1, 1, 0, T, [F, T, T, F, F, F]) river_bank.description = "A river rushes by rapidly in front of you, crashing into rocks and causing the icy water to splash up against you." deepest_forest = txt.Location("The Forest", 0, -3, 0, T, [T, T, F, F, F, F]) deepest_forest.initial_description = "This is the farthest point in the forest you should go. The forest ahead is full of sharp tree trunks and toppled trees. It would be unsafe to continue farther. For now at least..." deepest_forest.description = "This is the farthest point in the forest you should go. The forest ahead is full of sharp tree trunks and toppled trees. It would be unsafe to continue farther." forest_heart = txt.Location("The Heart of the Forest", 0, -2, 0, T, [T, T, T, F, F, F]) forest_heart.description = "The vast swathes of pine trees extend in all directions." forest = txt.Location("The Forest", 0, -1, 0, T, [T, T, T, F, F, F]) forest.description = "There is very little light coming through the tree tops. Thick mist gathers around the trunks of the towering oak trees." forest_clearing = txt.Location("A Clearing in the Forest", 0, 0, 0, T, [T, F, T, T, F, F]) forest_clearing.description = "Dim light shines through the thick canopy of oak trees above. The earth is ominously scorched and there is burnt tree bark strewn across the ground. 
There are large gates to the EAST with a padlock chaining them shut." wheat_field = txt.Location("A Wheat Field", 0, 1, 0, T, [F, F, T, T, F, F]) wheat_field.initial_description = "Bent over stalks of wheat are all over and there is ash on the ground. What could have done this..." wheat_field.description = "Bent over stalks of wheat are all over and there is ash on the ground. There is a shed at the NORTH end of the field, but the door appears to be locked." shed = txt.Location("The Shed", 0, 2, 0, T, [F, F, T, F, F, F]) shed.description = "It is dark inside. There are cobwebs all across the walls. There are improvised shelves made of wood planks which hold up various tools and trinkets." forest_cave = txt.Location("A Cave in the Middle of the Forest", 1, -3, 0, T, [T, F, F, T, F, F]) forest_cave.description = "The cave is pitch black and you cannot see anything. You hear the steady dripping of water from the ceiling of the cave to the floor" ruined_forest = txt.Location("The Forest", 1, -2, 0, T, [T, F, T, T, F, F]) ruined_forest.description = "The trees in this area are all carelessly cut to the stumps. Ash lines the ground all around and the smell of burnt wood permeates through the air." deep_forest = txt.Location("The Deep Forest", 1, -1, 0, T, [F, F, T, T, F, F]) deep_forest.description = "Trees are densely packed all around you. There is an eerie silence that denotes the absence of much wildlife in this area." courtyard = txt.Location("The Courtyard", 1, 0, 0, T, [T, F, F, T, F, F]) courtyard.initial_description = "The ground is unkempt and scorched all around. In front of you is a dilapidated house. You notice some firewood lying on the porch, newly chopped. Someone - or something - has been here recently..." courtyard.description = "The ground is unkempt and scorched all around. In front of you is a dilapidated house. The door is boarded shut with wood." 
lake = txt.Location("The Lake", 1, 1, 0, T, [F, T, T, F, F, F]) lake.description = "The water is dark and dirty. The lake extends for a few hundred feet, no more." house = txt.Location("The Dilapidated House", 2, 0, 0, T, [F, F, F, T, F, F]) house.description = "The house is messy. Food is lying around everywhere. A layer of dust covers every surface." basement = txt.Location("The Basement", 2, 1, -1, T, [F, F, F, F, T, F]) basement.description = "It is dark and difficult to see. Cobwebs line the wooden ceilings." garden = txt.Location("The Garden Behind the House", 2, 1, 0, T, [F, F, F, T, F, F]) garden.description = "There might once have been flowers and plants here but the only things that remain are ashes and wilted stems. The back of the house and an entrance to its basement lies before you." bucket = txt.Object("Bucket", -1, 0, 0, T) bucket.room_location = "a toppled {} next to some rocks" bucket.description = "The pail is sturdy and is made of some metal, probably iron judging by the patches of rust on the inside." full_bucket = txt.Object("Full Bucket", -1, 1, 0, F) full_bucket.description = "The bucket is filled to the brim with water from the river." axe = txt.Object("Axe", 0, -3, 0, T) axe.room_location = "an {} wedged in a tree stump" axe.description = "Just a normal old axe... for chopping things..." grain = txt.Object("Grain", 0, 1, 0, T) grain.room_location = "some {} on the ground" grain.description = "Its just regular old grain. Probably wheat or something of that kind." #Can edit last line of urns description to change difficulty clay_urn = txt.Object("Clay Urn", 0, 2, 0, T, takeable = F) clay_urn.room_location = "a {} on the shelf" clay_urn.description = "The urn has faded patterns on it. There is an IRON KEY at the bottom but the hole on top is too small to reach through with your hand. There must be some way to get the key to the top..." iron_key = txt.Object("Iron Key", 0, 2, 0, F) iron_key.description = "Its just a rusted iron key. 
Its unusually large." harpoon = txt.Object("Harpoon", 1, -3, 0, F) harpoon.room_location = "a {} laying against a large rock" harpoon.description = "It is sharpened to a point. It would be perfect to stab through something." #Can edit last line of birds description to change difficulty bird = txt.Object("Bird", 1, -1, 0, T, takeable = F) bird.room_location = "a single {} high in the trees" bird.description = "It appears to be a raven. There is something shiny around the birds foot, possibly a key. If only there were some way to make it come down from the trees..." wooden_key = txt.Object("Wooden Key", 1, -1, 0, F) wooden_key.room_location = "a {} on the ground" wooden_key.description = "It is a rather poorly made key. It seems to be made by someone with little experience in craftsmanship" fish = txt.Object("Fish", 1, 1, 0, T, takeable = F) fish.room_location = "a single {} visible in the lake" fish.description = "The fish is a muted brownish color and is around a foot in length. It is the only fish you can see in the lake. Something terrible must have happened to the others..." dead_fish = txt.Object("Dead Fish", 1, 1, 0, F) dead_fish.description = "The dead fish is a muted brownish color and is around a foot in length. It reeks of rotting carcas and salt water." torch = txt.Object("Torch", 1, 0, 0, T) torch.room_location = "a {} mounted on front of the house" torch.description = "The light emanating from the torch is bright." chest = txt.Object("Chest", 2, 0, 0, T, takeable = F) chest.room_location = "a {} in the corner" chest.description = "The chest is sturdy and rectangular. It is made of iron and is locked. It has a gold lock hole." golden_key = txt.Object("Golden Key", 2, 1, -1, T) golden_key.description = "The key is clean and unblemished." 
golden_key.room_location = "a {} lying on a shelf mounted to the wall" hound = txt.Object("Hound", 2, 1, 0, T, takeable = F) hound.room_location = "a {} barking at you fiercely and guarding the entrance to the basement angrily. You won't be able to get past him until he calms down." hound.description = "The beast's ribs are visible through his skin. He looks starved." sleeping_hound = txt.Object("Sleeping Hound", 2, 1, 0, F, takeable = F) sleeping_hound.room_location = "a {} lying by the entrance to the basement" sleeping_hound.description = "The hound is sleeping soundly. He must have enjoyed that meal." bucket_event = txt.Event(-1, 1, 0, bucket) bucket_event.message = "You dip the bucket in the river and the rushing water quickly fills it to the brim. You now have a FULL BUCKET." bucket_event.add_delete(bucket) bucket_event.add_receive(full_bucket) iron_key_event = txt.Event(0, 0, 0, iron_key) iron_key_event.message = "You walk up to the gates and try the iron key. It fits perfectly into the padlock and the gates are now open. You can now go EAST." iron_key_event.add_delete(iron_key) iron_key_event.add_exit_change(forest_clearing, [T, T, T, T, F, F]) iron_key_event.add_description(forest_clearing, "Dim light shines through the thick canopy of oak trees above. The earth is ominously scorched and there is burnt tree bark strewn across the ground. The gates to the EAST are now swung open and the padlock is on the ground.") wooden_key_event = txt.Event(0, 1, 0, wooden_key) wooden_key_event.message = "You walk toward the shed at the north end of the field pull out the key. It fits nicely into the keyhole in the shed door and you hear a click as the shed opens. You can now go NORTH." 
wooden_key_event.add_delete(wooden_key) wooden_key_event.add_exit_change(wheat_field, [T, F, T, T, F, F]) wooden_key_event.add_description(wheat_field, "Bent over stalks of wheat are all over and there is ash on the ground.") full_bucket_event = txt.Event(0, 2, 0, full_bucket) full_bucket_event.message = "You pour the water into the urn and watch as the IRON KEY inside rises with the water to the top and is now in your reach." full_bucket_event.add_reveal(iron_key) full_bucket_event.add_delete(full_bucket) full_bucket_event.add_description(clay_urn, "The urn has faded patterns on it. It is a normal old urn.") torch_event = txt.Event(1, -3, 0, torch) torch_event.message = "You mount the torch on the wall and it illuminates the cave fully. You can now see the vastness of the underground cavern that extends below you. You also see a HARPOON lying against the cave walls." torch_event.add_description(forest_cave, "The cave is illuminated with the torch. You can see the vastness of the underground cavern that extends below you and hear the steady dripping of water from the ceiling of the cave to the floor.") torch_event.add_reveal(harpoon) torch_event.add_delete(torch) grain_event = txt.Event(1, -1, 0, grain) grain_event.message = "A BIRD swoops down from the trees to nibble at the grain, dropping a WOODEN KEY on the ground. After quickly finishing its meal, the BIRD flies off." grain_event.add_delete(grain, bird) grain_event.add_reveal(wooden_key) axe_event = txt.Event(1, 0, 0, axe) axe_event.message = "You chop the boards blocking the door to the house. The doorway is now open so you can go EAST." axe_event.add_exit_change(courtyard, [T, T, F, T, F, F]) axe_event.add_description(courtyard, "The ground is unkempt and scorched all around. In front of you is a dilapidated house.") harpoon_event = txt.Event(1, 1, 0, harpoon) harpoon_event.message = "You stand still as the fish swims its way toward you. 
Then, in one quick motion, you jab the harpoon into the water, piercing right through its flesh. You now have a DEAD FISH." harpoon_event.add_receive(dead_fish) harpoon_event.add_delete(fish) golden_key_event = txt.Event(2, 0, 0, golden_key) golden_key_event.message = "There is a book inside. You lift it up and open. It consists of numerous handwritten messages. You turn to the last page of the notebook with writing and read on: 'Add some message here about the dragon but it can't be cringe.'" golden_key_event.add_delete(golden_key) dead_fish_event = txt.Event(2, 1, 0, dead_fish) dead_fish_event.message = "You drop the dead fish at the feet of the HOUND. He devours it ravenously and seemingly gives you a look of thankfullness. Finally, he gives a grunt and falls soundly asleep. The basement entrance is now unguarded and you can now go DOWN" dead_fish_event.add_delete(dead_fish, hound) dead_fish_event.add_reveal(sleeping_hound) dead_fish_event.add_exit_change(garden, [F, F, F, T, F, T]) player = txt.Player(0, 0, 0) #======================================== gui.start_engine()
"""
random_line_split
svm_1_0_0.py
#!/usr/bin/env python """ usage: svm.py unified_input.csv engine_score_column_name i.e. : svm.py omssa_2_1_6_unified.csv 'OMSSA:pvalue' Writes a new file with added column "SVMscore" which is the distance to the separating hyperplane of a Percolator-like support vector machine. """ import numpy as np import sklearn from sklearn import svm from sklearn.cross_validation import StratifiedKFold from sklearn.preprocessing import Imputer from collections import Counter, defaultdict from random import random import csv import re import os import argparse from misc import ( get_score_colname_and_order, field_to_float, unify_sequence, calc_FDR, scale_scores, row_is_decoy, field_to_bayes_float, get_mz_values, ) SCALER = ( sklearn.preprocessing.RobustScaler() ) # RobustScaler() seems to be most robust ;) PROTON = 1.00727646677 class SVMWrapper(dict): def __init__(self): self._svm_score_name = "SVMscore" self.counter = { # counting the # of possible training PSMs "target": 0, "decoy": 0, "positive": 0, "negative": 0, "unknown": 0, "parsed PSMs": 0, } self.results = {} self.shitty_decoy_seqs = set() # is overwritten by find_shitty_decoys() self.mgf_lookup = {} self.pep_to_mz = {} if __name__ == "__main__": self.parse_options() # parse command line args and set options self.set_input_csv() self.observed_charges = set() self.used_extra_fields = set() self.decoy_train_prob = ( None # probability to include decoy PSMs as negative training examples ) self.maximum_proteins_per_line = 0 self.tryptic_aas = set(["R", "K", "-"]) self.delim_regex = re.compile( r"<\|>|\;" ) # regex to split a line by both ";" and "<|>" return def parse_options(self): """ parses the command line args for options/parameters """ parser = argparse.ArgumentParser() parser.add_argument( "-i", "--input_csv", type=str, help="Input CSV path(s)", required=True, nargs="+", ) parser.add_argument( "-o", "--output_csv", type=str, help="Output CSV path", required=True ) parser.add_argument( "-k", "--kernel", type=str, 
default="rbf", help='SVM kernel type ("rbf", "linear", "poly" or "sigmoid")', ) parser.add_argument( "-c", type=float, default=1.0, help="Penalty parameter C of the error term" ) parser.add_argument( "-g", "--gamma", type=str, default="auto", help="Gamma parameter of the SVM.", ) parser.add_argument( "-r", "--mb_ram", type=float, default=4000, help="Available RAM in megabytes, for SVM calculation", ) parser.add_argument( "-f", "--fdr_cutoff", type=float, default=0.01, help="Target PSMs with a lower FDR will be used as a " "positive training set", ) parser.add_argument( "-x", "--columns_as_features", type=str, nargs="+", default=[ "MS-GF:RawScore", "MS-GF:DeNovoScore", "MS-GF:SpecEValue", "MS-GF:EValue", "OMSSA:evalue", "OMSSA:pvalue", "X\!Tandem:expect", "X\!Tandem:hyperscore", ], help="Columns that should be used as a feature directly " "(e.g. secondary scores). Will be converted to float", ) parser.add_argument( "-d", "--dump_svm_matrix", type=str, default=False, help="Dump SVM matrix in PIN (Percolator input) format " "to the specified path, mostly for debugging " "and benchmarking.", ) arg_dict = vars(parser.parse_args()) # convert to dict self.update(arg_dict) try: self["gamma"] = float(self["gamma"]) except ValueError: assert ( self["gamma"] == "auto" ), "Invalid gamma param: " '"{0}", using "auto" instead.'.format( self["gamma"] ) def set_input_csv(self): """ distinguishes one vs. many unified input csv files and either sets the single csv as input, or merges all csvs and sets the merged csv as input. """ if len(self["input_csv"]) > 1: raise Exception("You must only specify *one* unified CSV file!") self.csv_path = self["input_csv"][0] print("Using input file", self.csv_path) def find_shitty_decoys(self): """ Finds and notes decoys that share their sequence with a target PSM. Also counts the number of targets and decoys to get a quick estimate of how many positive/negative training examples can be "claimed". 
""" target_seqs = set() decoy_seqs = set() with open(self.csv_path, "r") as f: reader = csv.DictReader(f) sorted_reader = sorted( reader, reverse=self["bigger_scores_better"], key=lambda d: float(d[self.col_for_sorting]), ) for row in sorted_reader: self.observed_charges.add(int(row["Charge"])) if row_is_decoy(row): decoy_seqs.add(unify_sequence(row["Sequence"])) self.counter["decoy"] += 1 else: target_seqs.add(unify_sequence(row["Sequence"])) self.counter["target"] += 1 self.shitty_decoy_seqs = target_seqs.intersection(decoy_seqs) if len(self.shitty_decoy_seqs) > 0: print( "Warning! Found {0} sequences that are target AND decoy " "(immutable peptides?). These will not be used for training.\n".format( len(self.shitty_decoy_seqs) ) ) return def determine_csv_sorting(self): with open(self.csv_path, "r") as in_file: reader = csv.DictReader(in_file) ( self.col_for_sorting, self["bigger_scores_better"], ) = get_score_colname_and_order(reader.fieldnames) if self.col_for_sorting == self._svm_score_name: self._svm_score_name = self._svm_score_name + "2" print( "CSV will be sorted by column {0} (reverse={1}" ")".format(self.col_for_sorting, self["bigger_scores_better"]) ) for feat in self["columns_as_features"]: if feat in reader.fieldnames and feat != self.col_for_sorting: self.used_extra_fields.add(feat) def sort_by_rank(self, rowdict): score = float(rowdict[self.col_for_sorting]) spec_title = rowdict["Spectrum Title"] return (spec_title, score) @staticmethod def parse_protein_ids(csv_field, sep="<|>"): """ Turns the unified CSV column "Protein ID" into a set of all protein IDs. """ clean = csv_field.replace("decoy_", "").strip() prot_id_set = set(clean.split(sep)) return prot_id_set def count_intra_set_features(self): """ intra-set features as calculated by Percolator: - num_pep: Number of PSMs for which this is the best scoring peptide. - num_prot: Number of times the matched protein matches other PSMs. - pep_site: Number of different peptides that match this protein. 
own ideas: - pep_charge_states: in how many charge states was the peptide found? - seq_mods: in how many mod states was the AA-sequence found? - num_spec: Number of times the matched spectrum matches other peptides. """ print("Counting intra-set features...") self.num_pep = defaultdict(int) self.num_prot = defaultdict(set) self.pep_site = defaultdict(set) self.score_list_dict = defaultdict(list) self.pep_charge_states = defaultdict(set) self.seq_mods = defaultdict(set) self.num_spec = defaultdict(set) with open(self.csv_path, "r") as f: reader = csv.DictReader(f) previous_spec_title = None rows_of_spectrum = [] for row in sorted( reader, reverse=self["bigger_scores_better"], key=self.sort_by_rank ): if unify_sequence(row["Sequence"]) in self.shitty_decoy_seqs: continue current_spec_title = row["Spectrum Title"] if current_spec_title != previous_spec_title: # the next spectrum started, so let's process the info we # collected for the previous spectrum: score_list = [ field_to_bayes_float(r[self.col_for_sorting]) for r in rows_of_spectrum ] self.score_list_dict[previous_spec_title] = score_list for rank, line in enumerate(rows_of_spectrum): # print("\t".join([ # str(rank), line['Spectrum Title'], line[self.col_for_sorting] # ])) uni_sequence = unify_sequence(line["Sequence"]) peptide = (uni_sequence, line["Modifications"]) # multiple proteins are separated by <|> # ignore start_stop_pre_post part since it depends on the peptide # and not the protein (i.e. 
_233_243_A_R) proteins = set( line["Protein ID"].replace("decoy_", "").split(";") ) # old unify csv format: # proteins = self.parse_protein_ids( # line['proteinacc_start_stop_pre_post_;'] # ) if len(proteins) > self.maximum_proteins_per_line: self.maximum_proteins_per_line = len(proteins) if rank == 0: # this is the 'best' peptide for that spectrum self.num_pep[peptide] += 1 for protein in proteins: self.num_prot[protein].add( ( line["Spectrum Title"], uni_sequence, line["Modifications"], ) ) self.pep_site[protein].add(peptide) self.pep_charge_states[peptide].add(int(row["Charge"])) self.seq_mods[uni_sequence].add(row["Modifications"]) self.num_spec[line["Spectrum Title"]].add(peptide) rows_of_spectrum = [] rows_of_spectrum.append(row) previous_spec_title = current_spec_title def row_to_features(self, row): """ Converts a unified CSV row to a SVM feature matrix (numbers only!) """ sequence = unify_sequence(row["Sequence"]) charge = field_to_float(row["Charge"]) score = field_to_bayes_float(row[self.col_for_sorting]) calc_mz, exp_mz, calc_mass, exp_mass = get_mz_values(row) # calc_mz = field_to_float( row['Calc m/z'] ) # calc m/z or uCalc? 
# exp_mz = field_to_float( row['Exp m/z'] ) pre_aa_field = row["Sequence Pre AA"] post_aa_field = row["Sequence Post AA"] all_pre_aas = set(re.split(self.delim_regex, pre_aa_field)) all_post_aas = set(re.split(self.delim_regex, post_aa_field)) if any(pre_aa not in self.tryptic_aas for pre_aa in all_pre_aas): enzN = 0 else: enzN = 1 if any(post_aa not in self.tryptic_aas for post_aa in all_post_aas): enzC = 0 else: enzC = 1 n_missed_cleavages = len( [aa for aa in sequence[:-1] if aa in ["R", "K"]] ) # / len(sequence) missed_cleavages = [0] * 6 try: missed_cleavages[n_missed_cleavages] = 1 except IndexError: # if a peptide has more than 6 missed cleavages missed_cleavages[-1] = 2 spectrum = row["Spectrum Title"].strip() mass = (exp_mz * charge) - (charge - 1) * PROTON pep_len = len(sequence) # delta_mz = calc_mz - exp_mz delta_mass = calc_mass - exp_mass peptide = (sequence, row["Modifications"]) proteins = self.parse_protein_ids(row["Protein ID"]) num_pep = self.num_pep[peptide] pep_charge_states = len(self.pep_charge_states[peptide]) seq_mods = len(self.seq_mods[sequence]) num_spec = len(self.num_spec[row["Spectrum Title"]]) num_prot = sum((len(self.num_prot[protein]) for protein in proteins)) pep_site = sum((len(self.pep_site[protein]) for protein in proteins)) user_specified_features = [] for feat in self.used_extra_fields: if feat != self.col_for_sorting: try: user_specified_features.append(field_to_float(row[feat])) except ValueError: pass charges = defaultdict(int) for charge_n in sorted(self.pep_charge_states[peptide]): charges[charge_n] = 1 if sequence in self.shitty_decoy_seqs: is_shitty = 1 else: is_shitty = 0 score_list = sorted( list(set(self.score_list_dict[spectrum])), reverse=self["bigger_scores_better"], ) try: score_list_scaled = scale_scores(score_list) rank = score_list.index(score) deltLCn = ( score_list_scaled[rank] - score_list_scaled[1] ) # Fractional difference between current and second best XCorr deltCn = ( score_list_scaled[rank] - 
score_list_scaled[-1] ) # Fractional difference between current and worst XCorr except (ValueError, IndexError, AssertionError): # NaN values will be replaced by the column mean later # NaN values are entered when there is no ranking # e.g. when only one peptide was matched to the spectrum. rank, deltLCn, deltCn = np.nan, np.nan, np.nan features = [ score, rank, deltCn, deltLCn, charge, # delta_mz,# / pep_len, delta_mass, # / pep_len, # abs(delta_mz),# / pep_len, abs(delta_mass), # / pep_len, n_missed_cleavages / pep_len, missed_cleavages[0], missed_cleavages[1], missed_cleavages[2], missed_cleavages[3], missed_cleavages[4], missed_cleavages[5], enzN, enzC, mass, pep_len, num_pep, num_prot, pep_site, is_shitty, pep_charge_states, num_spec, seq_mods, ] for charge_n in self.observed_charges: features.append(charges[charge_n]) return features + user_specified_features def collect_data(self): """ parses a unified csv file and collects features from each row """ categories = [] list_of_feature_lists = [] feature_sets = set() with open(self.csv_path, "r") as f: reader = csv.DictReader(f) # collecting some stats for FDR calculation: self.PSM_count = 0 self.decoy_count = 0 if self["dump_svm_matrix"]: self.init_svm_matrix_dump() additional_matrix_info = [] for i, row in enumerate( sorted( reader, reverse=self["bigger_scores_better"], key=lambda d: float(d[self.col_for_sorting]), ) ): features = self.row_to_features(row) if tuple(features) in feature_sets: continue feature_sets.add(tuple(features)) category, psm_FDR = self.get_psm_category(row) list_of_feature_lists.append(features) categories.append(category) if self["dump_svm_matrix"]: label = -1 if row_is_decoy(row) else 1 sequence = "{0}.{1}#{2}.{3}".format( row["Sequence Pre AA"].strip(), row["Sequence"].strip(), row["Modifications"].strip(), row["Sequence Post AA"].strip(), ) additional_matrix_info.append( { "psm_id": row["Spectrum Title"].strip(), "label": label, "scannr": row["Spectrum Title"].strip().split(".")[-2], 
"peptide": sequence, "proteins": self.parse_protein_ids(row["Protein ID"]), } ) if i % 1000 == 0: score_val = float(row[self.col_for_sorting]) msg = ( "Generating feature matrix from input csv " "(line ~{0}) with score {1} and FDR " "{2}".format(i, score_val, psm_FDR) ) print(msg, end="\r") # All data points are collected in one big matrix, to make standardization possible print("\nConverting feature matrix to NumPy array...") X_raw = np.array(list_of_feature_lists, dtype=float) print("Replacing empty/NaN values with the mean of each column...") self.nan_replacer = Imputer() self.nan_replacer.fit(X_raw) X_raw = self.nan_replacer.transform(X_raw) # Standardize input matrix to ease machine learning! Scaled data has zero mean and unit variance print("Standardizing input matrix...") self.scaler = SCALER.fit(X_raw) self.X = self.scaler.transform(X_raw) self.categories = np.array(categories) print() if self["dump_svm_matrix"]: print("Dumping SVM matrix to", self["dump_svm_matrix"]) for i, matrix_row in enumerate(self.X): matrix_row_info = additional_matrix_info[i] self.dump_svm_matrix_row( row=list(matrix_row), psm_id=matrix_row_info["psm_id"], label=matrix_row_info["label"], scannr=matrix_row_info["scannr"], peptide=matrix_row_info["peptide"], proteins=matrix_row_info["proteins"], ) print("Dumped SVM matrix to", self["dump_svm_matrix"]) return def init_svm_matrix_dump(self): from misc import FEATURE_NAMES colnames = ["PSMId", "label", "scannr"] + FEATURE_NAMES colnames += ["charge{0}".format(c) for c in self.observed_charges] for extra_field in sorted(self.used_extra_fields): colnames += [extra_field] colnames += ["peptide"] for n_proteins in range(self.maximum_proteins_per_line): colnames.append("proteinId{0}".format(n_proteins + 1)) self.matrix_csv_path = self["dump_svm_matrix"] print("Dumping raw SVM input matrix to", self.matrix_csv_path) with open(self.matrix_csv_path, "w") as f: f.write("\t".join(colnames) + "\n") def dump_svm_matrix_row( self, row=None, 
psm_id=None, label=None, scannr=None, peptide=None, proteins=None, ): full_row = [psm_id, label, scannr] + row + [peptide] + list(proteins) with open(self.matrix_csv_path, "a") as f: row_str = "\t".join(str(x) for x in full_row) + "\n" f.write(row_str) def get_psm_category(self, row): """ Determines whether a PSM (csv row) should be used as a negative or positive training example. returns 1 - high-scoring target (positive training example) 0 - not-high-scoring target (not usable for training) -1 - decoy (negative training example) """ category = 0 # unknown (mix of true positives and false positives) self.PSM_count += 1 # for FDR calculation sequence = unify_sequence(row["Sequence"]) psm_FDR = calc_FDR(self.PSM_count, self.decoy_count) if row_is_decoy(row): self.decoy_count += 1 if psm_FDR <= 0.25 and sequence not in self.shitty_decoy_seqs: category = -1 # decoy (false positive hits) self.counter["negative"] += 1 else: if not self.decoy_train_prob: need_max = self.counter["positive"] * 2 have = self.counter["negative"] still_there = self.counter["decoy"] - have prob = need_max / still_there if prob < 0.001: prob = 0.001 self.decoy_train_prob = prob print() print(self.counter) print("need max:", need_max) print("have:", have) print("still_there:", still_there) print("probability:", self.decoy_train_prob) print() if self.decoy_train_prob >= 1.0 or random() <= self.decoy_train_prob: category = -1 # decoy (false positive hits) self.counter["negative"] += 1 else: # row is target if psm_FDR <= self["fdr_cutoff"] and sequence not in self.shitty_decoy_seqs: category = 1 # high quality target (almost certainly true positives) self.counter["positive"] += 1 if category == 0: self.counter["unknown"] += 1 return (category, psm_FDR) def train(self, training_matrix, training_categories): counter = Counter(training_categories) msg = "Training {0} SVM on {1} target PSMs and {2} decoy PSMs" "...".format( self["kernel"], counter[1], counter[-1] ) print(msg, end="\r") # specify the 
classification method (rbf and linear SVC seem to work best and are quite fast) classifier = svm.SVC( C=self["c"], kernel=self["kernel"], probability=False, # we don't want to get probabilities later on -> faster cache_size=self["mb_ram"], # available RAM in megabytes # decision_function_shape = 'ovr', # doesn't seem to matter # class_weight= 'balanced', # doesn't seem to matter ) # train the SVC on our set of training data: classifier.fit( training_matrix, training_categories, ) print(msg + " done!") return classifier def classify(self, classifier, psm_matrix): msg = "Classifying {0} PSMs...".format(len(psm_matrix)) print(msg, end="\r") for i, row in enumerate(psm_matrix): # get the distance to the separating SVM hyperplane and use it as a score: svm_score = classifier.decision_function(np.array([row]))[0] features = tuple(row) if features not in self.results: self.results[features] = svm_score else: print( "Warning! This combination of features already has a predicted probability! " "Previous svm_score: {0:f} - Current svm_score: {1:f}" "".format(self.results[tuple(row)], svm_score) ) # take the mean value, no idea how to handle this better, but it never happened so far... self.results[features] = (self.results[features] + svm_score) / 2.0 print(msg + " done!") return def add_scores_to_csv(self): outfname = os.path.basename(self["output_csv"]) print("Writing output csv {0} ...".format(outfname)) msg = "Writing output csv {0} (line ~{1})..." 
with open(self["output_csv"], "w", newline="") as out_csv, open( self.csv_path, "r" ) as in_csv: reader = csv.DictReader(in_csv) writer = csv.DictWriter(out_csv, reader.fieldnames + [self._svm_score_name]) writer.writeheader() for i, row in enumerate(reader): if i % 1000 == 0: print(msg.format(outfname, i), end="\r") features = self.nan_replacer.transform( np.array([self.row_to_features(row)]) ) features_scaled = tuple(list(self.scaler.transform(features)[0])) SVMScore = self.results[features_scaled] row[self._svm_score_name] = SVMScore writer.writerow(row) print("\n") return def __str__(self): out_str = ["\n\tpyPercolator Options:"] for option, value in self.items(): out_str.append("{0:·<25}{1}".format(option, value)) return "\n".join(out_str) if __name__ == "__main__": s = SVMWrapper() print(s) # print parameter/settings overview s.determine_csv_sorting() s.find_shitty_decoys() print("\nCounter:") print(s.counter) print() s.count_intra_set_features() s.collect_data() print( "Splitting data in half to avoid training and testing on the same features..." ) skfold = StratifiedKFold(s.categories, n_folds=2, shuffle=True) # use one half to score the other half, and vice versa: for i, (train_index, test_index) in enumerate(skfold): current_half = "1st" if i == 0 else "2nd" other_half = "2nd" if i == 0 else "1st" print( "\nUsing high-scoring PSMs and decoys of the {0} half to train...".format( current_half ) ) mask = s.categories[train_index] != 0 train_categories = s.categories[train_index][mask] train_features = s.X[train_index][mask] svm_classifier = s.train( training_matrix=train_features, training_categories=train_categories, ) print( "Using the trained SVM to classify all PSMs of the {0} half".format( other_half ) )
) if s["kernel"].lower() == "linear": print() # print SVM coefficients (only works for linear kernel) print(svm_classifier.coef_) print() print("\nCounter:") print(s.counter) print() s.add_scores_to_csv()
s.classify( svm_classifier, s.X[test_index],
random_line_split
svm_1_0_0.py
#!/usr/bin/env python """ usage: svm.py unified_input.csv engine_score_column_name i.e. : svm.py omssa_2_1_6_unified.csv 'OMSSA:pvalue' Writes a new file with added column "SVMscore" which is the distance to the separating hyperplane of a Percolator-like support vector machine. """ import numpy as np import sklearn from sklearn import svm from sklearn.cross_validation import StratifiedKFold from sklearn.preprocessing import Imputer from collections import Counter, defaultdict from random import random import csv import re import os import argparse from misc import ( get_score_colname_and_order, field_to_float, unify_sequence, calc_FDR, scale_scores, row_is_decoy, field_to_bayes_float, get_mz_values, ) SCALER = ( sklearn.preprocessing.RobustScaler() ) # RobustScaler() seems to be most robust ;) PROTON = 1.00727646677 class SVMWrapper(dict): def __init__(self): self._svm_score_name = "SVMscore" self.counter = { # counting the # of possible training PSMs "target": 0, "decoy": 0, "positive": 0, "negative": 0, "unknown": 0, "parsed PSMs": 0, } self.results = {} self.shitty_decoy_seqs = set() # is overwritten by find_shitty_decoys() self.mgf_lookup = {} self.pep_to_mz = {} if __name__ == "__main__": self.parse_options() # parse command line args and set options self.set_input_csv() self.observed_charges = set() self.used_extra_fields = set() self.decoy_train_prob = ( None # probability to include decoy PSMs as negative training examples ) self.maximum_proteins_per_line = 0 self.tryptic_aas = set(["R", "K", "-"]) self.delim_regex = re.compile( r"<\|>|\;" ) # regex to split a line by both ";" and "<|>" return def parse_options(self): """ parses the command line args for options/parameters """ parser = argparse.ArgumentParser() parser.add_argument( "-i", "--input_csv", type=str, help="Input CSV path(s)", required=True, nargs="+", ) parser.add_argument( "-o", "--output_csv", type=str, help="Output CSV path", required=True ) parser.add_argument( "-k", "--kernel", type=str, 
default="rbf", help='SVM kernel type ("rbf", "linear", "poly" or "sigmoid")', ) parser.add_argument( "-c", type=float, default=1.0, help="Penalty parameter C of the error term" ) parser.add_argument( "-g", "--gamma", type=str, default="auto", help="Gamma parameter of the SVM.", ) parser.add_argument( "-r", "--mb_ram", type=float, default=4000, help="Available RAM in megabytes, for SVM calculation", ) parser.add_argument( "-f", "--fdr_cutoff", type=float, default=0.01, help="Target PSMs with a lower FDR will be used as a " "positive training set", ) parser.add_argument( "-x", "--columns_as_features", type=str, nargs="+", default=[ "MS-GF:RawScore", "MS-GF:DeNovoScore", "MS-GF:SpecEValue", "MS-GF:EValue", "OMSSA:evalue", "OMSSA:pvalue", "X\!Tandem:expect", "X\!Tandem:hyperscore", ], help="Columns that should be used as a feature directly " "(e.g. secondary scores). Will be converted to float", ) parser.add_argument( "-d", "--dump_svm_matrix", type=str, default=False, help="Dump SVM matrix in PIN (Percolator input) format " "to the specified path, mostly for debugging " "and benchmarking.", ) arg_dict = vars(parser.parse_args()) # convert to dict self.update(arg_dict) try: self["gamma"] = float(self["gamma"]) except ValueError: assert ( self["gamma"] == "auto" ), "Invalid gamma param: " '"{0}", using "auto" instead.'.format( self["gamma"] ) def set_input_csv(self): """ distinguishes one vs. many unified input csv files and either sets the single csv as input, or merges all csvs and sets the merged csv as input. """ if len(self["input_csv"]) > 1: raise Exception("You must only specify *one* unified CSV file!") self.csv_path = self["input_csv"][0] print("Using input file", self.csv_path) def find_shitty_decoys(self): """ Finds and notes decoys that share their sequence with a target PSM. Also counts the number of targets and decoys to get a quick estimate of how many positive/negative training examples can be "claimed". 
""" target_seqs = set() decoy_seqs = set() with open(self.csv_path, "r") as f: reader = csv.DictReader(f) sorted_reader = sorted( reader, reverse=self["bigger_scores_better"], key=lambda d: float(d[self.col_for_sorting]), ) for row in sorted_reader: self.observed_charges.add(int(row["Charge"])) if row_is_decoy(row): decoy_seqs.add(unify_sequence(row["Sequence"])) self.counter["decoy"] += 1 else: target_seqs.add(unify_sequence(row["Sequence"])) self.counter["target"] += 1 self.shitty_decoy_seqs = target_seqs.intersection(decoy_seqs) if len(self.shitty_decoy_seqs) > 0: print( "Warning! Found {0} sequences that are target AND decoy " "(immutable peptides?). These will not be used for training.\n".format( len(self.shitty_decoy_seqs) ) ) return def determine_csv_sorting(self): with open(self.csv_path, "r") as in_file: reader = csv.DictReader(in_file) ( self.col_for_sorting, self["bigger_scores_better"], ) = get_score_colname_and_order(reader.fieldnames) if self.col_for_sorting == self._svm_score_name: self._svm_score_name = self._svm_score_name + "2" print( "CSV will be sorted by column {0} (reverse={1}" ")".format(self.col_for_sorting, self["bigger_scores_better"]) ) for feat in self["columns_as_features"]: if feat in reader.fieldnames and feat != self.col_for_sorting: self.used_extra_fields.add(feat) def sort_by_rank(self, rowdict): score = float(rowdict[self.col_for_sorting]) spec_title = rowdict["Spectrum Title"] return (spec_title, score) @staticmethod def parse_protein_ids(csv_field, sep="<|>"): """ Turns the unified CSV column "Protein ID" into a set of all protein IDs. """ clean = csv_field.replace("decoy_", "").strip() prot_id_set = set(clean.split(sep)) return prot_id_set def count_intra_set_features(self): """ intra-set features as calculated by Percolator: - num_pep: Number of PSMs for which this is the best scoring peptide. - num_prot: Number of times the matched protein matches other PSMs. - pep_site: Number of different peptides that match this protein. 
own ideas: - pep_charge_states: in how many charge states was the peptide found? - seq_mods: in how many mod states was the AA-sequence found? - num_spec: Number of times the matched spectrum matches other peptides. """ print("Counting intra-set features...") self.num_pep = defaultdict(int) self.num_prot = defaultdict(set) self.pep_site = defaultdict(set) self.score_list_dict = defaultdict(list) self.pep_charge_states = defaultdict(set) self.seq_mods = defaultdict(set) self.num_spec = defaultdict(set) with open(self.csv_path, "r") as f: reader = csv.DictReader(f) previous_spec_title = None rows_of_spectrum = [] for row in sorted( reader, reverse=self["bigger_scores_better"], key=self.sort_by_rank ): if unify_sequence(row["Sequence"]) in self.shitty_decoy_seqs: continue current_spec_title = row["Spectrum Title"] if current_spec_title != previous_spec_title: # the next spectrum started, so let's process the info we # collected for the previous spectrum: score_list = [ field_to_bayes_float(r[self.col_for_sorting]) for r in rows_of_spectrum ] self.score_list_dict[previous_spec_title] = score_list for rank, line in enumerate(rows_of_spectrum): # print("\t".join([ # str(rank), line['Spectrum Title'], line[self.col_for_sorting] # ])) uni_sequence = unify_sequence(line["Sequence"]) peptide = (uni_sequence, line["Modifications"]) # multiple proteins are separated by <|> # ignore start_stop_pre_post part since it depends on the peptide # and not the protein (i.e. 
_233_243_A_R) proteins = set( line["Protein ID"].replace("decoy_", "").split(";") ) # old unify csv format: # proteins = self.parse_protein_ids( # line['proteinacc_start_stop_pre_post_;'] # ) if len(proteins) > self.maximum_proteins_per_line: self.maximum_proteins_per_line = len(proteins) if rank == 0: # this is the 'best' peptide for that spectrum self.num_pep[peptide] += 1 for protein in proteins: self.num_prot[protein].add( ( line["Spectrum Title"], uni_sequence, line["Modifications"], ) ) self.pep_site[protein].add(peptide) self.pep_charge_states[peptide].add(int(row["Charge"])) self.seq_mods[uni_sequence].add(row["Modifications"]) self.num_spec[line["Spectrum Title"]].add(peptide) rows_of_spectrum = [] rows_of_spectrum.append(row) previous_spec_title = current_spec_title def row_to_features(self, row): """ Converts a unified CSV row to a SVM feature matrix (numbers only!) """ sequence = unify_sequence(row["Sequence"]) charge = field_to_float(row["Charge"]) score = field_to_bayes_float(row[self.col_for_sorting]) calc_mz, exp_mz, calc_mass, exp_mass = get_mz_values(row) # calc_mz = field_to_float( row['Calc m/z'] ) # calc m/z or uCalc? 
# exp_mz = field_to_float( row['Exp m/z'] ) pre_aa_field = row["Sequence Pre AA"] post_aa_field = row["Sequence Post AA"] all_pre_aas = set(re.split(self.delim_regex, pre_aa_field)) all_post_aas = set(re.split(self.delim_regex, post_aa_field)) if any(pre_aa not in self.tryptic_aas for pre_aa in all_pre_aas): enzN = 0 else: enzN = 1 if any(post_aa not in self.tryptic_aas for post_aa in all_post_aas): enzC = 0 else: enzC = 1 n_missed_cleavages = len( [aa for aa in sequence[:-1] if aa in ["R", "K"]] ) # / len(sequence) missed_cleavages = [0] * 6 try: missed_cleavages[n_missed_cleavages] = 1 except IndexError: # if a peptide has more than 6 missed cleavages missed_cleavages[-1] = 2 spectrum = row["Spectrum Title"].strip() mass = (exp_mz * charge) - (charge - 1) * PROTON pep_len = len(sequence) # delta_mz = calc_mz - exp_mz delta_mass = calc_mass - exp_mass peptide = (sequence, row["Modifications"]) proteins = self.parse_protein_ids(row["Protein ID"]) num_pep = self.num_pep[peptide] pep_charge_states = len(self.pep_charge_states[peptide]) seq_mods = len(self.seq_mods[sequence]) num_spec = len(self.num_spec[row["Spectrum Title"]]) num_prot = sum((len(self.num_prot[protein]) for protein in proteins)) pep_site = sum((len(self.pep_site[protein]) for protein in proteins)) user_specified_features = [] for feat in self.used_extra_fields: if feat != self.col_for_sorting: try: user_specified_features.append(field_to_float(row[feat])) except ValueError: pass charges = defaultdict(int) for charge_n in sorted(self.pep_charge_states[peptide]): charges[charge_n] = 1 if sequence in self.shitty_decoy_seqs: is_shitty = 1 else: is_shitty = 0 score_list = sorted( list(set(self.score_list_dict[spectrum])), reverse=self["bigger_scores_better"], ) try: score_list_scaled = scale_scores(score_list) rank = score_list.index(score) deltLCn = ( score_list_scaled[rank] - score_list_scaled[1] ) # Fractional difference between current and second best XCorr deltCn = ( score_list_scaled[rank] - 
score_list_scaled[-1] ) # Fractional difference between current and worst XCorr except (ValueError, IndexError, AssertionError): # NaN values will be replaced by the column mean later # NaN values are entered when there is no ranking # e.g. when only one peptide was matched to the spectrum. rank, deltLCn, deltCn = np.nan, np.nan, np.nan features = [ score, rank, deltCn, deltLCn, charge, # delta_mz,# / pep_len, delta_mass, # / pep_len, # abs(delta_mz),# / pep_len, abs(delta_mass), # / pep_len, n_missed_cleavages / pep_len, missed_cleavages[0], missed_cleavages[1], missed_cleavages[2], missed_cleavages[3], missed_cleavages[4], missed_cleavages[5], enzN, enzC, mass, pep_len, num_pep, num_prot, pep_site, is_shitty, pep_charge_states, num_spec, seq_mods, ] for charge_n in self.observed_charges: features.append(charges[charge_n]) return features + user_specified_features def collect_data(self): """ parses a unified csv file and collects features from each row """ categories = [] list_of_feature_lists = [] feature_sets = set() with open(self.csv_path, "r") as f: reader = csv.DictReader(f) # collecting some stats for FDR calculation: self.PSM_count = 0 self.decoy_count = 0 if self["dump_svm_matrix"]: self.init_svm_matrix_dump() additional_matrix_info = [] for i, row in enumerate( sorted( reader, reverse=self["bigger_scores_better"], key=lambda d: float(d[self.col_for_sorting]), ) ): features = self.row_to_features(row) if tuple(features) in feature_sets: continue feature_sets.add(tuple(features)) category, psm_FDR = self.get_psm_category(row) list_of_feature_lists.append(features) categories.append(category) if self["dump_svm_matrix"]: label = -1 if row_is_decoy(row) else 1 sequence = "{0}.{1}#{2}.{3}".format( row["Sequence Pre AA"].strip(), row["Sequence"].strip(), row["Modifications"].strip(), row["Sequence Post AA"].strip(), ) additional_matrix_info.append( { "psm_id": row["Spectrum Title"].strip(), "label": label, "scannr": row["Spectrum Title"].strip().split(".")[-2], 
"peptide": sequence, "proteins": self.parse_protein_ids(row["Protein ID"]), } ) if i % 1000 == 0: score_val = float(row[self.col_for_sorting]) msg = ( "Generating feature matrix from input csv " "(line ~{0}) with score {1} and FDR " "{2}".format(i, score_val, psm_FDR) ) print(msg, end="\r") # All data points are collected in one big matrix, to make standardization possible print("\nConverting feature matrix to NumPy array...") X_raw = np.array(list_of_feature_lists, dtype=float) print("Replacing empty/NaN values with the mean of each column...") self.nan_replacer = Imputer() self.nan_replacer.fit(X_raw) X_raw = self.nan_replacer.transform(X_raw) # Standardize input matrix to ease machine learning! Scaled data has zero mean and unit variance print("Standardizing input matrix...") self.scaler = SCALER.fit(X_raw) self.X = self.scaler.transform(X_raw) self.categories = np.array(categories) print() if self["dump_svm_matrix"]: print("Dumping SVM matrix to", self["dump_svm_matrix"]) for i, matrix_row in enumerate(self.X): matrix_row_info = additional_matrix_info[i] self.dump_svm_matrix_row( row=list(matrix_row), psm_id=matrix_row_info["psm_id"], label=matrix_row_info["label"], scannr=matrix_row_info["scannr"], peptide=matrix_row_info["peptide"], proteins=matrix_row_info["proteins"], ) print("Dumped SVM matrix to", self["dump_svm_matrix"]) return def init_svm_matrix_dump(self): from misc import FEATURE_NAMES colnames = ["PSMId", "label", "scannr"] + FEATURE_NAMES colnames += ["charge{0}".format(c) for c in self.observed_charges] for extra_field in sorted(self.used_extra_fields): colnames += [extra_field] colnames += ["peptide"] for n_proteins in range(self.maximum_proteins_per_line):
self.matrix_csv_path = self["dump_svm_matrix"] print("Dumping raw SVM input matrix to", self.matrix_csv_path) with open(self.matrix_csv_path, "w") as f: f.write("\t".join(colnames) + "\n") def dump_svm_matrix_row( self, row=None, psm_id=None, label=None, scannr=None, peptide=None, proteins=None, ): full_row = [psm_id, label, scannr] + row + [peptide] + list(proteins) with open(self.matrix_csv_path, "a") as f: row_str = "\t".join(str(x) for x in full_row) + "\n" f.write(row_str) def get_psm_category(self, row): """ Determines whether a PSM (csv row) should be used as a negative or positive training example. returns 1 - high-scoring target (positive training example) 0 - not-high-scoring target (not usable for training) -1 - decoy (negative training example) """ category = 0 # unknown (mix of true positives and false positives) self.PSM_count += 1 # for FDR calculation sequence = unify_sequence(row["Sequence"]) psm_FDR = calc_FDR(self.PSM_count, self.decoy_count) if row_is_decoy(row): self.decoy_count += 1 if psm_FDR <= 0.25 and sequence not in self.shitty_decoy_seqs: category = -1 # decoy (false positive hits) self.counter["negative"] += 1 else: if not self.decoy_train_prob: need_max = self.counter["positive"] * 2 have = self.counter["negative"] still_there = self.counter["decoy"] - have prob = need_max / still_there if prob < 0.001: prob = 0.001 self.decoy_train_prob = prob print() print(self.counter) print("need max:", need_max) print("have:", have) print("still_there:", still_there) print("probability:", self.decoy_train_prob) print() if self.decoy_train_prob >= 1.0 or random() <= self.decoy_train_prob: category = -1 # decoy (false positive hits) self.counter["negative"] += 1 else: # row is target if psm_FDR <= self["fdr_cutoff"] and sequence not in self.shitty_decoy_seqs: category = 1 # high quality target (almost certainly true positives) self.counter["positive"] += 1 if category == 0: self.counter["unknown"] += 1 return (category, psm_FDR) def train(self, 
training_matrix, training_categories): counter = Counter(training_categories) msg = "Training {0} SVM on {1} target PSMs and {2} decoy PSMs" "...".format( self["kernel"], counter[1], counter[-1] ) print(msg, end="\r") # specify the classification method (rbf and linear SVC seem to work best and are quite fast) classifier = svm.SVC( C=self["c"], kernel=self["kernel"], probability=False, # we don't want to get probabilities later on -> faster cache_size=self["mb_ram"], # available RAM in megabytes # decision_function_shape = 'ovr', # doesn't seem to matter # class_weight= 'balanced', # doesn't seem to matter ) # train the SVC on our set of training data: classifier.fit( training_matrix, training_categories, ) print(msg + " done!") return classifier def classify(self, classifier, psm_matrix): msg = "Classifying {0} PSMs...".format(len(psm_matrix)) print(msg, end="\r") for i, row in enumerate(psm_matrix): # get the distance to the separating SVM hyperplane and use it as a score: svm_score = classifier.decision_function(np.array([row]))[0] features = tuple(row) if features not in self.results: self.results[features] = svm_score else: print( "Warning! This combination of features already has a predicted probability! " "Previous svm_score: {0:f} - Current svm_score: {1:f}" "".format(self.results[tuple(row)], svm_score) ) # take the mean value, no idea how to handle this better, but it never happened so far... self.results[features] = (self.results[features] + svm_score) / 2.0 print(msg + " done!") return def add_scores_to_csv(self): outfname = os.path.basename(self["output_csv"]) print("Writing output csv {0} ...".format(outfname)) msg = "Writing output csv {0} (line ~{1})..." 
with open(self["output_csv"], "w", newline="") as out_csv, open( self.csv_path, "r" ) as in_csv: reader = csv.DictReader(in_csv) writer = csv.DictWriter(out_csv, reader.fieldnames + [self._svm_score_name]) writer.writeheader() for i, row in enumerate(reader): if i % 1000 == 0: print(msg.format(outfname, i), end="\r") features = self.nan_replacer.transform( np.array([self.row_to_features(row)]) ) features_scaled = tuple(list(self.scaler.transform(features)[0])) SVMScore = self.results[features_scaled] row[self._svm_score_name] = SVMScore writer.writerow(row) print("\n") return def __str__(self): out_str = ["\n\tpyPercolator Options:"] for option, value in self.items(): out_str.append("{0:·<25}{1}".format(option, value)) return "\n".join(out_str) if __name__ == "__main__": s = SVMWrapper() print(s) # print parameter/settings overview s.determine_csv_sorting() s.find_shitty_decoys() print("\nCounter:") print(s.counter) print() s.count_intra_set_features() s.collect_data() print( "Splitting data in half to avoid training and testing on the same features..." ) skfold = StratifiedKFold(s.categories, n_folds=2, shuffle=True) # use one half to score the other half, and vice versa: for i, (train_index, test_index) in enumerate(skfold): current_half = "1st" if i == 0 else "2nd" other_half = "2nd" if i == 0 else "1st" print( "\nUsing high-scoring PSMs and decoys of the {0} half to train...".format( current_half ) ) mask = s.categories[train_index] != 0 train_categories = s.categories[train_index][mask] train_features = s.X[train_index][mask] svm_classifier = s.train( training_matrix=train_features, training_categories=train_categories, ) print( "Using the trained SVM to classify all PSMs of the {0} half".format( other_half ) ) s.classify( svm_classifier, s.X[test_index], ) if s["kernel"].lower() == "linear": print() # print SVM coefficients (only works for linear kernel) print(svm_classifier.coef_) print() print("\nCounter:") print(s.counter) print() s.add_scores_to_csv()
colnames.append("proteinId{0}".format(n_proteins + 1))
conditional_block
svm_1_0_0.py
#!/usr/bin/env python """ usage: svm.py unified_input.csv engine_score_column_name i.e. : svm.py omssa_2_1_6_unified.csv 'OMSSA:pvalue' Writes a new file with added column "SVMscore" which is the distance to the separating hyperplane of a Percolator-like support vector machine. """ import numpy as np import sklearn from sklearn import svm from sklearn.cross_validation import StratifiedKFold from sklearn.preprocessing import Imputer from collections import Counter, defaultdict from random import random import csv import re import os import argparse from misc import ( get_score_colname_and_order, field_to_float, unify_sequence, calc_FDR, scale_scores, row_is_decoy, field_to_bayes_float, get_mz_values, ) SCALER = ( sklearn.preprocessing.RobustScaler() ) # RobustScaler() seems to be most robust ;) PROTON = 1.00727646677 class SVMWrapper(dict): def __init__(self): self._svm_score_name = "SVMscore" self.counter = { # counting the # of possible training PSMs "target": 0, "decoy": 0, "positive": 0, "negative": 0, "unknown": 0, "parsed PSMs": 0, } self.results = {} self.shitty_decoy_seqs = set() # is overwritten by find_shitty_decoys() self.mgf_lookup = {} self.pep_to_mz = {} if __name__ == "__main__": self.parse_options() # parse command line args and set options self.set_input_csv() self.observed_charges = set() self.used_extra_fields = set() self.decoy_train_prob = ( None # probability to include decoy PSMs as negative training examples ) self.maximum_proteins_per_line = 0 self.tryptic_aas = set(["R", "K", "-"]) self.delim_regex = re.compile( r"<\|>|\;" ) # regex to split a line by both ";" and "<|>" return def parse_options(self): """ parses the command line args for options/parameters """ parser = argparse.ArgumentParser() parser.add_argument( "-i", "--input_csv", type=str, help="Input CSV path(s)", required=True, nargs="+", ) parser.add_argument( "-o", "--output_csv", type=str, help="Output CSV path", required=True ) parser.add_argument( "-k", "--kernel", type=str, 
default="rbf", help='SVM kernel type ("rbf", "linear", "poly" or "sigmoid")', ) parser.add_argument( "-c", type=float, default=1.0, help="Penalty parameter C of the error term" ) parser.add_argument( "-g", "--gamma", type=str, default="auto", help="Gamma parameter of the SVM.", ) parser.add_argument( "-r", "--mb_ram", type=float, default=4000, help="Available RAM in megabytes, for SVM calculation", ) parser.add_argument( "-f", "--fdr_cutoff", type=float, default=0.01, help="Target PSMs with a lower FDR will be used as a " "positive training set", ) parser.add_argument( "-x", "--columns_as_features", type=str, nargs="+", default=[ "MS-GF:RawScore", "MS-GF:DeNovoScore", "MS-GF:SpecEValue", "MS-GF:EValue", "OMSSA:evalue", "OMSSA:pvalue", "X\!Tandem:expect", "X\!Tandem:hyperscore", ], help="Columns that should be used as a feature directly " "(e.g. secondary scores). Will be converted to float", ) parser.add_argument( "-d", "--dump_svm_matrix", type=str, default=False, help="Dump SVM matrix in PIN (Percolator input) format " "to the specified path, mostly for debugging " "and benchmarking.", ) arg_dict = vars(parser.parse_args()) # convert to dict self.update(arg_dict) try: self["gamma"] = float(self["gamma"]) except ValueError: assert ( self["gamma"] == "auto" ), "Invalid gamma param: " '"{0}", using "auto" instead.'.format( self["gamma"] ) def set_input_csv(self): """ distinguishes one vs. many unified input csv files and either sets the single csv as input, or merges all csvs and sets the merged csv as input. """ if len(self["input_csv"]) > 1: raise Exception("You must only specify *one* unified CSV file!") self.csv_path = self["input_csv"][0] print("Using input file", self.csv_path) def find_shitty_decoys(self): """ Finds and notes decoys that share their sequence with a target PSM. Also counts the number of targets and decoys to get a quick estimate of how many positive/negative training examples can be "claimed". 
""" target_seqs = set() decoy_seqs = set() with open(self.csv_path, "r") as f: reader = csv.DictReader(f) sorted_reader = sorted( reader, reverse=self["bigger_scores_better"], key=lambda d: float(d[self.col_for_sorting]), ) for row in sorted_reader: self.observed_charges.add(int(row["Charge"])) if row_is_decoy(row): decoy_seqs.add(unify_sequence(row["Sequence"])) self.counter["decoy"] += 1 else: target_seqs.add(unify_sequence(row["Sequence"])) self.counter["target"] += 1 self.shitty_decoy_seqs = target_seqs.intersection(decoy_seqs) if len(self.shitty_decoy_seqs) > 0: print( "Warning! Found {0} sequences that are target AND decoy " "(immutable peptides?). These will not be used for training.\n".format( len(self.shitty_decoy_seqs) ) ) return def determine_csv_sorting(self): with open(self.csv_path, "r") as in_file: reader = csv.DictReader(in_file) ( self.col_for_sorting, self["bigger_scores_better"], ) = get_score_colname_and_order(reader.fieldnames) if self.col_for_sorting == self._svm_score_name: self._svm_score_name = self._svm_score_name + "2" print( "CSV will be sorted by column {0} (reverse={1}" ")".format(self.col_for_sorting, self["bigger_scores_better"]) ) for feat in self["columns_as_features"]: if feat in reader.fieldnames and feat != self.col_for_sorting: self.used_extra_fields.add(feat) def sort_by_rank(self, rowdict): score = float(rowdict[self.col_for_sorting]) spec_title = rowdict["Spectrum Title"] return (spec_title, score) @staticmethod def parse_protein_ids(csv_field, sep="<|>"): """ Turns the unified CSV column "Protein ID" into a set of all protein IDs. """ clean = csv_field.replace("decoy_", "").strip() prot_id_set = set(clean.split(sep)) return prot_id_set def count_intra_set_features(self): """ intra-set features as calculated by Percolator: - num_pep: Number of PSMs for which this is the best scoring peptide. - num_prot: Number of times the matched protein matches other PSMs. - pep_site: Number of different peptides that match this protein. 
own ideas: - pep_charge_states: in how many charge states was the peptide found? - seq_mods: in how many mod states was the AA-sequence found? - num_spec: Number of times the matched spectrum matches other peptides. """ print("Counting intra-set features...") self.num_pep = defaultdict(int) self.num_prot = defaultdict(set) self.pep_site = defaultdict(set) self.score_list_dict = defaultdict(list) self.pep_charge_states = defaultdict(set) self.seq_mods = defaultdict(set) self.num_spec = defaultdict(set) with open(self.csv_path, "r") as f: reader = csv.DictReader(f) previous_spec_title = None rows_of_spectrum = [] for row in sorted( reader, reverse=self["bigger_scores_better"], key=self.sort_by_rank ): if unify_sequence(row["Sequence"]) in self.shitty_decoy_seqs: continue current_spec_title = row["Spectrum Title"] if current_spec_title != previous_spec_title: # the next spectrum started, so let's process the info we # collected for the previous spectrum: score_list = [ field_to_bayes_float(r[self.col_for_sorting]) for r in rows_of_spectrum ] self.score_list_dict[previous_spec_title] = score_list for rank, line in enumerate(rows_of_spectrum): # print("\t".join([ # str(rank), line['Spectrum Title'], line[self.col_for_sorting] # ])) uni_sequence = unify_sequence(line["Sequence"]) peptide = (uni_sequence, line["Modifications"]) # multiple proteins are separated by <|> # ignore start_stop_pre_post part since it depends on the peptide # and not the protein (i.e. 
_233_243_A_R) proteins = set( line["Protein ID"].replace("decoy_", "").split(";") ) # old unify csv format: # proteins = self.parse_protein_ids( # line['proteinacc_start_stop_pre_post_;'] # ) if len(proteins) > self.maximum_proteins_per_line: self.maximum_proteins_per_line = len(proteins) if rank == 0: # this is the 'best' peptide for that spectrum self.num_pep[peptide] += 1 for protein in proteins: self.num_prot[protein].add( ( line["Spectrum Title"], uni_sequence, line["Modifications"], ) ) self.pep_site[protein].add(peptide) self.pep_charge_states[peptide].add(int(row["Charge"])) self.seq_mods[uni_sequence].add(row["Modifications"]) self.num_spec[line["Spectrum Title"]].add(peptide) rows_of_spectrum = [] rows_of_spectrum.append(row) previous_spec_title = current_spec_title def row_to_features(self, row): """ Converts a unified CSV row to a SVM feature matrix (numbers only!) """ sequence = unify_sequence(row["Sequence"]) charge = field_to_float(row["Charge"]) score = field_to_bayes_float(row[self.col_for_sorting]) calc_mz, exp_mz, calc_mass, exp_mass = get_mz_values(row) # calc_mz = field_to_float( row['Calc m/z'] ) # calc m/z or uCalc? 
# exp_mz = field_to_float( row['Exp m/z'] ) pre_aa_field = row["Sequence Pre AA"] post_aa_field = row["Sequence Post AA"] all_pre_aas = set(re.split(self.delim_regex, pre_aa_field)) all_post_aas = set(re.split(self.delim_regex, post_aa_field)) if any(pre_aa not in self.tryptic_aas for pre_aa in all_pre_aas): enzN = 0 else: enzN = 1 if any(post_aa not in self.tryptic_aas for post_aa in all_post_aas): enzC = 0 else: enzC = 1 n_missed_cleavages = len( [aa for aa in sequence[:-1] if aa in ["R", "K"]] ) # / len(sequence) missed_cleavages = [0] * 6 try: missed_cleavages[n_missed_cleavages] = 1 except IndexError: # if a peptide has more than 6 missed cleavages missed_cleavages[-1] = 2 spectrum = row["Spectrum Title"].strip() mass = (exp_mz * charge) - (charge - 1) * PROTON pep_len = len(sequence) # delta_mz = calc_mz - exp_mz delta_mass = calc_mass - exp_mass peptide = (sequence, row["Modifications"]) proteins = self.parse_protein_ids(row["Protein ID"]) num_pep = self.num_pep[peptide] pep_charge_states = len(self.pep_charge_states[peptide]) seq_mods = len(self.seq_mods[sequence]) num_spec = len(self.num_spec[row["Spectrum Title"]]) num_prot = sum((len(self.num_prot[protein]) for protein in proteins)) pep_site = sum((len(self.pep_site[protein]) for protein in proteins)) user_specified_features = [] for feat in self.used_extra_fields: if feat != self.col_for_sorting: try: user_specified_features.append(field_to_float(row[feat])) except ValueError: pass charges = defaultdict(int) for charge_n in sorted(self.pep_charge_states[peptide]): charges[charge_n] = 1 if sequence in self.shitty_decoy_seqs: is_shitty = 1 else: is_shitty = 0 score_list = sorted( list(set(self.score_list_dict[spectrum])), reverse=self["bigger_scores_better"], ) try: score_list_scaled = scale_scores(score_list) rank = score_list.index(score) deltLCn = ( score_list_scaled[rank] - score_list_scaled[1] ) # Fractional difference between current and second best XCorr deltCn = ( score_list_scaled[rank] - 
score_list_scaled[-1] ) # Fractional difference between current and worst XCorr except (ValueError, IndexError, AssertionError): # NaN values will be replaced by the column mean later # NaN values are entered when there is no ranking # e.g. when only one peptide was matched to the spectrum. rank, deltLCn, deltCn = np.nan, np.nan, np.nan features = [ score, rank, deltCn, deltLCn, charge, # delta_mz,# / pep_len, delta_mass, # / pep_len, # abs(delta_mz),# / pep_len, abs(delta_mass), # / pep_len, n_missed_cleavages / pep_len, missed_cleavages[0], missed_cleavages[1], missed_cleavages[2], missed_cleavages[3], missed_cleavages[4], missed_cleavages[5], enzN, enzC, mass, pep_len, num_pep, num_prot, pep_site, is_shitty, pep_charge_states, num_spec, seq_mods, ] for charge_n in self.observed_charges: features.append(charges[charge_n]) return features + user_specified_features def collect_data(self): """ parses a unified csv file and collects features from each row """ categories = [] list_of_feature_lists = [] feature_sets = set() with open(self.csv_path, "r") as f: reader = csv.DictReader(f) # collecting some stats for FDR calculation: self.PSM_count = 0 self.decoy_count = 0 if self["dump_svm_matrix"]: self.init_svm_matrix_dump() additional_matrix_info = [] for i, row in enumerate( sorted( reader, reverse=self["bigger_scores_better"], key=lambda d: float(d[self.col_for_sorting]), ) ): features = self.row_to_features(row) if tuple(features) in feature_sets: continue feature_sets.add(tuple(features)) category, psm_FDR = self.get_psm_category(row) list_of_feature_lists.append(features) categories.append(category) if self["dump_svm_matrix"]: label = -1 if row_is_decoy(row) else 1 sequence = "{0}.{1}#{2}.{3}".format( row["Sequence Pre AA"].strip(), row["Sequence"].strip(), row["Modifications"].strip(), row["Sequence Post AA"].strip(), ) additional_matrix_info.append( { "psm_id": row["Spectrum Title"].strip(), "label": label, "scannr": row["Spectrum Title"].strip().split(".")[-2], 
"peptide": sequence, "proteins": self.parse_protein_ids(row["Protein ID"]), } ) if i % 1000 == 0: score_val = float(row[self.col_for_sorting]) msg = ( "Generating feature matrix from input csv " "(line ~{0}) with score {1} and FDR " "{2}".format(i, score_val, psm_FDR) ) print(msg, end="\r") # All data points are collected in one big matrix, to make standardization possible print("\nConverting feature matrix to NumPy array...") X_raw = np.array(list_of_feature_lists, dtype=float) print("Replacing empty/NaN values with the mean of each column...") self.nan_replacer = Imputer() self.nan_replacer.fit(X_raw) X_raw = self.nan_replacer.transform(X_raw) # Standardize input matrix to ease machine learning! Scaled data has zero mean and unit variance print("Standardizing input matrix...") self.scaler = SCALER.fit(X_raw) self.X = self.scaler.transform(X_raw) self.categories = np.array(categories) print() if self["dump_svm_matrix"]: print("Dumping SVM matrix to", self["dump_svm_matrix"]) for i, matrix_row in enumerate(self.X): matrix_row_info = additional_matrix_info[i] self.dump_svm_matrix_row( row=list(matrix_row), psm_id=matrix_row_info["psm_id"], label=matrix_row_info["label"], scannr=matrix_row_info["scannr"], peptide=matrix_row_info["peptide"], proteins=matrix_row_info["proteins"], ) print("Dumped SVM matrix to", self["dump_svm_matrix"]) return def init_svm_matrix_dump(self): from misc import FEATURE_NAMES colnames = ["PSMId", "label", "scannr"] + FEATURE_NAMES colnames += ["charge{0}".format(c) for c in self.observed_charges] for extra_field in sorted(self.used_extra_fields): colnames += [extra_field] colnames += ["peptide"] for n_proteins in range(self.maximum_proteins_per_line): colnames.append("proteinId{0}".format(n_proteins + 1)) self.matrix_csv_path = self["dump_svm_matrix"] print("Dumping raw SVM input matrix to", self.matrix_csv_path) with open(self.matrix_csv_path, "w") as f: f.write("\t".join(colnames) + "\n") def dump_svm_matrix_row( self, row=None, 
psm_id=None, label=None, scannr=None, peptide=None, proteins=None, ): full_row = [psm_id, label, scannr] + row + [peptide] + list(proteins) with open(self.matrix_csv_path, "a") as f: row_str = "\t".join(str(x) for x in full_row) + "\n" f.write(row_str) def get_psm_category(self, row): """ Determines whether a PSM (csv row) should be used as a negative or positive training example. returns 1 - high-scoring target (positive training example) 0 - not-high-scoring target (not usable for training) -1 - decoy (negative training example) """ category = 0 # unknown (mix of true positives and false positives) self.PSM_count += 1 # for FDR calculation sequence = unify_sequence(row["Sequence"]) psm_FDR = calc_FDR(self.PSM_count, self.decoy_count) if row_is_decoy(row): self.decoy_count += 1 if psm_FDR <= 0.25 and sequence not in self.shitty_decoy_seqs: category = -1 # decoy (false positive hits) self.counter["negative"] += 1 else: if not self.decoy_train_prob: need_max = self.counter["positive"] * 2 have = self.counter["negative"] still_there = self.counter["decoy"] - have prob = need_max / still_there if prob < 0.001: prob = 0.001 self.decoy_train_prob = prob print() print(self.counter) print("need max:", need_max) print("have:", have) print("still_there:", still_there) print("probability:", self.decoy_train_prob) print() if self.decoy_train_prob >= 1.0 or random() <= self.decoy_train_prob: category = -1 # decoy (false positive hits) self.counter["negative"] += 1 else: # row is target if psm_FDR <= self["fdr_cutoff"] and sequence not in self.shitty_decoy_seqs: category = 1 # high quality target (almost certainly true positives) self.counter["positive"] += 1 if category == 0: self.counter["unknown"] += 1 return (category, psm_FDR) def train(self, training_matrix, training_categories): counter = Counter(training_categories) msg = "Training {0} SVM on {1} target PSMs and {2} decoy PSMs" "...".format( self["kernel"], counter[1], counter[-1] ) print(msg, end="\r") # specify the 
classification method (rbf and linear SVC seem to work best and are quite fast) classifier = svm.SVC( C=self["c"], kernel=self["kernel"], probability=False, # we don't want to get probabilities later on -> faster cache_size=self["mb_ram"], # available RAM in megabytes # decision_function_shape = 'ovr', # doesn't seem to matter # class_weight= 'balanced', # doesn't seem to matter ) # train the SVC on our set of training data: classifier.fit( training_matrix, training_categories, ) print(msg + " done!") return classifier def classify(self, classifier, psm_matrix):
def add_scores_to_csv(self): outfname = os.path.basename(self["output_csv"]) print("Writing output csv {0} ...".format(outfname)) msg = "Writing output csv {0} (line ~{1})..." with open(self["output_csv"], "w", newline="") as out_csv, open( self.csv_path, "r" ) as in_csv: reader = csv.DictReader(in_csv) writer = csv.DictWriter(out_csv, reader.fieldnames + [self._svm_score_name]) writer.writeheader() for i, row in enumerate(reader): if i % 1000 == 0: print(msg.format(outfname, i), end="\r") features = self.nan_replacer.transform( np.array([self.row_to_features(row)]) ) features_scaled = tuple(list(self.scaler.transform(features)[0])) SVMScore = self.results[features_scaled] row[self._svm_score_name] = SVMScore writer.writerow(row) print("\n") return def __str__(self): out_str = ["\n\tpyPercolator Options:"] for option, value in self.items(): out_str.append("{0:·<25}{1}".format(option, value)) return "\n".join(out_str) if __name__ == "__main__": s = SVMWrapper() print(s) # print parameter/settings overview s.determine_csv_sorting() s.find_shitty_decoys() print("\nCounter:") print(s.counter) print() s.count_intra_set_features() s.collect_data() print( "Splitting data in half to avoid training and testing on the same features..." 
) skfold = StratifiedKFold(s.categories, n_folds=2, shuffle=True) # use one half to score the other half, and vice versa: for i, (train_index, test_index) in enumerate(skfold): current_half = "1st" if i == 0 else "2nd" other_half = "2nd" if i == 0 else "1st" print( "\nUsing high-scoring PSMs and decoys of the {0} half to train...".format( current_half ) ) mask = s.categories[train_index] != 0 train_categories = s.categories[train_index][mask] train_features = s.X[train_index][mask] svm_classifier = s.train( training_matrix=train_features, training_categories=train_categories, ) print( "Using the trained SVM to classify all PSMs of the {0} half".format( other_half ) ) s.classify( svm_classifier, s.X[test_index], ) if s["kernel"].lower() == "linear": print() # print SVM coefficients (only works for linear kernel) print(svm_classifier.coef_) print() print("\nCounter:") print(s.counter) print() s.add_scores_to_csv()
msg = "Classifying {0} PSMs...".format(len(psm_matrix)) print(msg, end="\r") for i, row in enumerate(psm_matrix): # get the distance to the separating SVM hyperplane and use it as a score: svm_score = classifier.decision_function(np.array([row]))[0] features = tuple(row) if features not in self.results: self.results[features] = svm_score else: print( "Warning! This combination of features already has a predicted probability! " "Previous svm_score: {0:f} - Current svm_score: {1:f}" "".format(self.results[tuple(row)], svm_score) ) # take the mean value, no idea how to handle this better, but it never happened so far... self.results[features] = (self.results[features] + svm_score) / 2.0 print(msg + " done!") return
identifier_body
svm_1_0_0.py
#!/usr/bin/env python """ usage: svm.py unified_input.csv engine_score_column_name i.e. : svm.py omssa_2_1_6_unified.csv 'OMSSA:pvalue' Writes a new file with added column "SVMscore" which is the distance to the separating hyperplane of a Percolator-like support vector machine. """ import numpy as np import sklearn from sklearn import svm from sklearn.cross_validation import StratifiedKFold from sklearn.preprocessing import Imputer from collections import Counter, defaultdict from random import random import csv import re import os import argparse from misc import ( get_score_colname_and_order, field_to_float, unify_sequence, calc_FDR, scale_scores, row_is_decoy, field_to_bayes_float, get_mz_values, ) SCALER = ( sklearn.preprocessing.RobustScaler() ) # RobustScaler() seems to be most robust ;) PROTON = 1.00727646677 class SVMWrapper(dict): def __init__(self): self._svm_score_name = "SVMscore" self.counter = { # counting the # of possible training PSMs "target": 0, "decoy": 0, "positive": 0, "negative": 0, "unknown": 0, "parsed PSMs": 0, } self.results = {} self.shitty_decoy_seqs = set() # is overwritten by find_shitty_decoys() self.mgf_lookup = {} self.pep_to_mz = {} if __name__ == "__main__": self.parse_options() # parse command line args and set options self.set_input_csv() self.observed_charges = set() self.used_extra_fields = set() self.decoy_train_prob = ( None # probability to include decoy PSMs as negative training examples ) self.maximum_proteins_per_line = 0 self.tryptic_aas = set(["R", "K", "-"]) self.delim_regex = re.compile( r"<\|>|\;" ) # regex to split a line by both ";" and "<|>" return def parse_options(self): """ parses the command line args for options/parameters """ parser = argparse.ArgumentParser() parser.add_argument( "-i", "--input_csv", type=str, help="Input CSV path(s)", required=True, nargs="+", ) parser.add_argument( "-o", "--output_csv", type=str, help="Output CSV path", required=True ) parser.add_argument( "-k", "--kernel", type=str, 
default="rbf", help='SVM kernel type ("rbf", "linear", "poly" or "sigmoid")', ) parser.add_argument( "-c", type=float, default=1.0, help="Penalty parameter C of the error term" ) parser.add_argument( "-g", "--gamma", type=str, default="auto", help="Gamma parameter of the SVM.", ) parser.add_argument( "-r", "--mb_ram", type=float, default=4000, help="Available RAM in megabytes, for SVM calculation", ) parser.add_argument( "-f", "--fdr_cutoff", type=float, default=0.01, help="Target PSMs with a lower FDR will be used as a " "positive training set", ) parser.add_argument( "-x", "--columns_as_features", type=str, nargs="+", default=[ "MS-GF:RawScore", "MS-GF:DeNovoScore", "MS-GF:SpecEValue", "MS-GF:EValue", "OMSSA:evalue", "OMSSA:pvalue", "X\!Tandem:expect", "X\!Tandem:hyperscore", ], help="Columns that should be used as a feature directly " "(e.g. secondary scores). Will be converted to float", ) parser.add_argument( "-d", "--dump_svm_matrix", type=str, default=False, help="Dump SVM matrix in PIN (Percolator input) format " "to the specified path, mostly for debugging " "and benchmarking.", ) arg_dict = vars(parser.parse_args()) # convert to dict self.update(arg_dict) try: self["gamma"] = float(self["gamma"]) except ValueError: assert ( self["gamma"] == "auto" ), "Invalid gamma param: " '"{0}", using "auto" instead.'.format( self["gamma"] ) def set_input_csv(self): """ distinguishes one vs. many unified input csv files and either sets the single csv as input, or merges all csvs and sets the merged csv as input. """ if len(self["input_csv"]) > 1: raise Exception("You must only specify *one* unified CSV file!") self.csv_path = self["input_csv"][0] print("Using input file", self.csv_path) def find_shitty_decoys(self): """ Finds and notes decoys that share their sequence with a target PSM. Also counts the number of targets and decoys to get a quick estimate of how many positive/negative training examples can be "claimed". 
""" target_seqs = set() decoy_seqs = set() with open(self.csv_path, "r") as f: reader = csv.DictReader(f) sorted_reader = sorted( reader, reverse=self["bigger_scores_better"], key=lambda d: float(d[self.col_for_sorting]), ) for row in sorted_reader: self.observed_charges.add(int(row["Charge"])) if row_is_decoy(row): decoy_seqs.add(unify_sequence(row["Sequence"])) self.counter["decoy"] += 1 else: target_seqs.add(unify_sequence(row["Sequence"])) self.counter["target"] += 1 self.shitty_decoy_seqs = target_seqs.intersection(decoy_seqs) if len(self.shitty_decoy_seqs) > 0: print( "Warning! Found {0} sequences that are target AND decoy " "(immutable peptides?). These will not be used for training.\n".format( len(self.shitty_decoy_seqs) ) ) return def determine_csv_sorting(self): with open(self.csv_path, "r") as in_file: reader = csv.DictReader(in_file) ( self.col_for_sorting, self["bigger_scores_better"], ) = get_score_colname_and_order(reader.fieldnames) if self.col_for_sorting == self._svm_score_name: self._svm_score_name = self._svm_score_name + "2" print( "CSV will be sorted by column {0} (reverse={1}" ")".format(self.col_for_sorting, self["bigger_scores_better"]) ) for feat in self["columns_as_features"]: if feat in reader.fieldnames and feat != self.col_for_sorting: self.used_extra_fields.add(feat) def sort_by_rank(self, rowdict): score = float(rowdict[self.col_for_sorting]) spec_title = rowdict["Spectrum Title"] return (spec_title, score) @staticmethod def parse_protein_ids(csv_field, sep="<|>"): """ Turns the unified CSV column "Protein ID" into a set of all protein IDs. """ clean = csv_field.replace("decoy_", "").strip() prot_id_set = set(clean.split(sep)) return prot_id_set def count_intra_set_features(self): """ intra-set features as calculated by Percolator: - num_pep: Number of PSMs for which this is the best scoring peptide. - num_prot: Number of times the matched protein matches other PSMs. - pep_site: Number of different peptides that match this protein. 
own ideas: - pep_charge_states: in how many charge states was the peptide found? - seq_mods: in how many mod states was the AA-sequence found? - num_spec: Number of times the matched spectrum matches other peptides. """ print("Counting intra-set features...") self.num_pep = defaultdict(int) self.num_prot = defaultdict(set) self.pep_site = defaultdict(set) self.score_list_dict = defaultdict(list) self.pep_charge_states = defaultdict(set) self.seq_mods = defaultdict(set) self.num_spec = defaultdict(set) with open(self.csv_path, "r") as f: reader = csv.DictReader(f) previous_spec_title = None rows_of_spectrum = [] for row in sorted( reader, reverse=self["bigger_scores_better"], key=self.sort_by_rank ): if unify_sequence(row["Sequence"]) in self.shitty_decoy_seqs: continue current_spec_title = row["Spectrum Title"] if current_spec_title != previous_spec_title: # the next spectrum started, so let's process the info we # collected for the previous spectrum: score_list = [ field_to_bayes_float(r[self.col_for_sorting]) for r in rows_of_spectrum ] self.score_list_dict[previous_spec_title] = score_list for rank, line in enumerate(rows_of_spectrum): # print("\t".join([ # str(rank), line['Spectrum Title'], line[self.col_for_sorting] # ])) uni_sequence = unify_sequence(line["Sequence"]) peptide = (uni_sequence, line["Modifications"]) # multiple proteins are separated by <|> # ignore start_stop_pre_post part since it depends on the peptide # and not the protein (i.e. 
_233_243_A_R) proteins = set( line["Protein ID"].replace("decoy_", "").split(";") ) # old unify csv format: # proteins = self.parse_protein_ids( # line['proteinacc_start_stop_pre_post_;'] # ) if len(proteins) > self.maximum_proteins_per_line: self.maximum_proteins_per_line = len(proteins) if rank == 0: # this is the 'best' peptide for that spectrum self.num_pep[peptide] += 1 for protein in proteins: self.num_prot[protein].add( ( line["Spectrum Title"], uni_sequence, line["Modifications"], ) ) self.pep_site[protein].add(peptide) self.pep_charge_states[peptide].add(int(row["Charge"])) self.seq_mods[uni_sequence].add(row["Modifications"]) self.num_spec[line["Spectrum Title"]].add(peptide) rows_of_spectrum = [] rows_of_spectrum.append(row) previous_spec_title = current_spec_title def
(self, row): """ Converts a unified CSV row to a SVM feature matrix (numbers only!) """ sequence = unify_sequence(row["Sequence"]) charge = field_to_float(row["Charge"]) score = field_to_bayes_float(row[self.col_for_sorting]) calc_mz, exp_mz, calc_mass, exp_mass = get_mz_values(row) # calc_mz = field_to_float( row['Calc m/z'] ) # calc m/z or uCalc? # exp_mz = field_to_float( row['Exp m/z'] ) pre_aa_field = row["Sequence Pre AA"] post_aa_field = row["Sequence Post AA"] all_pre_aas = set(re.split(self.delim_regex, pre_aa_field)) all_post_aas = set(re.split(self.delim_regex, post_aa_field)) if any(pre_aa not in self.tryptic_aas for pre_aa in all_pre_aas): enzN = 0 else: enzN = 1 if any(post_aa not in self.tryptic_aas for post_aa in all_post_aas): enzC = 0 else: enzC = 1 n_missed_cleavages = len( [aa for aa in sequence[:-1] if aa in ["R", "K"]] ) # / len(sequence) missed_cleavages = [0] * 6 try: missed_cleavages[n_missed_cleavages] = 1 except IndexError: # if a peptide has more than 6 missed cleavages missed_cleavages[-1] = 2 spectrum = row["Spectrum Title"].strip() mass = (exp_mz * charge) - (charge - 1) * PROTON pep_len = len(sequence) # delta_mz = calc_mz - exp_mz delta_mass = calc_mass - exp_mass peptide = (sequence, row["Modifications"]) proteins = self.parse_protein_ids(row["Protein ID"]) num_pep = self.num_pep[peptide] pep_charge_states = len(self.pep_charge_states[peptide]) seq_mods = len(self.seq_mods[sequence]) num_spec = len(self.num_spec[row["Spectrum Title"]]) num_prot = sum((len(self.num_prot[protein]) for protein in proteins)) pep_site = sum((len(self.pep_site[protein]) for protein in proteins)) user_specified_features = [] for feat in self.used_extra_fields: if feat != self.col_for_sorting: try: user_specified_features.append(field_to_float(row[feat])) except ValueError: pass charges = defaultdict(int) for charge_n in sorted(self.pep_charge_states[peptide]): charges[charge_n] = 1 if sequence in self.shitty_decoy_seqs: is_shitty = 1 else: is_shitty = 0 
score_list = sorted( list(set(self.score_list_dict[spectrum])), reverse=self["bigger_scores_better"], ) try: score_list_scaled = scale_scores(score_list) rank = score_list.index(score) deltLCn = ( score_list_scaled[rank] - score_list_scaled[1] ) # Fractional difference between current and second best XCorr deltCn = ( score_list_scaled[rank] - score_list_scaled[-1] ) # Fractional difference between current and worst XCorr except (ValueError, IndexError, AssertionError): # NaN values will be replaced by the column mean later # NaN values are entered when there is no ranking # e.g. when only one peptide was matched to the spectrum. rank, deltLCn, deltCn = np.nan, np.nan, np.nan features = [ score, rank, deltCn, deltLCn, charge, # delta_mz,# / pep_len, delta_mass, # / pep_len, # abs(delta_mz),# / pep_len, abs(delta_mass), # / pep_len, n_missed_cleavages / pep_len, missed_cleavages[0], missed_cleavages[1], missed_cleavages[2], missed_cleavages[3], missed_cleavages[4], missed_cleavages[5], enzN, enzC, mass, pep_len, num_pep, num_prot, pep_site, is_shitty, pep_charge_states, num_spec, seq_mods, ] for charge_n in self.observed_charges: features.append(charges[charge_n]) return features + user_specified_features def collect_data(self): """ parses a unified csv file and collects features from each row """ categories = [] list_of_feature_lists = [] feature_sets = set() with open(self.csv_path, "r") as f: reader = csv.DictReader(f) # collecting some stats for FDR calculation: self.PSM_count = 0 self.decoy_count = 0 if self["dump_svm_matrix"]: self.init_svm_matrix_dump() additional_matrix_info = [] for i, row in enumerate( sorted( reader, reverse=self["bigger_scores_better"], key=lambda d: float(d[self.col_for_sorting]), ) ): features = self.row_to_features(row) if tuple(features) in feature_sets: continue feature_sets.add(tuple(features)) category, psm_FDR = self.get_psm_category(row) list_of_feature_lists.append(features) categories.append(category) if 
self["dump_svm_matrix"]: label = -1 if row_is_decoy(row) else 1 sequence = "{0}.{1}#{2}.{3}".format( row["Sequence Pre AA"].strip(), row["Sequence"].strip(), row["Modifications"].strip(), row["Sequence Post AA"].strip(), ) additional_matrix_info.append( { "psm_id": row["Spectrum Title"].strip(), "label": label, "scannr": row["Spectrum Title"].strip().split(".")[-2], "peptide": sequence, "proteins": self.parse_protein_ids(row["Protein ID"]), } ) if i % 1000 == 0: score_val = float(row[self.col_for_sorting]) msg = ( "Generating feature matrix from input csv " "(line ~{0}) with score {1} and FDR " "{2}".format(i, score_val, psm_FDR) ) print(msg, end="\r") # All data points are collected in one big matrix, to make standardization possible print("\nConverting feature matrix to NumPy array...") X_raw = np.array(list_of_feature_lists, dtype=float) print("Replacing empty/NaN values with the mean of each column...") self.nan_replacer = Imputer() self.nan_replacer.fit(X_raw) X_raw = self.nan_replacer.transform(X_raw) # Standardize input matrix to ease machine learning! 
Scaled data has zero mean and unit variance print("Standardizing input matrix...") self.scaler = SCALER.fit(X_raw) self.X = self.scaler.transform(X_raw) self.categories = np.array(categories) print() if self["dump_svm_matrix"]: print("Dumping SVM matrix to", self["dump_svm_matrix"]) for i, matrix_row in enumerate(self.X): matrix_row_info = additional_matrix_info[i] self.dump_svm_matrix_row( row=list(matrix_row), psm_id=matrix_row_info["psm_id"], label=matrix_row_info["label"], scannr=matrix_row_info["scannr"], peptide=matrix_row_info["peptide"], proteins=matrix_row_info["proteins"], ) print("Dumped SVM matrix to", self["dump_svm_matrix"]) return def init_svm_matrix_dump(self): from misc import FEATURE_NAMES colnames = ["PSMId", "label", "scannr"] + FEATURE_NAMES colnames += ["charge{0}".format(c) for c in self.observed_charges] for extra_field in sorted(self.used_extra_fields): colnames += [extra_field] colnames += ["peptide"] for n_proteins in range(self.maximum_proteins_per_line): colnames.append("proteinId{0}".format(n_proteins + 1)) self.matrix_csv_path = self["dump_svm_matrix"] print("Dumping raw SVM input matrix to", self.matrix_csv_path) with open(self.matrix_csv_path, "w") as f: f.write("\t".join(colnames) + "\n") def dump_svm_matrix_row( self, row=None, psm_id=None, label=None, scannr=None, peptide=None, proteins=None, ): full_row = [psm_id, label, scannr] + row + [peptide] + list(proteins) with open(self.matrix_csv_path, "a") as f: row_str = "\t".join(str(x) for x in full_row) + "\n" f.write(row_str) def get_psm_category(self, row): """ Determines whether a PSM (csv row) should be used as a negative or positive training example. 
returns 1 - high-scoring target (positive training example) 0 - not-high-scoring target (not usable for training) -1 - decoy (negative training example) """ category = 0 # unknown (mix of true positives and false positives) self.PSM_count += 1 # for FDR calculation sequence = unify_sequence(row["Sequence"]) psm_FDR = calc_FDR(self.PSM_count, self.decoy_count) if row_is_decoy(row): self.decoy_count += 1 if psm_FDR <= 0.25 and sequence not in self.shitty_decoy_seqs: category = -1 # decoy (false positive hits) self.counter["negative"] += 1 else: if not self.decoy_train_prob: need_max = self.counter["positive"] * 2 have = self.counter["negative"] still_there = self.counter["decoy"] - have prob = need_max / still_there if prob < 0.001: prob = 0.001 self.decoy_train_prob = prob print() print(self.counter) print("need max:", need_max) print("have:", have) print("still_there:", still_there) print("probability:", self.decoy_train_prob) print() if self.decoy_train_prob >= 1.0 or random() <= self.decoy_train_prob: category = -1 # decoy (false positive hits) self.counter["negative"] += 1 else: # row is target if psm_FDR <= self["fdr_cutoff"] and sequence not in self.shitty_decoy_seqs: category = 1 # high quality target (almost certainly true positives) self.counter["positive"] += 1 if category == 0: self.counter["unknown"] += 1 return (category, psm_FDR) def train(self, training_matrix, training_categories): counter = Counter(training_categories) msg = "Training {0} SVM on {1} target PSMs and {2} decoy PSMs" "...".format( self["kernel"], counter[1], counter[-1] ) print(msg, end="\r") # specify the classification method (rbf and linear SVC seem to work best and are quite fast) classifier = svm.SVC( C=self["c"], kernel=self["kernel"], probability=False, # we don't want to get probabilities later on -> faster cache_size=self["mb_ram"], # available RAM in megabytes # decision_function_shape = 'ovr', # doesn't seem to matter # class_weight= 'balanced', # doesn't seem to matter ) # 
train the SVC on our set of training data: classifier.fit( training_matrix, training_categories, ) print(msg + " done!") return classifier def classify(self, classifier, psm_matrix): msg = "Classifying {0} PSMs...".format(len(psm_matrix)) print(msg, end="\r") for i, row in enumerate(psm_matrix): # get the distance to the separating SVM hyperplane and use it as a score: svm_score = classifier.decision_function(np.array([row]))[0] features = tuple(row) if features not in self.results: self.results[features] = svm_score else: print( "Warning! This combination of features already has a predicted probability! " "Previous svm_score: {0:f} - Current svm_score: {1:f}" "".format(self.results[tuple(row)], svm_score) ) # take the mean value, no idea how to handle this better, but it never happened so far... self.results[features] = (self.results[features] + svm_score) / 2.0 print(msg + " done!") return def add_scores_to_csv(self): outfname = os.path.basename(self["output_csv"]) print("Writing output csv {0} ...".format(outfname)) msg = "Writing output csv {0} (line ~{1})..." 
with open(self["output_csv"], "w", newline="") as out_csv, open( self.csv_path, "r" ) as in_csv: reader = csv.DictReader(in_csv) writer = csv.DictWriter(out_csv, reader.fieldnames + [self._svm_score_name]) writer.writeheader() for i, row in enumerate(reader): if i % 1000 == 0: print(msg.format(outfname, i), end="\r") features = self.nan_replacer.transform( np.array([self.row_to_features(row)]) ) features_scaled = tuple(list(self.scaler.transform(features)[0])) SVMScore = self.results[features_scaled] row[self._svm_score_name] = SVMScore writer.writerow(row) print("\n") return def __str__(self): out_str = ["\n\tpyPercolator Options:"] for option, value in self.items(): out_str.append("{0:·<25}{1}".format(option, value)) return "\n".join(out_str) if __name__ == "__main__": s = SVMWrapper() print(s) # print parameter/settings overview s.determine_csv_sorting() s.find_shitty_decoys() print("\nCounter:") print(s.counter) print() s.count_intra_set_features() s.collect_data() print( "Splitting data in half to avoid training and testing on the same features..." ) skfold = StratifiedKFold(s.categories, n_folds=2, shuffle=True) # use one half to score the other half, and vice versa: for i, (train_index, test_index) in enumerate(skfold): current_half = "1st" if i == 0 else "2nd" other_half = "2nd" if i == 0 else "1st" print( "\nUsing high-scoring PSMs and decoys of the {0} half to train...".format( current_half ) ) mask = s.categories[train_index] != 0 train_categories = s.categories[train_index][mask] train_features = s.X[train_index][mask] svm_classifier = s.train( training_matrix=train_features, training_categories=train_categories, ) print( "Using the trained SVM to classify all PSMs of the {0} half".format( other_half ) ) s.classify( svm_classifier, s.X[test_index], ) if s["kernel"].lower() == "linear": print() # print SVM coefficients (only works for linear kernel) print(svm_classifier.coef_) print() print("\nCounter:") print(s.counter) print() s.add_scores_to_csv()
row_to_features
identifier_name
exec.rs
// Copyright 2018 Grove Enterprises LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::collections::HashMap; use std::io::Error; use std::io::BufReader; use std::io::prelude::*; use std::iter::Iterator; use std::fs::File; use std::string::String; use std::convert::*; extern crate csv; use super::csv::StringRecord; use super::api::*; use super::rel::*; use super::parser::*; use super::sqltorel::*; use super::dataframe::*; use super::functions::math::*; use super::functions::geospatial::*; #[derive(Debug)] pub enum ExecutionError { IoError(Error), CsvError(csv::Error), ParserError(ParserError), Custom(String) } impl From<Error> for ExecutionError { fn from(e: Error) -> Self { ExecutionError::IoError(e) } } impl From<String> for ExecutionError { fn from(e: String) -> Self { ExecutionError::Custom(e) } } impl From<ParserError> for ExecutionError { fn from(e: ParserError) -> Self { ExecutionError::ParserError(e) } } /// Represents a csv file with a known schema #[derive(Debug)] pub struct CsvRelation { file: File, schema: Schema } pub struct FilterRelation { schema: Schema, input: Box<SimpleRelation>, expr: Expr } pub struct ProjectRelation { schema: Schema, input: Box<SimpleRelation>, expr: Vec<Expr> } pub struct LimitRelation { schema: Schema, input: Box<SimpleRelation>, limit: usize, } impl<'a> CsvRelation { pub fn open(file: File, schema: Schema) -> Result<Self,ExecutionError> { Ok(CsvRelation { file, schema }) } /// Convert StringRecord into our internal 
tuple type based on the known schema fn create_tuple(&self, r: &StringRecord) -> Result<Row,ExecutionError> { assert_eq!(self.schema.columns.len(), r.len()); let values = self.schema.columns.iter().zip(r.into_iter()).map(|(c,s)| match c.data_type { //TODO: remove unwrap use here DataType::UnsignedLong => Value::UnsignedLong(s.parse::<u64>().unwrap()), DataType::String => Value::String(s.to_string()), DataType::Double => Value::Double(s.parse::<f64>().unwrap()), _ => panic!("csv unsupported type") }).collect(); Ok(Row::new(values)) } } /// trait for all relations (a relation is essentially just an iterator over tuples with /// a known schema) pub trait SimpleRelation { /// scan all records in this relation fn scan<'a>(&'a self, ctx: &'a ExecutionContext) -> Box<Iterator<Item=Result<Row,ExecutionError>> + 'a>; /// get the schema for this relation fn schema<'a>(&'a self) -> &'a Schema; } impl SimpleRelation for CsvRelation { fn scan<'a>(&'a self, _ctx: &'a ExecutionContext) -> Box<Iterator<Item=Result<Row,ExecutionError>> + 'a> { let buf_reader = BufReader::new(&self.file); let csv_reader = csv::Reader::from_reader(buf_reader); let record_iter = csv_reader.into_records(); let tuple_iter = record_iter.map(move|r| match r { Ok(record) => self.create_tuple(&record), Err(e) => Err(ExecutionError::CsvError(e)) }); Box::new(tuple_iter) } fn schema<'a>(&'a self) -> &'a Schema { &self.schema } } impl SimpleRelation for FilterRelation { fn scan<'a>(&'a self, ctx: &'a ExecutionContext) -> Box<Iterator<Item=Result<Row, ExecutionError>> + 'a> { Box::new(self.input.scan(ctx).filter(move|t| match t { &Ok(ref tuple) => match ctx.evaluate(tuple, &self.schema, &self.expr) { Ok(Value::Boolean(b)) => b, _ => panic!("Predicate expression evaluated to non-boolean value") }, _ => true // let errors through the filter so they can be handled later } )) } fn schema<'a>(&'a self) -> &'a Schema { &self.schema } } impl SimpleRelation for ProjectRelation { fn scan<'a>(&'a self, ctx: &'a 
ExecutionContext) -> Box<Iterator<Item=Result<Row, ExecutionError>> + 'a> { let foo = self.input.scan(ctx).map(move|r| match r { Ok(tuple) => { let values = self.expr.iter() .map(|e| match e { &Expr::TupleValue(i) => tuple.values[i].clone(), //TODO: relation delegating back to execution context seems wrong way around _ => ctx.evaluate(&tuple,&self.schema, e).unwrap() //TODO: remove unwrap //unimplemented!("Unsupported expression for projection") }) .collect(); Ok(Row::new(values)) }, Err(_) => r }); Box::new(foo) } fn schema<'a>(&'a self) -> &'a Schema { &self.schema } } impl SimpleRelation for LimitRelation { fn scan<'a>(&'a self, ctx: &'a ExecutionContext) -> Box<Iterator<Item=Result<Row, ExecutionError>> + 'a> { Box::new(self.input.scan(ctx).take(self.limit)) } fn schema<'a>(&'a self) -> &'a Schema { &self.schema } } /// Execution plans are sent to worker nodes for execution pub enum ExecutionPlan { /// Run a query and return the results to the client Interactive { plan: LogicalPlan }, /// Partition the relation Partition { plan: LogicalPlan, partition_count: usize, partition_expr: Expr } } #[derive(Debug,Clone)] pub struct ExecutionContext { schemas: HashMap<String, Schema>, functions: HashMap<String, FunctionMeta>, } impl ExecutionContext { pub fn new() -> Self { ExecutionContext { schemas: HashMap::new(), functions: HashMap::new() } } pub fn define_schema(&mut self, name: &str, schema: &Schema) { self.schemas.insert(name.to_string(), schema.clone()); } pub fn define_function(&mut self, func: &ScalarFunction) { let fm = FunctionMeta { name: func.name(), args: func.args(), return_type: func.return_type() }; self.functions.insert(fm.name.to_lowercase(), fm); } pub fn sql(&self, sql: &str) -> Result<Box<DataFrame>, ExecutionError> { // parse SQL into AST let ast = Parser::parse_sql(String::from(sql))?; // create a query planner let query_planner = SqlToRel::new(self.schemas.clone()); //TODO: pass reference to schemas // plan the query (create a logical relational 
plan) let plan = query_planner.sql_to_rel(&ast)?; // return the DataFrame Ok(Box::new(DF { ctx: Box::new(self.clone()), plan: plan })) //TODO: don't clone context } /// Open a CSV file ///TODO: this is building a relational plan not an execution plan so shouldn't really be here pub fn load(&self, filename: &str, schema: &Schema) -> Result<Box<DataFrame>, ExecutionError> { let plan = LogicalPlan::CsvFile { filename: filename.to_string(), schema: schema.clone() }; Ok(Box::new(DF { ctx: Box::new((*self).clone()), plan: Box::new(plan) })) } pub fn register_table(&mut self, name: String, schema: Schema) { self.schemas.insert(name, schema); } pub fn create_execution_plan(&self, plan: &LogicalPlan) -> Result<Box<SimpleRelation>,ExecutionError> { match *plan { LogicalPlan::EmptyRelation => { panic!() }, LogicalPlan::TableScan { ref table_name, ref schema, .. } => { // for now, tables are csv files let file = File::open(format!("test/data/{}.csv", table_name))?; let rel = CsvRelation::open(file, schema.clone())?; Ok(Box::new(rel)) }, LogicalPlan::CsvFile { ref filename, ref schema } => { let file = File::open(filename)?; let rel = CsvRelation::open(file, schema.clone())?; Ok(Box::new(rel)) }, LogicalPlan::Selection { ref expr, ref input, ref schema } => { let input_rel = self.create_execution_plan(input)?; let rel = FilterRelation { input: input_rel, expr: expr.clone(), schema: schema.clone() }; Ok(Box::new(rel)) }, LogicalPlan::Projection { ref expr, ref input, .. } => { let input_rel = self.create_execution_plan(&input)?; let input_schema = input_rel.schema().clone(); //TODO: seems to be duplicate of sql_to_rel code let project_columns: Vec<Field> = expr.iter().map(|e| { match e { &Expr::TupleValue(i) => input_schema.columns[i].clone(), &Expr::ScalarFunction {ref name, .. } => Field { name: name.clone(), data_type: DataType::Double, //TODO: hard-coded .. 
no function metadata yet nullable: true }, _ => unimplemented!("Unsupported projection expression") } }).collect(); let project_schema = Schema { columns: project_columns }; let rel = ProjectRelation { input: input_rel, expr: expr.clone(), schema: project_schema, }; Ok(Box::new(rel)) } LogicalPlan::Limit { limit, ref input, ref schema, .. } => { let input_rel = self.create_execution_plan(input)?; let rel = LimitRelation { input: input_rel, limit: limit, schema: schema.clone() }; Ok(Box::new(rel)) } } } /// Evaluate a relational expression against a tuple pub fn evaluate(&self, tuple: &Row, tt: &Schema, rex: &Expr) -> Result<Value, Box<ExecutionError>> { match rex { &Expr::BinaryExpr { ref left, ref op, ref right } => { let left_value = self.evaluate(tuple, tt, left)?; let right_value = self.evaluate(tuple, tt, right)?; match op { &Operator::Eq => Ok(Value::Boolean(left_value == right_value)), &Operator::NotEq => Ok(Value::Boolean(left_value != right_value)), &Operator::Lt => Ok(Value::Boolean(left_value < right_value)), &Operator::LtEq => Ok(Value::Boolean(left_value <= right_value)), &Operator::Gt => Ok(Value::Boolean(left_value > right_value)), &Operator::GtEq => Ok(Value::Boolean(left_value >= right_value)), } }, &Expr::TupleValue(index) => Ok(tuple.values[index].clone()), &Expr::Literal(ref value) => Ok(value.clone()), &Expr::ScalarFunction { ref name, ref args } => { // evaluate the arguments to the function let arg_values : Vec<Value> = args.iter() .map(|a| self.evaluate(tuple, tt, &a)) .collect::<Result<Vec<Value>, Box<ExecutionError>>>()?; let func = self.load_function_impl(name.as_ref())?; match func.execute(arg_values) { Ok(value) => Ok(value), Err(_) => Err(Box::new(ExecutionError::Custom("TBD".to_string()))) //TODO: fix } } } } /// load a function implementation fn load_function_impl(&self, function_name: &str) -> Result<Box<ScalarFunction>,Box<ExecutionError>> { //TODO: this is a huge hack since the functions have already been registered with the // 
execution context ... I need to implement this so it dynamically loads the functions match function_name.to_lowercase().as_ref() { "sqrt" => Ok(Box::new(SqrtFunction {})), "st_point" => Ok(Box::new(STPointFunc {})), "st_astext" => Ok(Box::new(STAsText {})), _ => Err(Box::new(ExecutionError::Custom(format!("Unknown function {}", function_name)))) } } pub fn udf(&self, name: &str, args: Vec<Expr>) -> Expr { Expr::ScalarFunction { name: name.to_string(), args: args.clone() } } } pub struct DF { ctx: Box<ExecutionContext>, plan: Box<LogicalPlan> } impl DataFrame for DF { fn select(&self, expr: Vec<Expr>) -> Result<Box<DataFrame>, DataFrameError> { let plan = LogicalPlan::Projection { expr: expr, input: self.plan.clone(), schema: self.plan.schema().clone() }; Ok(Box::new(DF { ctx: self.ctx.clone(), plan: Box::new(plan) })) } fn filter(&self, expr: Expr) -> Result<Box<DataFrame>, DataFrameError> { let plan = LogicalPlan::Selection { expr: expr, input: self.plan.clone(), schema: self.plan.schema().clone() }; Ok(Box::new(DF { ctx: self.ctx.clone(), plan: Box::new(plan) })) } fn write(&self, filename: &str) -> Result<(), DataFrameError> { let execution_plan = self.ctx.create_execution_plan(&self.plan)?; // create output file // println!("Writing csv to {}", filename); let mut file = File::create(filename)?; // implement execution here for now but should be a common method for processing a plan let it = execution_plan.scan(&self.ctx); it.for_each(|t| { match t { Ok(tuple) => { let csv = format!("{}\n", tuple.to_string()); file.write(&csv.into_bytes()).unwrap(); //TODO: remove unwrap }, Err(e) => panic!(format!("Error processing tuple: {:?}", e)) //TODO: error handling } }); Ok(()) } fn col(&self, column_name: &str) -> Result<Expr, DataFrameError> { match self.plan.schema().column(column_name) { Some((i,_)) => Ok(Expr::TupleValue(i)), _ => Err(DataFrameError::InvalidColumn(column_name.to_string())) } } fn schema(&self) -> Schema { self.plan.schema().clone() } fn 
repartition(&self, _n: u32) -> Result<Box<DataFrame>, DataFrameError> { unimplemented!() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_sqrt() { let mut ctx = create_context(); ctx.define_function(&SqrtFunction {}); let df = ctx.sql(&"SELECT id, sqrt(id) FROM people").unwrap(); df.write("_sqrt_out.csv").unwrap(); //TODO: check that generated file has expected contents } #[test] fn test_sql_udf_udt() { let mut ctx = create_context(); ctx.define_function(&STPointFunc {}); let df = ctx.sql(&"SELECT ST_Point(lat, lng) FROM uk_cities").unwrap(); df.write("_uk_cities_sql.csv").unwrap(); //TODO: check that generated file has expected contents } #[test] fn test_df_udf_udt() { let mut ctx = create_context(); ctx.define_function(&STPointFunc {}); let schema = Schema::new(vec![ Field::new("city", DataType::String, false), Field::new("lat", DataType::Double, false), Field::new("lng", DataType::Double, false)]); let df = ctx.load("test/data/uk_cities.csv", &schema).unwrap(); // create an expression for invoking a scalar function // let func_expr = Expr::ScalarFunction { // name: "ST_Point".to_string(), // args: vec![df.col("lat").unwrap(), df.col("lng").unwrap()] // }; // invoke custom code as a scalar UDF let func_expr = ctx.udf("ST_Point",vec![ df.col("lat").unwrap(), df.col("lng").unwrap()] ); let df2 = df.select(vec![func_expr]).unwrap(); df2.write("_uk_cities_df.csv").unwrap(); //TODO: check that generated file has expected contents } #[test] fn test_chaining_functions() { let mut ctx = create_context();
df.write("_uk_cities_wkt.csv").unwrap(); //TODO: check that generated file has expected contents } fn create_context() -> ExecutionContext { // create execution context let mut ctx = ExecutionContext::new(); // define schemas for test data ctx.define_schema("people", &Schema::new(vec![ Field::new("id", DataType::UnsignedLong, false), Field::new("name", DataType::String, false)])); ctx.define_schema("uk_cities", &Schema::new(vec![ Field::new("city", DataType::String, false), Field::new("lat", DataType::Double, false), Field::new("lng", DataType::Double, false)])); ctx } }
ctx.define_function(&STPointFunc {}); let df = ctx.sql(&"SELECT ST_AsText(ST_Point(lat, lng)) FROM uk_cities").unwrap();
random_line_split
exec.rs
// Copyright 2018 Grove Enterprises LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::collections::HashMap; use std::io::Error; use std::io::BufReader; use std::io::prelude::*; use std::iter::Iterator; use std::fs::File; use std::string::String; use std::convert::*; extern crate csv; use super::csv::StringRecord; use super::api::*; use super::rel::*; use super::parser::*; use super::sqltorel::*; use super::dataframe::*; use super::functions::math::*; use super::functions::geospatial::*; #[derive(Debug)] pub enum ExecutionError { IoError(Error), CsvError(csv::Error), ParserError(ParserError), Custom(String) } impl From<Error> for ExecutionError { fn from(e: Error) -> Self { ExecutionError::IoError(e) } } impl From<String> for ExecutionError { fn from(e: String) -> Self { ExecutionError::Custom(e) } } impl From<ParserError> for ExecutionError { fn from(e: ParserError) -> Self { ExecutionError::ParserError(e) } } /// Represents a csv file with a known schema #[derive(Debug)] pub struct CsvRelation { file: File, schema: Schema } pub struct FilterRelation { schema: Schema, input: Box<SimpleRelation>, expr: Expr } pub struct ProjectRelation { schema: Schema, input: Box<SimpleRelation>, expr: Vec<Expr> } pub struct LimitRelation { schema: Schema, input: Box<SimpleRelation>, limit: usize, } impl<'a> CsvRelation { pub fn open(file: File, schema: Schema) -> Result<Self,ExecutionError> { Ok(CsvRelation { file, schema }) } /// Convert StringRecord into our internal 
tuple type based on the known schema fn create_tuple(&self, r: &StringRecord) -> Result<Row,ExecutionError> { assert_eq!(self.schema.columns.len(), r.len()); let values = self.schema.columns.iter().zip(r.into_iter()).map(|(c,s)| match c.data_type { //TODO: remove unwrap use here DataType::UnsignedLong => Value::UnsignedLong(s.parse::<u64>().unwrap()), DataType::String => Value::String(s.to_string()), DataType::Double => Value::Double(s.parse::<f64>().unwrap()), _ => panic!("csv unsupported type") }).collect(); Ok(Row::new(values)) } } /// trait for all relations (a relation is essentially just an iterator over tuples with /// a known schema) pub trait SimpleRelation { /// scan all records in this relation fn scan<'a>(&'a self, ctx: &'a ExecutionContext) -> Box<Iterator<Item=Result<Row,ExecutionError>> + 'a>; /// get the schema for this relation fn schema<'a>(&'a self) -> &'a Schema; } impl SimpleRelation for CsvRelation { fn scan<'a>(&'a self, _ctx: &'a ExecutionContext) -> Box<Iterator<Item=Result<Row,ExecutionError>> + 'a> { let buf_reader = BufReader::new(&self.file); let csv_reader = csv::Reader::from_reader(buf_reader); let record_iter = csv_reader.into_records(); let tuple_iter = record_iter.map(move|r| match r { Ok(record) => self.create_tuple(&record), Err(e) => Err(ExecutionError::CsvError(e)) }); Box::new(tuple_iter) } fn schema<'a>(&'a self) -> &'a Schema { &self.schema } } impl SimpleRelation for FilterRelation { fn scan<'a>(&'a self, ctx: &'a ExecutionContext) -> Box<Iterator<Item=Result<Row, ExecutionError>> + 'a> { Box::new(self.input.scan(ctx).filter(move|t| match t { &Ok(ref tuple) => match ctx.evaluate(tuple, &self.schema, &self.expr) { Ok(Value::Boolean(b)) => b, _ => panic!("Predicate expression evaluated to non-boolean value") }, _ => true // let errors through the filter so they can be handled later } )) } fn schema<'a>(&'a self) -> &'a Schema { &self.schema } } impl SimpleRelation for ProjectRelation { fn scan<'a>(&'a self, ctx: &'a 
ExecutionContext) -> Box<Iterator<Item=Result<Row, ExecutionError>> + 'a> { let foo = self.input.scan(ctx).map(move|r| match r { Ok(tuple) => { let values = self.expr.iter() .map(|e| match e { &Expr::TupleValue(i) => tuple.values[i].clone(), //TODO: relation delegating back to execution context seems wrong way around _ => ctx.evaluate(&tuple,&self.schema, e).unwrap() //TODO: remove unwrap //unimplemented!("Unsupported expression for projection") }) .collect(); Ok(Row::new(values)) }, Err(_) => r }); Box::new(foo) } fn schema<'a>(&'a self) -> &'a Schema { &self.schema } } impl SimpleRelation for LimitRelation { fn scan<'a>(&'a self, ctx: &'a ExecutionContext) -> Box<Iterator<Item=Result<Row, ExecutionError>> + 'a> { Box::new(self.input.scan(ctx).take(self.limit)) } fn schema<'a>(&'a self) -> &'a Schema { &self.schema } } /// Execution plans are sent to worker nodes for execution pub enum ExecutionPlan { /// Run a query and return the results to the client Interactive { plan: LogicalPlan }, /// Partition the relation Partition { plan: LogicalPlan, partition_count: usize, partition_expr: Expr } } #[derive(Debug,Clone)] pub struct ExecutionContext { schemas: HashMap<String, Schema>, functions: HashMap<String, FunctionMeta>, } impl ExecutionContext { pub fn new() -> Self { ExecutionContext { schemas: HashMap::new(), functions: HashMap::new() } } pub fn define_schema(&mut self, name: &str, schema: &Schema) { self.schemas.insert(name.to_string(), schema.clone()); } pub fn define_function(&mut self, func: &ScalarFunction) { let fm = FunctionMeta { name: func.name(), args: func.args(), return_type: func.return_type() }; self.functions.insert(fm.name.to_lowercase(), fm); } pub fn sql(&self, sql: &str) -> Result<Box<DataFrame>, ExecutionError> { // parse SQL into AST let ast = Parser::parse_sql(String::from(sql))?; // create a query planner let query_planner = SqlToRel::new(self.schemas.clone()); //TODO: pass reference to schemas // plan the query (create a logical relational 
plan) let plan = query_planner.sql_to_rel(&ast)?; // return the DataFrame Ok(Box::new(DF { ctx: Box::new(self.clone()), plan: plan })) //TODO: don't clone context } /// Open a CSV file ///TODO: this is building a relational plan not an execution plan so shouldn't really be here pub fn load(&self, filename: &str, schema: &Schema) -> Result<Box<DataFrame>, ExecutionError> { let plan = LogicalPlan::CsvFile { filename: filename.to_string(), schema: schema.clone() }; Ok(Box::new(DF { ctx: Box::new((*self).clone()), plan: Box::new(plan) })) } pub fn register_table(&mut self, name: String, schema: Schema) { self.schemas.insert(name, schema); } pub fn create_execution_plan(&self, plan: &LogicalPlan) -> Result<Box<SimpleRelation>,ExecutionError> { match *plan { LogicalPlan::EmptyRelation => { panic!() }, LogicalPlan::TableScan { ref table_name, ref schema, .. } => { // for now, tables are csv files let file = File::open(format!("test/data/{}.csv", table_name))?; let rel = CsvRelation::open(file, schema.clone())?; Ok(Box::new(rel)) }, LogicalPlan::CsvFile { ref filename, ref schema } => { let file = File::open(filename)?; let rel = CsvRelation::open(file, schema.clone())?; Ok(Box::new(rel)) }, LogicalPlan::Selection { ref expr, ref input, ref schema } => { let input_rel = self.create_execution_plan(input)?; let rel = FilterRelation { input: input_rel, expr: expr.clone(), schema: schema.clone() }; Ok(Box::new(rel)) }, LogicalPlan::Projection { ref expr, ref input, .. } => { let input_rel = self.create_execution_plan(&input)?; let input_schema = input_rel.schema().clone(); //TODO: seems to be duplicate of sql_to_rel code let project_columns: Vec<Field> = expr.iter().map(|e| { match e { &Expr::TupleValue(i) => input_schema.columns[i].clone(), &Expr::ScalarFunction {ref name, .. } => Field { name: name.clone(), data_type: DataType::Double, //TODO: hard-coded .. 
no function metadata yet nullable: true }, _ => unimplemented!("Unsupported projection expression") } }).collect(); let project_schema = Schema { columns: project_columns }; let rel = ProjectRelation { input: input_rel, expr: expr.clone(), schema: project_schema, }; Ok(Box::new(rel)) } LogicalPlan::Limit { limit, ref input, ref schema, .. } => { let input_rel = self.create_execution_plan(input)?; let rel = LimitRelation { input: input_rel, limit: limit, schema: schema.clone() }; Ok(Box::new(rel)) } } } /// Evaluate a relational expression against a tuple pub fn evaluate(&self, tuple: &Row, tt: &Schema, rex: &Expr) -> Result<Value, Box<ExecutionError>> { match rex { &Expr::BinaryExpr { ref left, ref op, ref right } => { let left_value = self.evaluate(tuple, tt, left)?; let right_value = self.evaluate(tuple, tt, right)?; match op { &Operator::Eq => Ok(Value::Boolean(left_value == right_value)), &Operator::NotEq => Ok(Value::Boolean(left_value != right_value)), &Operator::Lt => Ok(Value::Boolean(left_value < right_value)), &Operator::LtEq => Ok(Value::Boolean(left_value <= right_value)), &Operator::Gt => Ok(Value::Boolean(left_value > right_value)), &Operator::GtEq => Ok(Value::Boolean(left_value >= right_value)), } }, &Expr::TupleValue(index) => Ok(tuple.values[index].clone()), &Expr::Literal(ref value) => Ok(value.clone()), &Expr::ScalarFunction { ref name, ref args } => { // evaluate the arguments to the function let arg_values : Vec<Value> = args.iter() .map(|a| self.evaluate(tuple, tt, &a)) .collect::<Result<Vec<Value>, Box<ExecutionError>>>()?; let func = self.load_function_impl(name.as_ref())?; match func.execute(arg_values) { Ok(value) => Ok(value), Err(_) => Err(Box::new(ExecutionError::Custom("TBD".to_string()))) //TODO: fix } } } } /// load a function implementation fn load_function_impl(&self, function_name: &str) -> Result<Box<ScalarFunction>,Box<ExecutionError>> { //TODO: this is a huge hack since the functions have already been registered with the // 
execution context ... I need to implement this so it dynamically loads the functions match function_name.to_lowercase().as_ref() { "sqrt" => Ok(Box::new(SqrtFunction {})), "st_point" => Ok(Box::new(STPointFunc {})), "st_astext" => Ok(Box::new(STAsText {})), _ => Err(Box::new(ExecutionError::Custom(format!("Unknown function {}", function_name)))) } } pub fn udf(&self, name: &str, args: Vec<Expr>) -> Expr { Expr::ScalarFunction { name: name.to_string(), args: args.clone() } } } pub struct DF { ctx: Box<ExecutionContext>, plan: Box<LogicalPlan> } impl DataFrame for DF { fn
(&self, expr: Vec<Expr>) -> Result<Box<DataFrame>, DataFrameError> { let plan = LogicalPlan::Projection { expr: expr, input: self.plan.clone(), schema: self.plan.schema().clone() }; Ok(Box::new(DF { ctx: self.ctx.clone(), plan: Box::new(plan) })) } fn filter(&self, expr: Expr) -> Result<Box<DataFrame>, DataFrameError> { let plan = LogicalPlan::Selection { expr: expr, input: self.plan.clone(), schema: self.plan.schema().clone() }; Ok(Box::new(DF { ctx: self.ctx.clone(), plan: Box::new(plan) })) } fn write(&self, filename: &str) -> Result<(), DataFrameError> { let execution_plan = self.ctx.create_execution_plan(&self.plan)?; // create output file // println!("Writing csv to {}", filename); let mut file = File::create(filename)?; // implement execution here for now but should be a common method for processing a plan let it = execution_plan.scan(&self.ctx); it.for_each(|t| { match t { Ok(tuple) => { let csv = format!("{}\n", tuple.to_string()); file.write(&csv.into_bytes()).unwrap(); //TODO: remove unwrap }, Err(e) => panic!(format!("Error processing tuple: {:?}", e)) //TODO: error handling } }); Ok(()) } fn col(&self, column_name: &str) -> Result<Expr, DataFrameError> { match self.plan.schema().column(column_name) { Some((i,_)) => Ok(Expr::TupleValue(i)), _ => Err(DataFrameError::InvalidColumn(column_name.to_string())) } } fn schema(&self) -> Schema { self.plan.schema().clone() } fn repartition(&self, _n: u32) -> Result<Box<DataFrame>, DataFrameError> { unimplemented!() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_sqrt() { let mut ctx = create_context(); ctx.define_function(&SqrtFunction {}); let df = ctx.sql(&"SELECT id, sqrt(id) FROM people").unwrap(); df.write("_sqrt_out.csv").unwrap(); //TODO: check that generated file has expected contents } #[test] fn test_sql_udf_udt() { let mut ctx = create_context(); ctx.define_function(&STPointFunc {}); let df = ctx.sql(&"SELECT ST_Point(lat, lng) FROM uk_cities").unwrap(); 
df.write("_uk_cities_sql.csv").unwrap(); //TODO: check that generated file has expected contents } #[test] fn test_df_udf_udt() { let mut ctx = create_context(); ctx.define_function(&STPointFunc {}); let schema = Schema::new(vec![ Field::new("city", DataType::String, false), Field::new("lat", DataType::Double, false), Field::new("lng", DataType::Double, false)]); let df = ctx.load("test/data/uk_cities.csv", &schema).unwrap(); // create an expression for invoking a scalar function // let func_expr = Expr::ScalarFunction { // name: "ST_Point".to_string(), // args: vec![df.col("lat").unwrap(), df.col("lng").unwrap()] // }; // invoke custom code as a scalar UDF let func_expr = ctx.udf("ST_Point",vec![ df.col("lat").unwrap(), df.col("lng").unwrap()] ); let df2 = df.select(vec![func_expr]).unwrap(); df2.write("_uk_cities_df.csv").unwrap(); //TODO: check that generated file has expected contents } #[test] fn test_chaining_functions() { let mut ctx = create_context(); ctx.define_function(&STPointFunc {}); let df = ctx.sql(&"SELECT ST_AsText(ST_Point(lat, lng)) FROM uk_cities").unwrap(); df.write("_uk_cities_wkt.csv").unwrap(); //TODO: check that generated file has expected contents } fn create_context() -> ExecutionContext { // create execution context let mut ctx = ExecutionContext::new(); // define schemas for test data ctx.define_schema("people", &Schema::new(vec![ Field::new("id", DataType::UnsignedLong, false), Field::new("name", DataType::String, false)])); ctx.define_schema("uk_cities", &Schema::new(vec![ Field::new("city", DataType::String, false), Field::new("lat", DataType::Double, false), Field::new("lng", DataType::Double, false)])); ctx } }
select
identifier_name
exec.rs
// Copyright 2018 Grove Enterprises LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::collections::HashMap; use std::io::Error; use std::io::BufReader; use std::io::prelude::*; use std::iter::Iterator; use std::fs::File; use std::string::String; use std::convert::*; extern crate csv; use super::csv::StringRecord; use super::api::*; use super::rel::*; use super::parser::*; use super::sqltorel::*; use super::dataframe::*; use super::functions::math::*; use super::functions::geospatial::*; #[derive(Debug)] pub enum ExecutionError { IoError(Error), CsvError(csv::Error), ParserError(ParserError), Custom(String) } impl From<Error> for ExecutionError { fn from(e: Error) -> Self { ExecutionError::IoError(e) } } impl From<String> for ExecutionError { fn from(e: String) -> Self { ExecutionError::Custom(e) } } impl From<ParserError> for ExecutionError { fn from(e: ParserError) -> Self { ExecutionError::ParserError(e) } } /// Represents a csv file with a known schema #[derive(Debug)] pub struct CsvRelation { file: File, schema: Schema } pub struct FilterRelation { schema: Schema, input: Box<SimpleRelation>, expr: Expr } pub struct ProjectRelation { schema: Schema, input: Box<SimpleRelation>, expr: Vec<Expr> } pub struct LimitRelation { schema: Schema, input: Box<SimpleRelation>, limit: usize, } impl<'a> CsvRelation { pub fn open(file: File, schema: Schema) -> Result<Self,ExecutionError> { Ok(CsvRelation { file, schema }) } /// Convert StringRecord into our internal 
tuple type based on the known schema fn create_tuple(&self, r: &StringRecord) -> Result<Row,ExecutionError> { assert_eq!(self.schema.columns.len(), r.len()); let values = self.schema.columns.iter().zip(r.into_iter()).map(|(c,s)| match c.data_type { //TODO: remove unwrap use here DataType::UnsignedLong => Value::UnsignedLong(s.parse::<u64>().unwrap()), DataType::String => Value::String(s.to_string()), DataType::Double => Value::Double(s.parse::<f64>().unwrap()), _ => panic!("csv unsupported type") }).collect(); Ok(Row::new(values)) } } /// trait for all relations (a relation is essentially just an iterator over tuples with /// a known schema) pub trait SimpleRelation { /// scan all records in this relation fn scan<'a>(&'a self, ctx: &'a ExecutionContext) -> Box<Iterator<Item=Result<Row,ExecutionError>> + 'a>; /// get the schema for this relation fn schema<'a>(&'a self) -> &'a Schema; } impl SimpleRelation for CsvRelation { fn scan<'a>(&'a self, _ctx: &'a ExecutionContext) -> Box<Iterator<Item=Result<Row,ExecutionError>> + 'a> { let buf_reader = BufReader::new(&self.file); let csv_reader = csv::Reader::from_reader(buf_reader); let record_iter = csv_reader.into_records(); let tuple_iter = record_iter.map(move|r| match r { Ok(record) => self.create_tuple(&record), Err(e) => Err(ExecutionError::CsvError(e)) }); Box::new(tuple_iter) } fn schema<'a>(&'a self) -> &'a Schema { &self.schema } } impl SimpleRelation for FilterRelation { fn scan<'a>(&'a self, ctx: &'a ExecutionContext) -> Box<Iterator<Item=Result<Row, ExecutionError>> + 'a> { Box::new(self.input.scan(ctx).filter(move|t| match t { &Ok(ref tuple) => match ctx.evaluate(tuple, &self.schema, &self.expr) { Ok(Value::Boolean(b)) => b, _ => panic!("Predicate expression evaluated to non-boolean value") }, _ => true // let errors through the filter so they can be handled later } )) } fn schema<'a>(&'a self) -> &'a Schema { &self.schema } } impl SimpleRelation for ProjectRelation { fn scan<'a>(&'a self, ctx: &'a 
ExecutionContext) -> Box<Iterator<Item=Result<Row, ExecutionError>> + 'a> { let foo = self.input.scan(ctx).map(move|r| match r { Ok(tuple) => { let values = self.expr.iter() .map(|e| match e { &Expr::TupleValue(i) => tuple.values[i].clone(), //TODO: relation delegating back to execution context seems wrong way around _ => ctx.evaluate(&tuple,&self.schema, e).unwrap() //TODO: remove unwrap //unimplemented!("Unsupported expression for projection") }) .collect(); Ok(Row::new(values)) }, Err(_) => r }); Box::new(foo) } fn schema<'a>(&'a self) -> &'a Schema { &self.schema } } impl SimpleRelation for LimitRelation { fn scan<'a>(&'a self, ctx: &'a ExecutionContext) -> Box<Iterator<Item=Result<Row, ExecutionError>> + 'a> { Box::new(self.input.scan(ctx).take(self.limit)) } fn schema<'a>(&'a self) -> &'a Schema { &self.schema } } /// Execution plans are sent to worker nodes for execution pub enum ExecutionPlan { /// Run a query and return the results to the client Interactive { plan: LogicalPlan }, /// Partition the relation Partition { plan: LogicalPlan, partition_count: usize, partition_expr: Expr } } #[derive(Debug,Clone)] pub struct ExecutionContext { schemas: HashMap<String, Schema>, functions: HashMap<String, FunctionMeta>, } impl ExecutionContext { pub fn new() -> Self { ExecutionContext { schemas: HashMap::new(), functions: HashMap::new() } } pub fn define_schema(&mut self, name: &str, schema: &Schema) { self.schemas.insert(name.to_string(), schema.clone()); } pub fn define_function(&mut self, func: &ScalarFunction) { let fm = FunctionMeta { name: func.name(), args: func.args(), return_type: func.return_type() }; self.functions.insert(fm.name.to_lowercase(), fm); } pub fn sql(&self, sql: &str) -> Result<Box<DataFrame>, ExecutionError> { // parse SQL into AST let ast = Parser::parse_sql(String::from(sql))?; // create a query planner let query_planner = SqlToRel::new(self.schemas.clone()); //TODO: pass reference to schemas // plan the query (create a logical relational 
plan) let plan = query_planner.sql_to_rel(&ast)?; // return the DataFrame Ok(Box::new(DF { ctx: Box::new(self.clone()), plan: plan })) //TODO: don't clone context } /// Open a CSV file ///TODO: this is building a relational plan not an execution plan so shouldn't really be here pub fn load(&self, filename: &str, schema: &Schema) -> Result<Box<DataFrame>, ExecutionError> { let plan = LogicalPlan::CsvFile { filename: filename.to_string(), schema: schema.clone() }; Ok(Box::new(DF { ctx: Box::new((*self).clone()), plan: Box::new(plan) })) } pub fn register_table(&mut self, name: String, schema: Schema) { self.schemas.insert(name, schema); } pub fn create_execution_plan(&self, plan: &LogicalPlan) -> Result<Box<SimpleRelation>,ExecutionError> { match *plan { LogicalPlan::EmptyRelation => { panic!() }, LogicalPlan::TableScan { ref table_name, ref schema, .. } => { // for now, tables are csv files let file = File::open(format!("test/data/{}.csv", table_name))?; let rel = CsvRelation::open(file, schema.clone())?; Ok(Box::new(rel)) }, LogicalPlan::CsvFile { ref filename, ref schema } => { let file = File::open(filename)?; let rel = CsvRelation::open(file, schema.clone())?; Ok(Box::new(rel)) }, LogicalPlan::Selection { ref expr, ref input, ref schema } => { let input_rel = self.create_execution_plan(input)?; let rel = FilterRelation { input: input_rel, expr: expr.clone(), schema: schema.clone() }; Ok(Box::new(rel)) }, LogicalPlan::Projection { ref expr, ref input, .. } => { let input_rel = self.create_execution_plan(&input)?; let input_schema = input_rel.schema().clone(); //TODO: seems to be duplicate of sql_to_rel code let project_columns: Vec<Field> = expr.iter().map(|e| { match e { &Expr::TupleValue(i) => input_schema.columns[i].clone(), &Expr::ScalarFunction {ref name, .. } => Field { name: name.clone(), data_type: DataType::Double, //TODO: hard-coded .. 
no function metadata yet nullable: true }, _ => unimplemented!("Unsupported projection expression") } }).collect(); let project_schema = Schema { columns: project_columns }; let rel = ProjectRelation { input: input_rel, expr: expr.clone(), schema: project_schema, }; Ok(Box::new(rel)) } LogicalPlan::Limit { limit, ref input, ref schema, .. } => { let input_rel = self.create_execution_plan(input)?; let rel = LimitRelation { input: input_rel, limit: limit, schema: schema.clone() }; Ok(Box::new(rel)) } } } /// Evaluate a relational expression against a tuple pub fn evaluate(&self, tuple: &Row, tt: &Schema, rex: &Expr) -> Result<Value, Box<ExecutionError>> { match rex { &Expr::BinaryExpr { ref left, ref op, ref right } => { let left_value = self.evaluate(tuple, tt, left)?; let right_value = self.evaluate(tuple, tt, right)?; match op { &Operator::Eq => Ok(Value::Boolean(left_value == right_value)), &Operator::NotEq => Ok(Value::Boolean(left_value != right_value)), &Operator::Lt => Ok(Value::Boolean(left_value < right_value)), &Operator::LtEq => Ok(Value::Boolean(left_value <= right_value)), &Operator::Gt => Ok(Value::Boolean(left_value > right_value)), &Operator::GtEq => Ok(Value::Boolean(left_value >= right_value)), } }, &Expr::TupleValue(index) => Ok(tuple.values[index].clone()), &Expr::Literal(ref value) => Ok(value.clone()), &Expr::ScalarFunction { ref name, ref args } => { // evaluate the arguments to the function let arg_values : Vec<Value> = args.iter() .map(|a| self.evaluate(tuple, tt, &a)) .collect::<Result<Vec<Value>, Box<ExecutionError>>>()?; let func = self.load_function_impl(name.as_ref())?; match func.execute(arg_values) { Ok(value) => Ok(value), Err(_) => Err(Box::new(ExecutionError::Custom("TBD".to_string()))) //TODO: fix } } } } /// load a function implementation fn load_function_impl(&self, function_name: &str) -> Result<Box<ScalarFunction>,Box<ExecutionError>>
pub fn udf(&self, name: &str, args: Vec<Expr>) -> Expr { Expr::ScalarFunction { name: name.to_string(), args: args.clone() } } } pub struct DF { ctx: Box<ExecutionContext>, plan: Box<LogicalPlan> } impl DataFrame for DF { fn select(&self, expr: Vec<Expr>) -> Result<Box<DataFrame>, DataFrameError> { let plan = LogicalPlan::Projection { expr: expr, input: self.plan.clone(), schema: self.plan.schema().clone() }; Ok(Box::new(DF { ctx: self.ctx.clone(), plan: Box::new(plan) })) } fn filter(&self, expr: Expr) -> Result<Box<DataFrame>, DataFrameError> { let plan = LogicalPlan::Selection { expr: expr, input: self.plan.clone(), schema: self.plan.schema().clone() }; Ok(Box::new(DF { ctx: self.ctx.clone(), plan: Box::new(plan) })) } fn write(&self, filename: &str) -> Result<(), DataFrameError> { let execution_plan = self.ctx.create_execution_plan(&self.plan)?; // create output file // println!("Writing csv to {}", filename); let mut file = File::create(filename)?; // implement execution here for now but should be a common method for processing a plan let it = execution_plan.scan(&self.ctx); it.for_each(|t| { match t { Ok(tuple) => { let csv = format!("{}\n", tuple.to_string()); file.write(&csv.into_bytes()).unwrap(); //TODO: remove unwrap }, Err(e) => panic!(format!("Error processing tuple: {:?}", e)) //TODO: error handling } }); Ok(()) } fn col(&self, column_name: &str) -> Result<Expr, DataFrameError> { match self.plan.schema().column(column_name) { Some((i,_)) => Ok(Expr::TupleValue(i)), _ => Err(DataFrameError::InvalidColumn(column_name.to_string())) } } fn schema(&self) -> Schema { self.plan.schema().clone() } fn repartition(&self, _n: u32) -> Result<Box<DataFrame>, DataFrameError> { unimplemented!() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_sqrt() { let mut ctx = create_context(); ctx.define_function(&SqrtFunction {}); let df = ctx.sql(&"SELECT id, sqrt(id) FROM people").unwrap(); df.write("_sqrt_out.csv").unwrap(); //TODO: check that generated file has 
expected contents } #[test] fn test_sql_udf_udt() { let mut ctx = create_context(); ctx.define_function(&STPointFunc {}); let df = ctx.sql(&"SELECT ST_Point(lat, lng) FROM uk_cities").unwrap(); df.write("_uk_cities_sql.csv").unwrap(); //TODO: check that generated file has expected contents } #[test] fn test_df_udf_udt() { let mut ctx = create_context(); ctx.define_function(&STPointFunc {}); let schema = Schema::new(vec![ Field::new("city", DataType::String, false), Field::new("lat", DataType::Double, false), Field::new("lng", DataType::Double, false)]); let df = ctx.load("test/data/uk_cities.csv", &schema).unwrap(); // create an expression for invoking a scalar function // let func_expr = Expr::ScalarFunction { // name: "ST_Point".to_string(), // args: vec![df.col("lat").unwrap(), df.col("lng").unwrap()] // }; // invoke custom code as a scalar UDF let func_expr = ctx.udf("ST_Point",vec![ df.col("lat").unwrap(), df.col("lng").unwrap()] ); let df2 = df.select(vec![func_expr]).unwrap(); df2.write("_uk_cities_df.csv").unwrap(); //TODO: check that generated file has expected contents } #[test] fn test_chaining_functions() { let mut ctx = create_context(); ctx.define_function(&STPointFunc {}); let df = ctx.sql(&"SELECT ST_AsText(ST_Point(lat, lng)) FROM uk_cities").unwrap(); df.write("_uk_cities_wkt.csv").unwrap(); //TODO: check that generated file has expected contents } fn create_context() -> ExecutionContext { // create execution context let mut ctx = ExecutionContext::new(); // define schemas for test data ctx.define_schema("people", &Schema::new(vec![ Field::new("id", DataType::UnsignedLong, false), Field::new("name", DataType::String, false)])); ctx.define_schema("uk_cities", &Schema::new(vec![ Field::new("city", DataType::String, false), Field::new("lat", DataType::Double, false), Field::new("lng", DataType::Double, false)])); ctx } }
{ //TODO: this is a huge hack since the functions have already been registered with the // execution context ... I need to implement this so it dynamically loads the functions match function_name.to_lowercase().as_ref() { "sqrt" => Ok(Box::new(SqrtFunction {})), "st_point" => Ok(Box::new(STPointFunc {})), "st_astext" => Ok(Box::new(STAsText {})), _ => Err(Box::new(ExecutionError::Custom(format!("Unknown function {}", function_name)))) } }
identifier_body
refresh.go
// Copyright 2016-2022, Pulumi Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "context" "errors" "fmt" "os" "strings" survey "github.com/AlecAivazis/survey/v2" terminal "github.com/AlecAivazis/survey/v2/terminal" "github.com/spf13/cobra" "github.com/pulumi/pulumi/pkg/v3/backend" "github.com/pulumi/pulumi/pkg/v3/backend/display" "github.com/pulumi/pulumi/pkg/v3/engine" "github.com/pulumi/pulumi/pkg/v3/resource/deploy" "github.com/pulumi/pulumi/pkg/v3/resource/stack" "github.com/pulumi/pulumi/sdk/v3/go/common/apitype" "github.com/pulumi/pulumi/sdk/v3/go/common/diag/colors" "github.com/pulumi/pulumi/sdk/v3/go/common/resource" "github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil" "github.com/pulumi/pulumi/sdk/v3/go/common/util/result" "github.com/pulumi/pulumi/sdk/v3/go/common/workspace" ) func
() *cobra.Command { var debug bool var expectNop bool var message string var execKind string var execAgent string var stackName string // Flags for remote operations. remoteArgs := RemoteArgs{} // Flags for engine.UpdateOptions. var jsonDisplay bool var diffDisplay bool var eventLogPath string var parallel int var showConfig bool var showReplacementSteps bool var showSames bool var skipPreview bool var suppressOutputs bool var suppressPermalink string var yes bool var targets *[]string // Flags for handling pending creates var skipPendingCreates bool var clearPendingCreates bool var importPendingCreates *[]string use, cmdArgs := "refresh", cmdutil.NoArgs if remoteSupported() { use, cmdArgs = "refresh [url]", cmdutil.MaximumNArgs(1) } cmd := &cobra.Command{ Use: use, Short: "Refresh the resources in a stack", Long: "Refresh the resources in a stack.\n" + "\n" + "This command compares the current stack's resource state with the state known to exist in\n" + "the actual cloud provider. Any such changes are adopted into the current stack. Note that if\n" + "the program text isn't updated accordingly, subsequent updates may still appear to be out of\n" + "synch with respect to the cloud provider's source of truth.\n" + "\n" + "The program to run is loaded from the project in the current directory. Use the `-C` or\n" + "`--cwd` flag to use a different directory.", Args: cmdArgs, Run: cmdutil.RunResultFunc(func(cmd *cobra.Command, args []string) result.Result { ctx := commandContext() // Remote implies we're skipping previews. 
if remoteArgs.remote { skipPreview = true } yes = yes || skipPreview || skipConfirmations() interactive := cmdutil.Interactive() if !interactive && !yes { return result.FromError( errors.New("--yes or --skip-preview must be passed in to proceed when running in non-interactive mode")) } opts, err := updateFlagsToOptions(interactive, skipPreview, yes) if err != nil { return result.FromError(err) } displayType := display.DisplayProgress if diffDisplay { displayType = display.DisplayDiff } opts.Display = display.Options{ Color: cmdutil.GetGlobalColorization(), ShowConfig: showConfig, ShowReplacementSteps: showReplacementSteps, ShowSameResources: showSames, SuppressOutputs: suppressOutputs, IsInteractive: interactive, Type: displayType, EventLogPath: eventLogPath, Debug: debug, JSONDisplay: jsonDisplay, } // we only suppress permalinks if the user passes true. the default is an empty string // which we pass as 'false' if suppressPermalink == "true" { opts.Display.SuppressPermalink = true } else { opts.Display.SuppressPermalink = false } if remoteArgs.remote { if len(args) == 0 { return result.FromError(errors.New("must specify remote URL")) } err = validateUnsupportedRemoteFlags(expectNop, nil, false, "", jsonDisplay, nil, nil, "", showConfig, showReplacementSteps, showSames, false, suppressOutputs, "default", targets, nil, nil, false, "", stackConfigFile) if err != nil { return result.FromError(err) } return runDeployment(ctx, opts.Display, apitype.Refresh, stackName, args[0], remoteArgs) } filestateBackend, err := isFilestateBackend(opts.Display) if err != nil { return result.FromError(err) } // by default, we are going to suppress the permalink when using self-managed backends // this can be re-enabled by explicitly passing "false" to the `suppress-permalink` flag if suppressPermalink != "false" && filestateBackend { opts.Display.SuppressPermalink = true } s, err := requireStack(ctx, stackName, stackOfferNew, opts.Display) if err != nil { return result.FromError(err) 
} proj, root, err := readProject() if err != nil { return result.FromError(err) } m, err := getUpdateMetadata(message, root, execKind, execAgent, false, cmd.Flags()) if err != nil { return result.FromError(fmt.Errorf("gathering environment metadata: %w", err)) } cfg, sm, err := getStackConfiguration(ctx, s, proj, nil) if err != nil { return result.FromError(fmt.Errorf("getting stack configuration: %w", err)) } decrypter, err := sm.Decrypter() if err != nil { return result.FromError(fmt.Errorf("getting stack decrypter: %w", err)) } stackName := s.Ref().Name().String() configErr := workspace.ValidateStackConfigAndApplyProjectConfig(stackName, proj, cfg.Config, decrypter) if configErr != nil { return result.FromError(fmt.Errorf("validating stack config: %w", configErr)) } if skipPendingCreates && clearPendingCreates { return result.FromError(fmt.Errorf( "cannot set both --skip-pending-creates and --clear-pending-creates")) } // First we handle explicit create->imports we were given if importPendingCreates != nil && len(*importPendingCreates) > 0 { stderr := opts.Display.Stderr if stderr == nil { stderr = os.Stderr } if unused, result := pendingCreatesToImports(ctx, s, yes, opts.Display, *importPendingCreates); result != nil { return result } else if len(unused) > 1 { fmt.Fprintf(stderr, "%s\n- \"%s\"\n", opts.Display.Color.Colorize(colors.Highlight( "warning: the following urns did not correspond to a pending create", "warning", colors.SpecWarning)), strings.Join(unused, "\"\n- \"")) } else if len(unused) > 0 { fmt.Fprintf(stderr, "%s: \"%s\" did not correspond to a pending create\n", opts.Display.Color.Colorize(colors.Highlight("warning", "warning", colors.SpecWarning)), unused[0]) } } snap, err := s.Snapshot(ctx, stack.DefaultSecretsProvider) if err != nil { return result.FromError(fmt.Errorf("getting snapshot: %w", err)) } // We then allow the user to interactively handle remaining pending creates. 
if interactive && hasPendingCreates(snap) && !skipPendingCreates { if result := filterMapPendingCreates(ctx, s, opts.Display, yes, interactiveFixPendingCreate); result != nil { return result } } // We remove remaining pending creates if clearPendingCreates && hasPendingCreates(snap) { // Remove all pending creates. removePendingCreates := func(op resource.Operation) (*resource.Operation, error) { return nil, nil } result := filterMapPendingCreates(ctx, s, opts.Display, yes, removePendingCreates) if result != nil { return result } } targetUrns := []string{} targetUrns = append(targetUrns, *targets...) opts.Engine = engine.UpdateOptions{ Parallel: parallel, Debug: debug, UseLegacyDiff: useLegacyDiff(), DisableProviderPreview: disableProviderPreview(), DisableResourceReferences: disableResourceReferences(), DisableOutputValues: disableOutputValues(), Targets: deploy.NewUrnTargets(targetUrns), Experimental: hasExperimentalCommands(), } changes, res := s.Refresh(ctx, backend.UpdateOperation{ Proj: proj, Root: root, M: m, Opts: opts, StackConfiguration: cfg, SecretsManager: sm, SecretsProvider: stack.DefaultSecretsProvider, Scopes: backend.CancellationScopes, }) switch { case res != nil && res.Error() == context.Canceled: return result.FromError(errors.New("refresh cancelled")) case res != nil: return PrintEngineResult(res) case expectNop && changes != nil && engine.HasChanges(changes): return result.FromError(errors.New("error: no changes were expected but changes occurred")) default: return nil } }), } cmd.PersistentFlags().BoolVarP( &debug, "debug", "d", false, "Print detailed debugging output during resource operations") cmd.PersistentFlags().BoolVar( &expectNop, "expect-no-changes", false, "Return an error if any changes occur during this update") cmd.PersistentFlags().StringVarP( &stackName, "stack", "s", "", "The name of the stack to operate on. 
Defaults to the current stack") cmd.PersistentFlags().StringVar( &stackConfigFile, "config-file", "", "Use the configuration values in the specified file rather than detecting the file name") cmd.PersistentFlags().StringVarP( &message, "message", "m", "", "Optional message to associate with the update operation") targets = cmd.PersistentFlags().StringArrayP( "target", "t", []string{}, "Specify a single resource URN to refresh. Multiple resource can be specified using: --target urn1 --target urn2") // Flags for engine.UpdateOptions. cmd.PersistentFlags().BoolVar( &diffDisplay, "diff", false, "Display operation as a rich diff showing the overall change") cmd.Flags().BoolVarP( &jsonDisplay, "json", "j", false, "Serialize the refresh diffs, operations, and overall output as JSON") cmd.PersistentFlags().IntVarP( &parallel, "parallel", "p", defaultParallel, "Allow P resource operations to run in parallel at once (1 for no parallelism). Defaults to unbounded.") cmd.PersistentFlags().BoolVar( &showReplacementSteps, "show-replacement-steps", false, "Show detailed resource replacement creates and deletes instead of a single step") cmd.PersistentFlags().BoolVar( &showSames, "show-sames", false, "Show resources that needn't be updated because they haven't changed, alongside those that do") cmd.PersistentFlags().BoolVarP( &skipPreview, "skip-preview", "f", false, "Do not calculate a preview before performing the refresh") cmd.PersistentFlags().BoolVar( &suppressOutputs, "suppress-outputs", false, "Suppress display of stack outputs (in case they contain sensitive values)") cmd.PersistentFlags().StringVar( &suppressPermalink, "suppress-permalink", "", "Suppress display of the state permalink") cmd.Flag("suppress-permalink").NoOptDefVal = "false" cmd.PersistentFlags().BoolVarP( &yes, "yes", "y", false, "Automatically approve and perform the refresh after previewing it") // Flags for pending creates cmd.PersistentFlags().BoolVar( &skipPendingCreates, "skip-pending-creates", false, 
"Skip importing pending creates in interactive mode") cmd.PersistentFlags().BoolVar( &clearPendingCreates, "clear-pending-creates", false, "Clear all pending creates, dropping them from the state") importPendingCreates = cmd.PersistentFlags().StringArray( "import-pending-creates", nil, "A list of form [[URN ID]...] describing the provider IDs of pending creates") // Remote flags remoteArgs.applyFlags(cmd) if hasDebugCommands() { cmd.PersistentFlags().StringVar( &eventLogPath, "event-log", "", "Log events to a file at this path") } // internal flags cmd.PersistentFlags().StringVar(&execKind, "exec-kind", "", "") // ignore err, only happens if flag does not exist _ = cmd.PersistentFlags().MarkHidden("exec-kind") cmd.PersistentFlags().StringVar(&execAgent, "exec-agent", "", "") // ignore err, only happens if flag does not exist _ = cmd.PersistentFlags().MarkHidden("exec-agent") return cmd } type editPendingOp = func(op resource.Operation) (*resource.Operation, error) // filterMapPendingCreates applies f to each pending create. If f returns nil, then the op // is deleted. Otherwise is is replaced by the returned op. func filterMapPendingCreates( ctx context.Context, s backend.Stack, opts display.Options, yes bool, f editPendingOp, ) result.Result { return totalStateEdit(ctx, s, yes, opts, func(opts display.Options, snap *deploy.Snapshot) error { var pending []resource.Operation for _, op := range snap.PendingOperations { if op.Resource == nil { return fmt.Errorf("found operation without resource") } if op.Type != resource.OperationTypeCreating { pending = append(pending, op) continue } op, err := f(op) if err != nil { return err } if op != nil { pending = append(pending, *op) } } snap.PendingOperations = pending return nil }) } // Apply the CLI args from --import-pending-creates [[URN ID]...]. If an error was found, // it is returned. The list of URNs that were not mapped to a pending create is also // returned. 
func pendingCreatesToImports(ctx context.Context, s backend.Stack, yes bool, opts display.Options, importToCreates []string, ) ([]string, result.Result) { // A map from URN to ID if len(importToCreates)%2 != 0 { return nil, result.Errorf("each URN must be followed by an ID: found an odd number of entries") } alteredOps := make(map[string]string, len(importToCreates)/2) for i := 0; i < len(importToCreates); i += 2 { alteredOps[importToCreates[i]] = importToCreates[i+1] } result := filterMapPendingCreates(ctx, s, opts, yes, func(op resource.Operation) (*resource.Operation, error) { if id, ok := alteredOps[string(op.Resource.URN)]; ok { op.Resource.ID = resource.ID(id) op.Type = resource.OperationTypeImporting delete(alteredOps, string(op.Resource.URN)) return &op, nil } return &op, nil }) unusedKeys := make([]string, len(alteredOps)) for k := range alteredOps { unusedKeys = append(unusedKeys, k) } return unusedKeys, result } func hasPendingCreates(snap *deploy.Snapshot) bool { if snap == nil { return false } for _, op := range snap.PendingOperations { if op.Type == resource.OperationTypeCreating { return true } } return false } func interactiveFixPendingCreate(op resource.Operation) (*resource.Operation, error) { for { option := "" options := []string{ "clear (the CREATE failed; remove the pending CREATE)", "skip (do nothing)", "import (the CREATE succeeded; provide a resource ID and complete the CREATE operation)", } if err := survey.AskOne(&survey.Select{ Message: fmt.Sprintf("Options for pending CREATE of %s", op.Resource.URN), Options: options, }, &option, nil); err != nil { return nil, fmt.Errorf("no option selected: %w", err) } var err error switch option { case options[0]: return nil, nil case options[1]: return &op, nil case options[2]: var id string err = survey.AskOne(&survey.Input{ Message: "ID: ", }, &id, nil) if err == nil { op.Resource.ID = resource.ID(id) op.Type = resource.OperationTypeImporting return &op, nil } default: return nil, 
fmt.Errorf("unknown option: %q", option) } if errors.Is(err, terminal.InterruptErr) { continue } return nil, err } }
newRefreshCmd
identifier_name
refresh.go
// Copyright 2016-2022, Pulumi Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "context" "errors" "fmt" "os" "strings" survey "github.com/AlecAivazis/survey/v2" terminal "github.com/AlecAivazis/survey/v2/terminal" "github.com/spf13/cobra" "github.com/pulumi/pulumi/pkg/v3/backend" "github.com/pulumi/pulumi/pkg/v3/backend/display" "github.com/pulumi/pulumi/pkg/v3/engine" "github.com/pulumi/pulumi/pkg/v3/resource/deploy" "github.com/pulumi/pulumi/pkg/v3/resource/stack" "github.com/pulumi/pulumi/sdk/v3/go/common/apitype" "github.com/pulumi/pulumi/sdk/v3/go/common/diag/colors" "github.com/pulumi/pulumi/sdk/v3/go/common/resource" "github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil" "github.com/pulumi/pulumi/sdk/v3/go/common/util/result" "github.com/pulumi/pulumi/sdk/v3/go/common/workspace" ) func newRefreshCmd() *cobra.Command { var debug bool var expectNop bool var message string var execKind string var execAgent string var stackName string // Flags for remote operations. remoteArgs := RemoteArgs{} // Flags for engine.UpdateOptions. 
var jsonDisplay bool var diffDisplay bool var eventLogPath string var parallel int var showConfig bool var showReplacementSteps bool var showSames bool var skipPreview bool var suppressOutputs bool var suppressPermalink string var yes bool var targets *[]string // Flags for handling pending creates var skipPendingCreates bool var clearPendingCreates bool var importPendingCreates *[]string use, cmdArgs := "refresh", cmdutil.NoArgs if remoteSupported() { use, cmdArgs = "refresh [url]", cmdutil.MaximumNArgs(1) } cmd := &cobra.Command{ Use: use, Short: "Refresh the resources in a stack", Long: "Refresh the resources in a stack.\n" + "\n" + "This command compares the current stack's resource state with the state known to exist in\n" + "the actual cloud provider. Any such changes are adopted into the current stack. Note that if\n" + "the program text isn't updated accordingly, subsequent updates may still appear to be out of\n" + "synch with respect to the cloud provider's source of truth.\n" + "\n" + "The program to run is loaded from the project in the current directory. Use the `-C` or\n" + "`--cwd` flag to use a different directory.", Args: cmdArgs, Run: cmdutil.RunResultFunc(func(cmd *cobra.Command, args []string) result.Result { ctx := commandContext() // Remote implies we're skipping previews. 
if remoteArgs.remote { skipPreview = true } yes = yes || skipPreview || skipConfirmations() interactive := cmdutil.Interactive() if !interactive && !yes { return result.FromError( errors.New("--yes or --skip-preview must be passed in to proceed when running in non-interactive mode")) } opts, err := updateFlagsToOptions(interactive, skipPreview, yes) if err != nil { return result.FromError(err) } displayType := display.DisplayProgress if diffDisplay { displayType = display.DisplayDiff } opts.Display = display.Options{ Color: cmdutil.GetGlobalColorization(), ShowConfig: showConfig, ShowReplacementSteps: showReplacementSteps, ShowSameResources: showSames, SuppressOutputs: suppressOutputs, IsInteractive: interactive, Type: displayType, EventLogPath: eventLogPath, Debug: debug, JSONDisplay: jsonDisplay, } // we only suppress permalinks if the user passes true. the default is an empty string // which we pass as 'false' if suppressPermalink == "true" { opts.Display.SuppressPermalink = true } else { opts.Display.SuppressPermalink = false } if remoteArgs.remote { if len(args) == 0 { return result.FromError(errors.New("must specify remote URL")) } err = validateUnsupportedRemoteFlags(expectNop, nil, false, "", jsonDisplay, nil, nil, "", showConfig, showReplacementSteps, showSames, false, suppressOutputs, "default", targets, nil, nil, false, "", stackConfigFile) if err != nil { return result.FromError(err) } return runDeployment(ctx, opts.Display, apitype.Refresh, stackName, args[0], remoteArgs) } filestateBackend, err := isFilestateBackend(opts.Display) if err != nil { return result.FromError(err) } // by default, we are going to suppress the permalink when using self-managed backends // this can be re-enabled by explicitly passing "false" to the `suppress-permalink` flag if suppressPermalink != "false" && filestateBackend { opts.Display.SuppressPermalink = true } s, err := requireStack(ctx, stackName, stackOfferNew, opts.Display) if err != nil { return result.FromError(err) 
} proj, root, err := readProject() if err != nil { return result.FromError(err) } m, err := getUpdateMetadata(message, root, execKind, execAgent, false, cmd.Flags()) if err != nil { return result.FromError(fmt.Errorf("gathering environment metadata: %w", err)) } cfg, sm, err := getStackConfiguration(ctx, s, proj, nil) if err != nil { return result.FromError(fmt.Errorf("getting stack configuration: %w", err)) } decrypter, err := sm.Decrypter() if err != nil { return result.FromError(fmt.Errorf("getting stack decrypter: %w", err)) } stackName := s.Ref().Name().String() configErr := workspace.ValidateStackConfigAndApplyProjectConfig(stackName, proj, cfg.Config, decrypter) if configErr != nil { return result.FromError(fmt.Errorf("validating stack config: %w", configErr)) } if skipPendingCreates && clearPendingCreates { return result.FromError(fmt.Errorf( "cannot set both --skip-pending-creates and --clear-pending-creates")) } // First we handle explicit create->imports we were given if importPendingCreates != nil && len(*importPendingCreates) > 0 { stderr := opts.Display.Stderr if stderr == nil { stderr = os.Stderr } if unused, result := pendingCreatesToImports(ctx, s, yes, opts.Display, *importPendingCreates); result != nil { return result } else if len(unused) > 1 { fmt.Fprintf(stderr, "%s\n- \"%s\"\n", opts.Display.Color.Colorize(colors.Highlight( "warning: the following urns did not correspond to a pending create", "warning", colors.SpecWarning)), strings.Join(unused, "\"\n- \"")) } else if len(unused) > 0 { fmt.Fprintf(stderr, "%s: \"%s\" did not correspond to a pending create\n", opts.Display.Color.Colorize(colors.Highlight("warning", "warning", colors.SpecWarning)), unused[0]) } } snap, err := s.Snapshot(ctx, stack.DefaultSecretsProvider) if err != nil { return result.FromError(fmt.Errorf("getting snapshot: %w", err)) } // We then allow the user to interactively handle remaining pending creates. 
if interactive && hasPendingCreates(snap) && !skipPendingCreates { if result := filterMapPendingCreates(ctx, s, opts.Display, yes, interactiveFixPendingCreate); result != nil { return result } } // We remove remaining pending creates if clearPendingCreates && hasPendingCreates(snap) { // Remove all pending creates. removePendingCreates := func(op resource.Operation) (*resource.Operation, error) { return nil, nil } result := filterMapPendingCreates(ctx, s, opts.Display, yes, removePendingCreates) if result != nil { return result } } targetUrns := []string{} targetUrns = append(targetUrns, *targets...) opts.Engine = engine.UpdateOptions{ Parallel: parallel, Debug: debug, UseLegacyDiff: useLegacyDiff(), DisableProviderPreview: disableProviderPreview(), DisableResourceReferences: disableResourceReferences(), DisableOutputValues: disableOutputValues(), Targets: deploy.NewUrnTargets(targetUrns), Experimental: hasExperimentalCommands(), } changes, res := s.Refresh(ctx, backend.UpdateOperation{ Proj: proj, Root: root, M: m, Opts: opts, StackConfiguration: cfg, SecretsManager: sm, SecretsProvider: stack.DefaultSecretsProvider, Scopes: backend.CancellationScopes, }) switch { case res != nil && res.Error() == context.Canceled: return result.FromError(errors.New("refresh cancelled")) case res != nil: return PrintEngineResult(res) case expectNop && changes != nil && engine.HasChanges(changes): return result.FromError(errors.New("error: no changes were expected but changes occurred")) default: return nil } }), } cmd.PersistentFlags().BoolVarP( &debug, "debug", "d", false, "Print detailed debugging output during resource operations") cmd.PersistentFlags().BoolVar( &expectNop, "expect-no-changes", false, "Return an error if any changes occur during this update") cmd.PersistentFlags().StringVarP( &stackName, "stack", "s", "", "The name of the stack to operate on. 
Defaults to the current stack") cmd.PersistentFlags().StringVar( &stackConfigFile, "config-file", "", "Use the configuration values in the specified file rather than detecting the file name") cmd.PersistentFlags().StringVarP( &message, "message", "m", "", "Optional message to associate with the update operation") targets = cmd.PersistentFlags().StringArrayP( "target", "t", []string{}, "Specify a single resource URN to refresh. Multiple resource can be specified using: --target urn1 --target urn2") // Flags for engine.UpdateOptions. cmd.PersistentFlags().BoolVar( &diffDisplay, "diff", false, "Display operation as a rich diff showing the overall change") cmd.Flags().BoolVarP( &jsonDisplay, "json", "j", false, "Serialize the refresh diffs, operations, and overall output as JSON") cmd.PersistentFlags().IntVarP( &parallel, "parallel", "p", defaultParallel, "Allow P resource operations to run in parallel at once (1 for no parallelism). Defaults to unbounded.") cmd.PersistentFlags().BoolVar( &showReplacementSteps, "show-replacement-steps", false, "Show detailed resource replacement creates and deletes instead of a single step") cmd.PersistentFlags().BoolVar( &showSames, "show-sames", false, "Show resources that needn't be updated because they haven't changed, alongside those that do") cmd.PersistentFlags().BoolVarP( &skipPreview, "skip-preview", "f", false, "Do not calculate a preview before performing the refresh") cmd.PersistentFlags().BoolVar( &suppressOutputs, "suppress-outputs", false, "Suppress display of stack outputs (in case they contain sensitive values)") cmd.PersistentFlags().StringVar( &suppressPermalink, "suppress-permalink", "", "Suppress display of the state permalink") cmd.Flag("suppress-permalink").NoOptDefVal = "false" cmd.PersistentFlags().BoolVarP( &yes, "yes", "y", false, "Automatically approve and perform the refresh after previewing it") // Flags for pending creates cmd.PersistentFlags().BoolVar( &skipPendingCreates, "skip-pending-creates", false, 
"Skip importing pending creates in interactive mode") cmd.PersistentFlags().BoolVar( &clearPendingCreates, "clear-pending-creates", false, "Clear all pending creates, dropping them from the state") importPendingCreates = cmd.PersistentFlags().StringArray( "import-pending-creates", nil, "A list of form [[URN ID]...] describing the provider IDs of pending creates") // Remote flags remoteArgs.applyFlags(cmd) if hasDebugCommands() { cmd.PersistentFlags().StringVar( &eventLogPath, "event-log", "", "Log events to a file at this path") } // internal flags cmd.PersistentFlags().StringVar(&execKind, "exec-kind", "", "") // ignore err, only happens if flag does not exist _ = cmd.PersistentFlags().MarkHidden("exec-kind") cmd.PersistentFlags().StringVar(&execAgent, "exec-agent", "", "") // ignore err, only happens if flag does not exist _ = cmd.PersistentFlags().MarkHidden("exec-agent") return cmd } type editPendingOp = func(op resource.Operation) (*resource.Operation, error) // filterMapPendingCreates applies f to each pending create. If f returns nil, then the op // is deleted. Otherwise is is replaced by the returned op. func filterMapPendingCreates( ctx context.Context, s backend.Stack, opts display.Options, yes bool, f editPendingOp, ) result.Result { return totalStateEdit(ctx, s, yes, opts, func(opts display.Options, snap *deploy.Snapshot) error { var pending []resource.Operation for _, op := range snap.PendingOperations { if op.Resource == nil { return fmt.Errorf("found operation without resource") } if op.Type != resource.OperationTypeCreating
op, err := f(op) if err != nil { return err } if op != nil { pending = append(pending, *op) } } snap.PendingOperations = pending return nil }) } // Apply the CLI args from --import-pending-creates [[URN ID]...]. If an error was found, // it is returned. The list of URNs that were not mapped to a pending create is also // returned. func pendingCreatesToImports(ctx context.Context, s backend.Stack, yes bool, opts display.Options, importToCreates []string, ) ([]string, result.Result) { // A map from URN to ID if len(importToCreates)%2 != 0 { return nil, result.Errorf("each URN must be followed by an ID: found an odd number of entries") } alteredOps := make(map[string]string, len(importToCreates)/2) for i := 0; i < len(importToCreates); i += 2 { alteredOps[importToCreates[i]] = importToCreates[i+1] } result := filterMapPendingCreates(ctx, s, opts, yes, func(op resource.Operation) (*resource.Operation, error) { if id, ok := alteredOps[string(op.Resource.URN)]; ok { op.Resource.ID = resource.ID(id) op.Type = resource.OperationTypeImporting delete(alteredOps, string(op.Resource.URN)) return &op, nil } return &op, nil }) unusedKeys := make([]string, len(alteredOps)) for k := range alteredOps { unusedKeys = append(unusedKeys, k) } return unusedKeys, result } func hasPendingCreates(snap *deploy.Snapshot) bool { if snap == nil { return false } for _, op := range snap.PendingOperations { if op.Type == resource.OperationTypeCreating { return true } } return false } func interactiveFixPendingCreate(op resource.Operation) (*resource.Operation, error) { for { option := "" options := []string{ "clear (the CREATE failed; remove the pending CREATE)", "skip (do nothing)", "import (the CREATE succeeded; provide a resource ID and complete the CREATE operation)", } if err := survey.AskOne(&survey.Select{ Message: fmt.Sprintf("Options for pending CREATE of %s", op.Resource.URN), Options: options, }, &option, nil); err != nil { return nil, fmt.Errorf("no option selected: %w", err) } var 
err error switch option { case options[0]: return nil, nil case options[1]: return &op, nil case options[2]: var id string err = survey.AskOne(&survey.Input{ Message: "ID: ", }, &id, nil) if err == nil { op.Resource.ID = resource.ID(id) op.Type = resource.OperationTypeImporting return &op, nil } default: return nil, fmt.Errorf("unknown option: %q", option) } if errors.Is(err, terminal.InterruptErr) { continue } return nil, err } }
{ pending = append(pending, op) continue }
conditional_block
refresh.go
// Copyright 2016-2022, Pulumi Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "context" "errors" "fmt" "os" "strings" survey "github.com/AlecAivazis/survey/v2" terminal "github.com/AlecAivazis/survey/v2/terminal" "github.com/spf13/cobra" "github.com/pulumi/pulumi/pkg/v3/backend" "github.com/pulumi/pulumi/pkg/v3/backend/display" "github.com/pulumi/pulumi/pkg/v3/engine" "github.com/pulumi/pulumi/pkg/v3/resource/deploy" "github.com/pulumi/pulumi/pkg/v3/resource/stack" "github.com/pulumi/pulumi/sdk/v3/go/common/apitype" "github.com/pulumi/pulumi/sdk/v3/go/common/diag/colors" "github.com/pulumi/pulumi/sdk/v3/go/common/resource" "github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil" "github.com/pulumi/pulumi/sdk/v3/go/common/util/result" "github.com/pulumi/pulumi/sdk/v3/go/common/workspace" ) func newRefreshCmd() *cobra.Command { var debug bool var expectNop bool var message string var execKind string var execAgent string var stackName string // Flags for remote operations. remoteArgs := RemoteArgs{} // Flags for engine.UpdateOptions. 
var jsonDisplay bool var diffDisplay bool var eventLogPath string var parallel int var showConfig bool var showReplacementSteps bool var showSames bool var skipPreview bool var suppressOutputs bool var suppressPermalink string var yes bool var targets *[]string // Flags for handling pending creates var skipPendingCreates bool var clearPendingCreates bool var importPendingCreates *[]string use, cmdArgs := "refresh", cmdutil.NoArgs if remoteSupported() { use, cmdArgs = "refresh [url]", cmdutil.MaximumNArgs(1) } cmd := &cobra.Command{ Use: use, Short: "Refresh the resources in a stack", Long: "Refresh the resources in a stack.\n" + "\n" + "This command compares the current stack's resource state with the state known to exist in\n" + "the actual cloud provider. Any such changes are adopted into the current stack. Note that if\n" + "the program text isn't updated accordingly, subsequent updates may still appear to be out of\n" + "synch with respect to the cloud provider's source of truth.\n" + "\n" + "The program to run is loaded from the project in the current directory. Use the `-C` or\n" + "`--cwd` flag to use a different directory.", Args: cmdArgs, Run: cmdutil.RunResultFunc(func(cmd *cobra.Command, args []string) result.Result { ctx := commandContext() // Remote implies we're skipping previews. 
if remoteArgs.remote { skipPreview = true } yes = yes || skipPreview || skipConfirmations() interactive := cmdutil.Interactive() if !interactive && !yes { return result.FromError( errors.New("--yes or --skip-preview must be passed in to proceed when running in non-interactive mode")) } opts, err := updateFlagsToOptions(interactive, skipPreview, yes) if err != nil { return result.FromError(err) } displayType := display.DisplayProgress if diffDisplay { displayType = display.DisplayDiff } opts.Display = display.Options{ Color: cmdutil.GetGlobalColorization(), ShowConfig: showConfig, ShowReplacementSteps: showReplacementSteps, ShowSameResources: showSames, SuppressOutputs: suppressOutputs, IsInteractive: interactive, Type: displayType, EventLogPath: eventLogPath, Debug: debug, JSONDisplay: jsonDisplay, } // we only suppress permalinks if the user passes true. the default is an empty string // which we pass as 'false' if suppressPermalink == "true" { opts.Display.SuppressPermalink = true } else { opts.Display.SuppressPermalink = false } if remoteArgs.remote { if len(args) == 0 { return result.FromError(errors.New("must specify remote URL")) } err = validateUnsupportedRemoteFlags(expectNop, nil, false, "", jsonDisplay, nil, nil, "", showConfig, showReplacementSteps, showSames, false, suppressOutputs, "default", targets, nil, nil, false, "", stackConfigFile) if err != nil { return result.FromError(err) } return runDeployment(ctx, opts.Display, apitype.Refresh, stackName, args[0], remoteArgs) } filestateBackend, err := isFilestateBackend(opts.Display) if err != nil { return result.FromError(err) } // by default, we are going to suppress the permalink when using self-managed backends // this can be re-enabled by explicitly passing "false" to the `suppress-permalink` flag if suppressPermalink != "false" && filestateBackend { opts.Display.SuppressPermalink = true } s, err := requireStack(ctx, stackName, stackOfferNew, opts.Display) if err != nil { return result.FromError(err) 
} proj, root, err := readProject() if err != nil { return result.FromError(err) } m, err := getUpdateMetadata(message, root, execKind, execAgent, false, cmd.Flags()) if err != nil { return result.FromError(fmt.Errorf("gathering environment metadata: %w", err)) } cfg, sm, err := getStackConfiguration(ctx, s, proj, nil) if err != nil { return result.FromError(fmt.Errorf("getting stack configuration: %w", err)) } decrypter, err := sm.Decrypter() if err != nil { return result.FromError(fmt.Errorf("getting stack decrypter: %w", err)) } stackName := s.Ref().Name().String() configErr := workspace.ValidateStackConfigAndApplyProjectConfig(stackName, proj, cfg.Config, decrypter) if configErr != nil { return result.FromError(fmt.Errorf("validating stack config: %w", configErr)) } if skipPendingCreates && clearPendingCreates { return result.FromError(fmt.Errorf( "cannot set both --skip-pending-creates and --clear-pending-creates")) } // First we handle explicit create->imports we were given if importPendingCreates != nil && len(*importPendingCreates) > 0 { stderr := opts.Display.Stderr if stderr == nil { stderr = os.Stderr } if unused, result := pendingCreatesToImports(ctx, s, yes, opts.Display, *importPendingCreates); result != nil { return result } else if len(unused) > 1 { fmt.Fprintf(stderr, "%s\n- \"%s\"\n", opts.Display.Color.Colorize(colors.Highlight( "warning: the following urns did not correspond to a pending create", "warning", colors.SpecWarning)), strings.Join(unused, "\"\n- \"")) } else if len(unused) > 0 { fmt.Fprintf(stderr, "%s: \"%s\" did not correspond to a pending create\n", opts.Display.Color.Colorize(colors.Highlight("warning", "warning", colors.SpecWarning)), unused[0]) } } snap, err := s.Snapshot(ctx, stack.DefaultSecretsProvider) if err != nil { return result.FromError(fmt.Errorf("getting snapshot: %w", err)) } // We then allow the user to interactively handle remaining pending creates. 
if interactive && hasPendingCreates(snap) && !skipPendingCreates { if result := filterMapPendingCreates(ctx, s, opts.Display, yes, interactiveFixPendingCreate); result != nil { return result } } // We remove remaining pending creates if clearPendingCreates && hasPendingCreates(snap) { // Remove all pending creates. removePendingCreates := func(op resource.Operation) (*resource.Operation, error) { return nil, nil } result := filterMapPendingCreates(ctx, s, opts.Display, yes, removePendingCreates) if result != nil { return result } } targetUrns := []string{} targetUrns = append(targetUrns, *targets...) opts.Engine = engine.UpdateOptions{ Parallel: parallel, Debug: debug, UseLegacyDiff: useLegacyDiff(), DisableProviderPreview: disableProviderPreview(), DisableResourceReferences: disableResourceReferences(), DisableOutputValues: disableOutputValues(), Targets: deploy.NewUrnTargets(targetUrns), Experimental: hasExperimentalCommands(), } changes, res := s.Refresh(ctx, backend.UpdateOperation{ Proj: proj, Root: root, M: m, Opts: opts, StackConfiguration: cfg, SecretsManager: sm, SecretsProvider: stack.DefaultSecretsProvider, Scopes: backend.CancellationScopes, }) switch { case res != nil && res.Error() == context.Canceled: return result.FromError(errors.New("refresh cancelled")) case res != nil: return PrintEngineResult(res) case expectNop && changes != nil && engine.HasChanges(changes): return result.FromError(errors.New("error: no changes were expected but changes occurred")) default: return nil } }), } cmd.PersistentFlags().BoolVarP( &debug, "debug", "d", false, "Print detailed debugging output during resource operations") cmd.PersistentFlags().BoolVar( &expectNop, "expect-no-changes", false, "Return an error if any changes occur during this update") cmd.PersistentFlags().StringVarP( &stackName, "stack", "s", "", "The name of the stack to operate on. 
Defaults to the current stack") cmd.PersistentFlags().StringVar( &stackConfigFile, "config-file", "", "Use the configuration values in the specified file rather than detecting the file name") cmd.PersistentFlags().StringVarP( &message, "message", "m", "", "Optional message to associate with the update operation") targets = cmd.PersistentFlags().StringArrayP( "target", "t", []string{}, "Specify a single resource URN to refresh. Multiple resource can be specified using: --target urn1 --target urn2") // Flags for engine.UpdateOptions. cmd.PersistentFlags().BoolVar( &diffDisplay, "diff", false, "Display operation as a rich diff showing the overall change") cmd.Flags().BoolVarP( &jsonDisplay, "json", "j", false, "Serialize the refresh diffs, operations, and overall output as JSON") cmd.PersistentFlags().IntVarP( &parallel, "parallel", "p", defaultParallel, "Allow P resource operations to run in parallel at once (1 for no parallelism). Defaults to unbounded.") cmd.PersistentFlags().BoolVar( &showReplacementSteps, "show-replacement-steps", false, "Show detailed resource replacement creates and deletes instead of a single step") cmd.PersistentFlags().BoolVar( &showSames, "show-sames", false, "Show resources that needn't be updated because they haven't changed, alongside those that do") cmd.PersistentFlags().BoolVarP( &skipPreview, "skip-preview", "f", false, "Do not calculate a preview before performing the refresh") cmd.PersistentFlags().BoolVar( &suppressOutputs, "suppress-outputs", false, "Suppress display of stack outputs (in case they contain sensitive values)") cmd.PersistentFlags().StringVar( &suppressPermalink, "suppress-permalink", "", "Suppress display of the state permalink") cmd.Flag("suppress-permalink").NoOptDefVal = "false" cmd.PersistentFlags().BoolVarP( &yes, "yes", "y", false, "Automatically approve and perform the refresh after previewing it") // Flags for pending creates cmd.PersistentFlags().BoolVar( &skipPendingCreates, "skip-pending-creates", false, 
"Skip importing pending creates in interactive mode") cmd.PersistentFlags().BoolVar( &clearPendingCreates, "clear-pending-creates", false, "Clear all pending creates, dropping them from the state") importPendingCreates = cmd.PersistentFlags().StringArray( "import-pending-creates", nil, "A list of form [[URN ID]...] describing the provider IDs of pending creates") // Remote flags remoteArgs.applyFlags(cmd) if hasDebugCommands() { cmd.PersistentFlags().StringVar( &eventLogPath, "event-log", "", "Log events to a file at this path") } // internal flags cmd.PersistentFlags().StringVar(&execKind, "exec-kind", "", "") // ignore err, only happens if flag does not exist _ = cmd.PersistentFlags().MarkHidden("exec-kind") cmd.PersistentFlags().StringVar(&execAgent, "exec-agent", "", "") // ignore err, only happens if flag does not exist _ = cmd.PersistentFlags().MarkHidden("exec-agent") return cmd } type editPendingOp = func(op resource.Operation) (*resource.Operation, error) // filterMapPendingCreates applies f to each pending create. If f returns nil, then the op // is deleted. Otherwise is is replaced by the returned op. func filterMapPendingCreates( ctx context.Context, s backend.Stack, opts display.Options, yes bool, f editPendingOp, ) result.Result
// Apply the CLI args from --import-pending-creates [[URN ID]...]. If an error was found, // it is returned. The list of URNs that were not mapped to a pending create is also // returned. func pendingCreatesToImports(ctx context.Context, s backend.Stack, yes bool, opts display.Options, importToCreates []string, ) ([]string, result.Result) { // A map from URN to ID if len(importToCreates)%2 != 0 { return nil, result.Errorf("each URN must be followed by an ID: found an odd number of entries") } alteredOps := make(map[string]string, len(importToCreates)/2) for i := 0; i < len(importToCreates); i += 2 { alteredOps[importToCreates[i]] = importToCreates[i+1] } result := filterMapPendingCreates(ctx, s, opts, yes, func(op resource.Operation) (*resource.Operation, error) { if id, ok := alteredOps[string(op.Resource.URN)]; ok { op.Resource.ID = resource.ID(id) op.Type = resource.OperationTypeImporting delete(alteredOps, string(op.Resource.URN)) return &op, nil } return &op, nil }) unusedKeys := make([]string, len(alteredOps)) for k := range alteredOps { unusedKeys = append(unusedKeys, k) } return unusedKeys, result } func hasPendingCreates(snap *deploy.Snapshot) bool { if snap == nil { return false } for _, op := range snap.PendingOperations { if op.Type == resource.OperationTypeCreating { return true } } return false } func interactiveFixPendingCreate(op resource.Operation) (*resource.Operation, error) { for { option := "" options := []string{ "clear (the CREATE failed; remove the pending CREATE)", "skip (do nothing)", "import (the CREATE succeeded; provide a resource ID and complete the CREATE operation)", } if err := survey.AskOne(&survey.Select{ Message: fmt.Sprintf("Options for pending CREATE of %s", op.Resource.URN), Options: options, }, &option, nil); err != nil { return nil, fmt.Errorf("no option selected: %w", err) } var err error switch option { case options[0]: return nil, nil case options[1]: return &op, nil case options[2]: var id string err = 
survey.AskOne(&survey.Input{ Message: "ID: ", }, &id, nil) if err == nil { op.Resource.ID = resource.ID(id) op.Type = resource.OperationTypeImporting return &op, nil } default: return nil, fmt.Errorf("unknown option: %q", option) } if errors.Is(err, terminal.InterruptErr) { continue } return nil, err } }
{ return totalStateEdit(ctx, s, yes, opts, func(opts display.Options, snap *deploy.Snapshot) error { var pending []resource.Operation for _, op := range snap.PendingOperations { if op.Resource == nil { return fmt.Errorf("found operation without resource") } if op.Type != resource.OperationTypeCreating { pending = append(pending, op) continue } op, err := f(op) if err != nil { return err } if op != nil { pending = append(pending, *op) } } snap.PendingOperations = pending return nil }) }
identifier_body
refresh.go
// Copyright 2016-2022, Pulumi Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "context" "errors" "fmt" "os" "strings" survey "github.com/AlecAivazis/survey/v2" terminal "github.com/AlecAivazis/survey/v2/terminal" "github.com/spf13/cobra" "github.com/pulumi/pulumi/pkg/v3/backend" "github.com/pulumi/pulumi/pkg/v3/backend/display" "github.com/pulumi/pulumi/pkg/v3/engine" "github.com/pulumi/pulumi/pkg/v3/resource/deploy" "github.com/pulumi/pulumi/pkg/v3/resource/stack" "github.com/pulumi/pulumi/sdk/v3/go/common/apitype" "github.com/pulumi/pulumi/sdk/v3/go/common/diag/colors" "github.com/pulumi/pulumi/sdk/v3/go/common/resource" "github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil" "github.com/pulumi/pulumi/sdk/v3/go/common/util/result" "github.com/pulumi/pulumi/sdk/v3/go/common/workspace" ) func newRefreshCmd() *cobra.Command { var debug bool var expectNop bool var message string var execKind string var execAgent string var stackName string // Flags for remote operations. remoteArgs := RemoteArgs{} // Flags for engine.UpdateOptions. 
var jsonDisplay bool var diffDisplay bool var eventLogPath string var parallel int var showConfig bool var showReplacementSteps bool var showSames bool var skipPreview bool var suppressOutputs bool var suppressPermalink string var yes bool var targets *[]string // Flags for handling pending creates var skipPendingCreates bool var clearPendingCreates bool var importPendingCreates *[]string use, cmdArgs := "refresh", cmdutil.NoArgs if remoteSupported() { use, cmdArgs = "refresh [url]", cmdutil.MaximumNArgs(1) } cmd := &cobra.Command{ Use: use,
"the program text isn't updated accordingly, subsequent updates may still appear to be out of\n" + "synch with respect to the cloud provider's source of truth.\n" + "\n" + "The program to run is loaded from the project in the current directory. Use the `-C` or\n" + "`--cwd` flag to use a different directory.", Args: cmdArgs, Run: cmdutil.RunResultFunc(func(cmd *cobra.Command, args []string) result.Result { ctx := commandContext() // Remote implies we're skipping previews. if remoteArgs.remote { skipPreview = true } yes = yes || skipPreview || skipConfirmations() interactive := cmdutil.Interactive() if !interactive && !yes { return result.FromError( errors.New("--yes or --skip-preview must be passed in to proceed when running in non-interactive mode")) } opts, err := updateFlagsToOptions(interactive, skipPreview, yes) if err != nil { return result.FromError(err) } displayType := display.DisplayProgress if diffDisplay { displayType = display.DisplayDiff } opts.Display = display.Options{ Color: cmdutil.GetGlobalColorization(), ShowConfig: showConfig, ShowReplacementSteps: showReplacementSteps, ShowSameResources: showSames, SuppressOutputs: suppressOutputs, IsInteractive: interactive, Type: displayType, EventLogPath: eventLogPath, Debug: debug, JSONDisplay: jsonDisplay, } // we only suppress permalinks if the user passes true. 
the default is an empty string // which we pass as 'false' if suppressPermalink == "true" { opts.Display.SuppressPermalink = true } else { opts.Display.SuppressPermalink = false } if remoteArgs.remote { if len(args) == 0 { return result.FromError(errors.New("must specify remote URL")) } err = validateUnsupportedRemoteFlags(expectNop, nil, false, "", jsonDisplay, nil, nil, "", showConfig, showReplacementSteps, showSames, false, suppressOutputs, "default", targets, nil, nil, false, "", stackConfigFile) if err != nil { return result.FromError(err) } return runDeployment(ctx, opts.Display, apitype.Refresh, stackName, args[0], remoteArgs) } filestateBackend, err := isFilestateBackend(opts.Display) if err != nil { return result.FromError(err) } // by default, we are going to suppress the permalink when using self-managed backends // this can be re-enabled by explicitly passing "false" to the `suppress-permalink` flag if suppressPermalink != "false" && filestateBackend { opts.Display.SuppressPermalink = true } s, err := requireStack(ctx, stackName, stackOfferNew, opts.Display) if err != nil { return result.FromError(err) } proj, root, err := readProject() if err != nil { return result.FromError(err) } m, err := getUpdateMetadata(message, root, execKind, execAgent, false, cmd.Flags()) if err != nil { return result.FromError(fmt.Errorf("gathering environment metadata: %w", err)) } cfg, sm, err := getStackConfiguration(ctx, s, proj, nil) if err != nil { return result.FromError(fmt.Errorf("getting stack configuration: %w", err)) } decrypter, err := sm.Decrypter() if err != nil { return result.FromError(fmt.Errorf("getting stack decrypter: %w", err)) } stackName := s.Ref().Name().String() configErr := workspace.ValidateStackConfigAndApplyProjectConfig(stackName, proj, cfg.Config, decrypter) if configErr != nil { return result.FromError(fmt.Errorf("validating stack config: %w", configErr)) } if skipPendingCreates && clearPendingCreates { return result.FromError(fmt.Errorf( 
"cannot set both --skip-pending-creates and --clear-pending-creates")) } // First we handle explicit create->imports we were given if importPendingCreates != nil && len(*importPendingCreates) > 0 { stderr := opts.Display.Stderr if stderr == nil { stderr = os.Stderr } if unused, result := pendingCreatesToImports(ctx, s, yes, opts.Display, *importPendingCreates); result != nil { return result } else if len(unused) > 1 { fmt.Fprintf(stderr, "%s\n- \"%s\"\n", opts.Display.Color.Colorize(colors.Highlight( "warning: the following urns did not correspond to a pending create", "warning", colors.SpecWarning)), strings.Join(unused, "\"\n- \"")) } else if len(unused) > 0 { fmt.Fprintf(stderr, "%s: \"%s\" did not correspond to a pending create\n", opts.Display.Color.Colorize(colors.Highlight("warning", "warning", colors.SpecWarning)), unused[0]) } } snap, err := s.Snapshot(ctx, stack.DefaultSecretsProvider) if err != nil { return result.FromError(fmt.Errorf("getting snapshot: %w", err)) } // We then allow the user to interactively handle remaining pending creates. if interactive && hasPendingCreates(snap) && !skipPendingCreates { if result := filterMapPendingCreates(ctx, s, opts.Display, yes, interactiveFixPendingCreate); result != nil { return result } } // We remove remaining pending creates if clearPendingCreates && hasPendingCreates(snap) { // Remove all pending creates. removePendingCreates := func(op resource.Operation) (*resource.Operation, error) { return nil, nil } result := filterMapPendingCreates(ctx, s, opts.Display, yes, removePendingCreates) if result != nil { return result } } targetUrns := []string{} targetUrns = append(targetUrns, *targets...) 
opts.Engine = engine.UpdateOptions{ Parallel: parallel, Debug: debug, UseLegacyDiff: useLegacyDiff(), DisableProviderPreview: disableProviderPreview(), DisableResourceReferences: disableResourceReferences(), DisableOutputValues: disableOutputValues(), Targets: deploy.NewUrnTargets(targetUrns), Experimental: hasExperimentalCommands(), } changes, res := s.Refresh(ctx, backend.UpdateOperation{ Proj: proj, Root: root, M: m, Opts: opts, StackConfiguration: cfg, SecretsManager: sm, SecretsProvider: stack.DefaultSecretsProvider, Scopes: backend.CancellationScopes, }) switch { case res != nil && res.Error() == context.Canceled: return result.FromError(errors.New("refresh cancelled")) case res != nil: return PrintEngineResult(res) case expectNop && changes != nil && engine.HasChanges(changes): return result.FromError(errors.New("error: no changes were expected but changes occurred")) default: return nil } }), } cmd.PersistentFlags().BoolVarP( &debug, "debug", "d", false, "Print detailed debugging output during resource operations") cmd.PersistentFlags().BoolVar( &expectNop, "expect-no-changes", false, "Return an error if any changes occur during this update") cmd.PersistentFlags().StringVarP( &stackName, "stack", "s", "", "The name of the stack to operate on. Defaults to the current stack") cmd.PersistentFlags().StringVar( &stackConfigFile, "config-file", "", "Use the configuration values in the specified file rather than detecting the file name") cmd.PersistentFlags().StringVarP( &message, "message", "m", "", "Optional message to associate with the update operation") targets = cmd.PersistentFlags().StringArrayP( "target", "t", []string{}, "Specify a single resource URN to refresh. Multiple resource can be specified using: --target urn1 --target urn2") // Flags for engine.UpdateOptions. 
cmd.PersistentFlags().BoolVar( &diffDisplay, "diff", false, "Display operation as a rich diff showing the overall change") cmd.Flags().BoolVarP( &jsonDisplay, "json", "j", false, "Serialize the refresh diffs, operations, and overall output as JSON") cmd.PersistentFlags().IntVarP( &parallel, "parallel", "p", defaultParallel, "Allow P resource operations to run in parallel at once (1 for no parallelism). Defaults to unbounded.") cmd.PersistentFlags().BoolVar( &showReplacementSteps, "show-replacement-steps", false, "Show detailed resource replacement creates and deletes instead of a single step") cmd.PersistentFlags().BoolVar( &showSames, "show-sames", false, "Show resources that needn't be updated because they haven't changed, alongside those that do") cmd.PersistentFlags().BoolVarP( &skipPreview, "skip-preview", "f", false, "Do not calculate a preview before performing the refresh") cmd.PersistentFlags().BoolVar( &suppressOutputs, "suppress-outputs", false, "Suppress display of stack outputs (in case they contain sensitive values)") cmd.PersistentFlags().StringVar( &suppressPermalink, "suppress-permalink", "", "Suppress display of the state permalink") cmd.Flag("suppress-permalink").NoOptDefVal = "false" cmd.PersistentFlags().BoolVarP( &yes, "yes", "y", false, "Automatically approve and perform the refresh after previewing it") // Flags for pending creates cmd.PersistentFlags().BoolVar( &skipPendingCreates, "skip-pending-creates", false, "Skip importing pending creates in interactive mode") cmd.PersistentFlags().BoolVar( &clearPendingCreates, "clear-pending-creates", false, "Clear all pending creates, dropping them from the state") importPendingCreates = cmd.PersistentFlags().StringArray( "import-pending-creates", nil, "A list of form [[URN ID]...] 
describing the provider IDs of pending creates") // Remote flags remoteArgs.applyFlags(cmd) if hasDebugCommands() { cmd.PersistentFlags().StringVar( &eventLogPath, "event-log", "", "Log events to a file at this path") } // internal flags cmd.PersistentFlags().StringVar(&execKind, "exec-kind", "", "") // ignore err, only happens if flag does not exist _ = cmd.PersistentFlags().MarkHidden("exec-kind") cmd.PersistentFlags().StringVar(&execAgent, "exec-agent", "", "") // ignore err, only happens if flag does not exist _ = cmd.PersistentFlags().MarkHidden("exec-agent") return cmd } type editPendingOp = func(op resource.Operation) (*resource.Operation, error) // filterMapPendingCreates applies f to each pending create. If f returns nil, then the op // is deleted. Otherwise is is replaced by the returned op. func filterMapPendingCreates( ctx context.Context, s backend.Stack, opts display.Options, yes bool, f editPendingOp, ) result.Result { return totalStateEdit(ctx, s, yes, opts, func(opts display.Options, snap *deploy.Snapshot) error { var pending []resource.Operation for _, op := range snap.PendingOperations { if op.Resource == nil { return fmt.Errorf("found operation without resource") } if op.Type != resource.OperationTypeCreating { pending = append(pending, op) continue } op, err := f(op) if err != nil { return err } if op != nil { pending = append(pending, *op) } } snap.PendingOperations = pending return nil }) } // Apply the CLI args from --import-pending-creates [[URN ID]...]. If an error was found, // it is returned. The list of URNs that were not mapped to a pending create is also // returned. 
func pendingCreatesToImports(ctx context.Context, s backend.Stack, yes bool, opts display.Options, importToCreates []string, ) ([]string, result.Result) { // A map from URN to ID if len(importToCreates)%2 != 0 { return nil, result.Errorf("each URN must be followed by an ID: found an odd number of entries") } alteredOps := make(map[string]string, len(importToCreates)/2) for i := 0; i < len(importToCreates); i += 2 { alteredOps[importToCreates[i]] = importToCreates[i+1] } result := filterMapPendingCreates(ctx, s, opts, yes, func(op resource.Operation) (*resource.Operation, error) { if id, ok := alteredOps[string(op.Resource.URN)]; ok { op.Resource.ID = resource.ID(id) op.Type = resource.OperationTypeImporting delete(alteredOps, string(op.Resource.URN)) return &op, nil } return &op, nil }) unusedKeys := make([]string, len(alteredOps)) for k := range alteredOps { unusedKeys = append(unusedKeys, k) } return unusedKeys, result } func hasPendingCreates(snap *deploy.Snapshot) bool { if snap == nil { return false } for _, op := range snap.PendingOperations { if op.Type == resource.OperationTypeCreating { return true } } return false } func interactiveFixPendingCreate(op resource.Operation) (*resource.Operation, error) { for { option := "" options := []string{ "clear (the CREATE failed; remove the pending CREATE)", "skip (do nothing)", "import (the CREATE succeeded; provide a resource ID and complete the CREATE operation)", } if err := survey.AskOne(&survey.Select{ Message: fmt.Sprintf("Options for pending CREATE of %s", op.Resource.URN), Options: options, }, &option, nil); err != nil { return nil, fmt.Errorf("no option selected: %w", err) } var err error switch option { case options[0]: return nil, nil case options[1]: return &op, nil case options[2]: var id string err = survey.AskOne(&survey.Input{ Message: "ID: ", }, &id, nil) if err == nil { op.Resource.ID = resource.ID(id) op.Type = resource.OperationTypeImporting return &op, nil } default: return nil, 
fmt.Errorf("unknown option: %q", option) } if errors.Is(err, terminal.InterruptErr) { continue } return nil, err } }
Short: "Refresh the resources in a stack", Long: "Refresh the resources in a stack.\n" + "\n" + "This command compares the current stack's resource state with the state known to exist in\n" + "the actual cloud provider. Any such changes are adopted into the current stack. Note that if\n" +
random_line_split
main.rs
extern crate hsl; extern crate rand; extern crate sdl2; use sdl2::audio::{AudioCVT, AudioSpecDesired, AudioSpecWAV, AudioQueue}; use sdl2::event::Event; use sdl2::image::{INIT_PNG, LoadSurface}; use sdl2::gfx::rotozoom::RotozoomSurface; use sdl2::keyboard::Keycode; use sdl2::pixels::Color; use sdl2::rect::Rect; use sdl2::render::{TextureQuery}; use sdl2::surface::Surface; use sdl2::video::FullscreenType; use hsl::HSL; use rand::{thread_rng, Rng}; use std::collections::{HashSet, HashMap}; use std::borrow::Cow; use std::path::{Path, PathBuf}; use std::time::Duration; macro_rules! rect( ($x:expr, $y:expr, $w:expr, $h:expr) => ( Rect::new($x as i32, $y as i32, $w as u32, $h as u32) ) ); trait PositionStrategy { fn next_position(&mut self, rect: Rect, within_rect: Rect) -> Rect; fn reset(&mut self)
} struct RandomPositionStrategy {} impl PositionStrategy for RandomPositionStrategy { // Return a random position that fits rect within rect fn next_position(&mut self, rect: Rect, within_rect: Rect) -> Rect { let rx: f64 = thread_rng().gen(); let ry: f64 = thread_rng().gen(); let posx = rx * (within_rect.width() - 1 * rect.width()) as f64; let posy = ry * (within_rect.height() - 1 * rect.height()) as f64; rect!(posx as f64, posy as f64, rect.width(), rect.height()) } } struct LeftToRightStrategy { next_x: u32, next_y: u32, } impl PositionStrategy for LeftToRightStrategy { fn next_position(&mut self, rect: Rect, within_rect: Rect) -> Rect { if self.next_x > within_rect.right() as u32 { self.next_x = 0; self.next_y = self.next_y + rect.height() as u32; } if self.next_y > within_rect.bottom() as u32 { self.next_y = 0; } let y = self.next_y; let x = self.next_x; self.next_x = x + rect.width(); rect!(x, y, rect.width(), rect.height()) } fn reset(&mut self) { self.next_x = 0; self.next_y = 0; } } fn random_colour(c: Color) -> Color { let not_near_hsl = HSL::from_rgb(&[c.r, c.g, c.b]); let mut generated = not_near_hsl.clone(); while (generated.h - not_near_hsl.h).abs() < 40. 
{ let h: f64 = thread_rng().gen(); generated = HSL { h: h * 360.0, s: 1_f64, l: 0.5_f64, }; } let rgb = generated.to_rgb(); return Color::RGB(rgb.0, rgb.1, rgb.2); } fn load_sound(note: &str) -> AudioSpecWAV { // Load a sound let filename = format!("{}.wav", note); let path: PathBuf = ["./sounds", &filename].iter().collect(); let wav_file: Cow<'static, Path> = Cow::from(path); AudioSpecWAV::load_wav(wav_file.clone()) .expect("Could not load test WAV file") } fn load_image(fname: &str) -> Surface { // Load an image let filename = format!("{}.png", fname); let path: PathBuf = ["./images", &filename].iter().collect(); let image_file: Cow<'static, Path> = Cow::from(path); Surface::from_file(image_file.clone()) .expect("Could not load image file") } pub fn main() { let sdl_context = sdl2::init().unwrap(); let video_subsystem = sdl_context.video().unwrap(); let audio_subsystem = sdl_context.audio().unwrap(); let _image_context = sdl2::image::init(INIT_PNG); let (window_width, window_height) = (800, 600); let mut window = video_subsystem .window("Bish Bash Bosh", window_width, window_height) .position_centered() .opengl() .build() .unwrap(); window.set_fullscreen(FullscreenType::Desktop).unwrap(); window.set_grab(true); let (window_width, window_height) = window.size(); let mut canvas = window.into_canvas().build().unwrap(); let texture_creator = canvas.texture_creator(); let mut event_pump = sdl_context.event_pump().unwrap(); let ttf_context = sdl2::ttf::init().unwrap(); // Load a font let mut font = ttf_context.load_font("DejaVuSans-Bold.ttf", 112).unwrap(); font.set_style(sdl2::ttf::STYLE_BOLD); let desired_spec = AudioSpecDesired { freq: Some(44_100), channels: Some(1), // mono samples: None, // default }; let audio_queue: AudioQueue<u8> = audio_subsystem .open_queue(None, &desired_spec) .unwrap(); // let mut position_strategy = RandomPositionStrategy { }; let mut position_strategy = LeftToRightStrategy { next_x: 0 , next_y: window_height / 3}; 
canvas.set_draw_color(Color::RGB(255, 0, 0)); canvas.clear(); canvas.present(); // Keep track of all displayed characters, and their postitions let mut drawables = vec![]; let drawable_keys: HashSet<String> = [ "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", ].iter() .map(|s| s.to_string()) .collect(); let noisy_keys: HashMap<String, String> = [ ("F1", "37a"), ("F2", "38b"), ("F3", "39bb"), ("F4", "40c"), ("F5", "41c"), ("F6", "42d"), ("F7", "43e"), ("F8", "44eb"), ("F9", "45f"), ("F10", "46f"), ("F11", "47g"), ("F12", "48g"), ("A", "alpha-a"), ("B", "alpha-b"), ("C", "alpha-c"), ("D", "alpha-d"), ("E", "alpha-e"), ("F", "alpha-f"), ("G", "alpha-g"), ("H", "alpha-h"), ("I", "alpha-i"), ("J", "alpha-j"), ("K", "alpha-k"), ("L", "alpha-l"), ("M", "alpha-m"), ("N", "alpha-n"), ("O", "alpha-o"), ("P", "alpha-p"), ("Q", "alpha-q"), ("R", "alpha-r"), ("S", "alpha-s"), ("T", "alpha-t"), ("U", "alpha-u"), ("V", "alpha-v"), ("W", "alpha-w"), ("X", "alpha-x"), ("Y", "alpha-y"), ("Z", "alpha-z"), ].iter() .map(|(s1, s2)| (s1.to_string(), s2.to_string())) .collect(); let images: HashMap<String, String> = [ ("T", "T"), ("B", "buzz"), ("C", "chase"), ("D", "dumbo"), ("G", "geo"), ("H", "harrison"), ("I", "igglepiggle"), ("M", "mickey"), ("P", "peppa"), ("S", "simba"), ("U", "upsiedaisy"), ("W", "woody"), ].iter() .map(|(s1, s2)| (s1.to_string(), s2.to_string())) .collect(); let mut background_color = random_colour(Color::RGB(255, 255, 255)); 'running: loop { for event in event_pump.poll_iter() { match event { Event::Quit { .. } | Event::KeyDown { keycode: Some(Keycode::Escape), repeat: true, .. } => break 'running, Event::KeyDown { keycode: Some(Keycode::Return), repeat: false, .. } => { position_strategy.reset(); drawables.clear(); background_color = random_colour(Color::RGB(255, 255, 255)); } Event::KeyDown { keycode: Some(key), repeat: false, .. 
} => { if drawable_keys.contains(&key.name()) { let colour = random_colour(background_color); let surface = font.render(&key.name()).blended(colour).unwrap(); let texture = texture_creator .create_texture_from_surface(&surface) .unwrap(); let TextureQuery { width, height, .. } = texture.query(); let target = position_strategy.next_position( rect!(0, 0, width, height), rect!(0, 0, window_width, window_height), ); //rect!(150, 150, width, height); drawables.push((texture, target)); } if let Some(note) = noisy_keys.get(&key.name()) { let wav = load_sound(&note); let spec = audio_queue.spec(); let cvt = AudioCVT::new( wav.format, wav.channels, wav.freq, spec.format, spec.channels, spec.freq, ).expect("Could not convert WAV file"); let data = cvt.convert(wav.buffer().to_vec()); audio_queue.clear(); audio_queue.queue(&data); // Start playback audio_queue.resume(); } if let Some(filename) = images.get(&key.name()) { let mut surface = load_image(&filename); let sf = (100f64 / surface.height() as f64); println!("{}", sf ); let surface = surface.rotozoom(0f64, sf, false).unwrap(); let texture = texture_creator .create_texture_from_surface(&surface) .unwrap(); let TextureQuery { width, height, .. } = texture.query(); let target = position_strategy.next_position( rect!(0, 0, width, height), rect!(0, 0, window_width, window_height), ); drawables.push((texture, target)); } } _ => {} } } // Draw the chars canvas.set_draw_color(background_color); canvas.clear(); for &(ref texture, target) in drawables.iter() { canvas.copy(&texture, None, Some(target.clone())).unwrap(); } canvas.present(); ::std::thread::sleep(Duration::new(0, 1_000_000_000u32 / 60)); } }
{ }
identifier_body
main.rs
extern crate hsl; extern crate rand; extern crate sdl2; use sdl2::audio::{AudioCVT, AudioSpecDesired, AudioSpecWAV, AudioQueue}; use sdl2::event::Event; use sdl2::image::{INIT_PNG, LoadSurface}; use sdl2::gfx::rotozoom::RotozoomSurface; use sdl2::keyboard::Keycode; use sdl2::pixels::Color; use sdl2::rect::Rect; use sdl2::render::{TextureQuery}; use sdl2::surface::Surface; use sdl2::video::FullscreenType; use hsl::HSL; use rand::{thread_rng, Rng}; use std::collections::{HashSet, HashMap}; use std::borrow::Cow; use std::path::{Path, PathBuf}; use std::time::Duration; macro_rules! rect( ($x:expr, $y:expr, $w:expr, $h:expr) => ( Rect::new($x as i32, $y as i32, $w as u32, $h as u32) ) ); trait PositionStrategy { fn next_position(&mut self, rect: Rect, within_rect: Rect) -> Rect; fn reset(&mut self) { } } struct RandomPositionStrategy {} impl PositionStrategy for RandomPositionStrategy { // Return a random position that fits rect within rect fn next_position(&mut self, rect: Rect, within_rect: Rect) -> Rect { let rx: f64 = thread_rng().gen(); let ry: f64 = thread_rng().gen(); let posx = rx * (within_rect.width() - 1 * rect.width()) as f64; let posy = ry * (within_rect.height() - 1 * rect.height()) as f64; rect!(posx as f64, posy as f64, rect.width(), rect.height()) } } struct LeftToRightStrategy { next_x: u32, next_y: u32, } impl PositionStrategy for LeftToRightStrategy { fn next_position(&mut self, rect: Rect, within_rect: Rect) -> Rect { if self.next_x > within_rect.right() as u32 { self.next_x = 0; self.next_y = self.next_y + rect.height() as u32; } if self.next_y > within_rect.bottom() as u32 { self.next_y = 0; } let y = self.next_y; let x = self.next_x; self.next_x = x + rect.width(); rect!(x, y, rect.width(), rect.height()) } fn reset(&mut self) { self.next_x = 0; self.next_y = 0; } } fn random_colour(c: Color) -> Color { let not_near_hsl = HSL::from_rgb(&[c.r, c.g, c.b]); let mut generated = not_near_hsl.clone(); while (generated.h - not_near_hsl.h).abs() < 40. 
{ let h: f64 = thread_rng().gen(); generated = HSL { h: h * 360.0, s: 1_f64, l: 0.5_f64, }; } let rgb = generated.to_rgb(); return Color::RGB(rgb.0, rgb.1, rgb.2); } fn load_sound(note: &str) -> AudioSpecWAV { // Load a sound let filename = format!("{}.wav", note); let path: PathBuf = ["./sounds", &filename].iter().collect(); let wav_file: Cow<'static, Path> = Cow::from(path); AudioSpecWAV::load_wav(wav_file.clone()) .expect("Could not load test WAV file") } fn load_image(fname: &str) -> Surface { // Load an image let filename = format!("{}.png", fname); let path: PathBuf = ["./images", &filename].iter().collect(); let image_file: Cow<'static, Path> = Cow::from(path); Surface::from_file(image_file.clone()) .expect("Could not load image file") } pub fn main() { let sdl_context = sdl2::init().unwrap(); let video_subsystem = sdl_context.video().unwrap(); let audio_subsystem = sdl_context.audio().unwrap(); let _image_context = sdl2::image::init(INIT_PNG); let (window_width, window_height) = (800, 600); let mut window = video_subsystem .window("Bish Bash Bosh", window_width, window_height) .position_centered() .opengl() .build() .unwrap(); window.set_fullscreen(FullscreenType::Desktop).unwrap(); window.set_grab(true); let (window_width, window_height) = window.size(); let mut canvas = window.into_canvas().build().unwrap(); let texture_creator = canvas.texture_creator(); let mut event_pump = sdl_context.event_pump().unwrap(); let ttf_context = sdl2::ttf::init().unwrap(); // Load a font let mut font = ttf_context.load_font("DejaVuSans-Bold.ttf", 112).unwrap(); font.set_style(sdl2::ttf::STYLE_BOLD); let desired_spec = AudioSpecDesired { freq: Some(44_100), channels: Some(1), // mono samples: None, // default }; let audio_queue: AudioQueue<u8> = audio_subsystem .open_queue(None, &desired_spec) .unwrap(); // let mut position_strategy = RandomPositionStrategy { }; let mut position_strategy = LeftToRightStrategy { next_x: 0 , next_y: window_height / 3}; 
canvas.set_draw_color(Color::RGB(255, 0, 0)); canvas.clear(); canvas.present(); // Keep track of all displayed characters, and their postitions let mut drawables = vec![]; let drawable_keys: HashSet<String> = [ "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", ].iter() .map(|s| s.to_string()) .collect(); let noisy_keys: HashMap<String, String> = [ ("F1", "37a"), ("F2", "38b"), ("F3", "39bb"), ("F4", "40c"), ("F5", "41c"), ("F6", "42d"), ("F7", "43e"), ("F8", "44eb"), ("F9", "45f"), ("F10", "46f"), ("F11", "47g"), ("F12", "48g"), ("A", "alpha-a"), ("B", "alpha-b"), ("C", "alpha-c"), ("D", "alpha-d"), ("E", "alpha-e"), ("F", "alpha-f"), ("G", "alpha-g"), ("H", "alpha-h"), ("I", "alpha-i"), ("J", "alpha-j"), ("K", "alpha-k"), ("L", "alpha-l"), ("M", "alpha-m"), ("N", "alpha-n"), ("O", "alpha-o"), ("P", "alpha-p"), ("Q", "alpha-q"), ("R", "alpha-r"), ("S", "alpha-s"), ("T", "alpha-t"), ("U", "alpha-u"), ("V", "alpha-v"), ("W", "alpha-w"), ("X", "alpha-x"), ("Y", "alpha-y"), ("Z", "alpha-z"), ].iter() .map(|(s1, s2)| (s1.to_string(), s2.to_string())) .collect(); let images: HashMap<String, String> = [ ("T", "T"), ("B", "buzz"), ("C", "chase"), ("D", "dumbo"), ("G", "geo"), ("H", "harrison"), ("I", "igglepiggle"), ("M", "mickey"), ("P", "peppa"), ("S", "simba"),
.collect(); let mut background_color = random_colour(Color::RGB(255, 255, 255)); 'running: loop { for event in event_pump.poll_iter() { match event { Event::Quit { .. } | Event::KeyDown { keycode: Some(Keycode::Escape), repeat: true, .. } => break 'running, Event::KeyDown { keycode: Some(Keycode::Return), repeat: false, .. } => { position_strategy.reset(); drawables.clear(); background_color = random_colour(Color::RGB(255, 255, 255)); } Event::KeyDown { keycode: Some(key), repeat: false, .. } => { if drawable_keys.contains(&key.name()) { let colour = random_colour(background_color); let surface = font.render(&key.name()).blended(colour).unwrap(); let texture = texture_creator .create_texture_from_surface(&surface) .unwrap(); let TextureQuery { width, height, .. } = texture.query(); let target = position_strategy.next_position( rect!(0, 0, width, height), rect!(0, 0, window_width, window_height), ); //rect!(150, 150, width, height); drawables.push((texture, target)); } if let Some(note) = noisy_keys.get(&key.name()) { let wav = load_sound(&note); let spec = audio_queue.spec(); let cvt = AudioCVT::new( wav.format, wav.channels, wav.freq, spec.format, spec.channels, spec.freq, ).expect("Could not convert WAV file"); let data = cvt.convert(wav.buffer().to_vec()); audio_queue.clear(); audio_queue.queue(&data); // Start playback audio_queue.resume(); } if let Some(filename) = images.get(&key.name()) { let mut surface = load_image(&filename); let sf = (100f64 / surface.height() as f64); println!("{}", sf ); let surface = surface.rotozoom(0f64, sf, false).unwrap(); let texture = texture_creator .create_texture_from_surface(&surface) .unwrap(); let TextureQuery { width, height, .. 
} = texture.query(); let target = position_strategy.next_position( rect!(0, 0, width, height), rect!(0, 0, window_width, window_height), ); drawables.push((texture, target)); } } _ => {} } } // Draw the chars canvas.set_draw_color(background_color); canvas.clear(); for &(ref texture, target) in drawables.iter() { canvas.copy(&texture, None, Some(target.clone())).unwrap(); } canvas.present(); ::std::thread::sleep(Duration::new(0, 1_000_000_000u32 / 60)); } }
("U", "upsiedaisy"), ("W", "woody"), ].iter() .map(|(s1, s2)| (s1.to_string(), s2.to_string()))
random_line_split
main.rs
extern crate hsl; extern crate rand; extern crate sdl2; use sdl2::audio::{AudioCVT, AudioSpecDesired, AudioSpecWAV, AudioQueue}; use sdl2::event::Event; use sdl2::image::{INIT_PNG, LoadSurface}; use sdl2::gfx::rotozoom::RotozoomSurface; use sdl2::keyboard::Keycode; use sdl2::pixels::Color; use sdl2::rect::Rect; use sdl2::render::{TextureQuery}; use sdl2::surface::Surface; use sdl2::video::FullscreenType; use hsl::HSL; use rand::{thread_rng, Rng}; use std::collections::{HashSet, HashMap}; use std::borrow::Cow; use std::path::{Path, PathBuf}; use std::time::Duration; macro_rules! rect( ($x:expr, $y:expr, $w:expr, $h:expr) => ( Rect::new($x as i32, $y as i32, $w as u32, $h as u32) ) ); trait PositionStrategy { fn next_position(&mut self, rect: Rect, within_rect: Rect) -> Rect; fn
(&mut self) { } } struct RandomPositionStrategy {} impl PositionStrategy for RandomPositionStrategy { // Return a random position that fits rect within rect fn next_position(&mut self, rect: Rect, within_rect: Rect) -> Rect { let rx: f64 = thread_rng().gen(); let ry: f64 = thread_rng().gen(); let posx = rx * (within_rect.width() - 1 * rect.width()) as f64; let posy = ry * (within_rect.height() - 1 * rect.height()) as f64; rect!(posx as f64, posy as f64, rect.width(), rect.height()) } } struct LeftToRightStrategy { next_x: u32, next_y: u32, } impl PositionStrategy for LeftToRightStrategy { fn next_position(&mut self, rect: Rect, within_rect: Rect) -> Rect { if self.next_x > within_rect.right() as u32 { self.next_x = 0; self.next_y = self.next_y + rect.height() as u32; } if self.next_y > within_rect.bottom() as u32 { self.next_y = 0; } let y = self.next_y; let x = self.next_x; self.next_x = x + rect.width(); rect!(x, y, rect.width(), rect.height()) } fn reset(&mut self) { self.next_x = 0; self.next_y = 0; } } fn random_colour(c: Color) -> Color { let not_near_hsl = HSL::from_rgb(&[c.r, c.g, c.b]); let mut generated = not_near_hsl.clone(); while (generated.h - not_near_hsl.h).abs() < 40. 
{ let h: f64 = thread_rng().gen(); generated = HSL { h: h * 360.0, s: 1_f64, l: 0.5_f64, }; } let rgb = generated.to_rgb(); return Color::RGB(rgb.0, rgb.1, rgb.2); } fn load_sound(note: &str) -> AudioSpecWAV { // Load a sound let filename = format!("{}.wav", note); let path: PathBuf = ["./sounds", &filename].iter().collect(); let wav_file: Cow<'static, Path> = Cow::from(path); AudioSpecWAV::load_wav(wav_file.clone()) .expect("Could not load test WAV file") } fn load_image(fname: &str) -> Surface { // Load an image let filename = format!("{}.png", fname); let path: PathBuf = ["./images", &filename].iter().collect(); let image_file: Cow<'static, Path> = Cow::from(path); Surface::from_file(image_file.clone()) .expect("Could not load image file") } pub fn main() { let sdl_context = sdl2::init().unwrap(); let video_subsystem = sdl_context.video().unwrap(); let audio_subsystem = sdl_context.audio().unwrap(); let _image_context = sdl2::image::init(INIT_PNG); let (window_width, window_height) = (800, 600); let mut window = video_subsystem .window("Bish Bash Bosh", window_width, window_height) .position_centered() .opengl() .build() .unwrap(); window.set_fullscreen(FullscreenType::Desktop).unwrap(); window.set_grab(true); let (window_width, window_height) = window.size(); let mut canvas = window.into_canvas().build().unwrap(); let texture_creator = canvas.texture_creator(); let mut event_pump = sdl_context.event_pump().unwrap(); let ttf_context = sdl2::ttf::init().unwrap(); // Load a font let mut font = ttf_context.load_font("DejaVuSans-Bold.ttf", 112).unwrap(); font.set_style(sdl2::ttf::STYLE_BOLD); let desired_spec = AudioSpecDesired { freq: Some(44_100), channels: Some(1), // mono samples: None, // default }; let audio_queue: AudioQueue<u8> = audio_subsystem .open_queue(None, &desired_spec) .unwrap(); // let mut position_strategy = RandomPositionStrategy { }; let mut position_strategy = LeftToRightStrategy { next_x: 0 , next_y: window_height / 3}; 
canvas.set_draw_color(Color::RGB(255, 0, 0)); canvas.clear(); canvas.present(); // Keep track of all displayed characters, and their postitions let mut drawables = vec![]; let drawable_keys: HashSet<String> = [ "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", ].iter() .map(|s| s.to_string()) .collect(); let noisy_keys: HashMap<String, String> = [ ("F1", "37a"), ("F2", "38b"), ("F3", "39bb"), ("F4", "40c"), ("F5", "41c"), ("F6", "42d"), ("F7", "43e"), ("F8", "44eb"), ("F9", "45f"), ("F10", "46f"), ("F11", "47g"), ("F12", "48g"), ("A", "alpha-a"), ("B", "alpha-b"), ("C", "alpha-c"), ("D", "alpha-d"), ("E", "alpha-e"), ("F", "alpha-f"), ("G", "alpha-g"), ("H", "alpha-h"), ("I", "alpha-i"), ("J", "alpha-j"), ("K", "alpha-k"), ("L", "alpha-l"), ("M", "alpha-m"), ("N", "alpha-n"), ("O", "alpha-o"), ("P", "alpha-p"), ("Q", "alpha-q"), ("R", "alpha-r"), ("S", "alpha-s"), ("T", "alpha-t"), ("U", "alpha-u"), ("V", "alpha-v"), ("W", "alpha-w"), ("X", "alpha-x"), ("Y", "alpha-y"), ("Z", "alpha-z"), ].iter() .map(|(s1, s2)| (s1.to_string(), s2.to_string())) .collect(); let images: HashMap<String, String> = [ ("T", "T"), ("B", "buzz"), ("C", "chase"), ("D", "dumbo"), ("G", "geo"), ("H", "harrison"), ("I", "igglepiggle"), ("M", "mickey"), ("P", "peppa"), ("S", "simba"), ("U", "upsiedaisy"), ("W", "woody"), ].iter() .map(|(s1, s2)| (s1.to_string(), s2.to_string())) .collect(); let mut background_color = random_colour(Color::RGB(255, 255, 255)); 'running: loop { for event in event_pump.poll_iter() { match event { Event::Quit { .. } | Event::KeyDown { keycode: Some(Keycode::Escape), repeat: true, .. } => break 'running, Event::KeyDown { keycode: Some(Keycode::Return), repeat: false, .. } => { position_strategy.reset(); drawables.clear(); background_color = random_colour(Color::RGB(255, 255, 255)); } Event::KeyDown { keycode: Some(key), repeat: false, .. 
} => { if drawable_keys.contains(&key.name()) { let colour = random_colour(background_color); let surface = font.render(&key.name()).blended(colour).unwrap(); let texture = texture_creator .create_texture_from_surface(&surface) .unwrap(); let TextureQuery { width, height, .. } = texture.query(); let target = position_strategy.next_position( rect!(0, 0, width, height), rect!(0, 0, window_width, window_height), ); //rect!(150, 150, width, height); drawables.push((texture, target)); } if let Some(note) = noisy_keys.get(&key.name()) { let wav = load_sound(&note); let spec = audio_queue.spec(); let cvt = AudioCVT::new( wav.format, wav.channels, wav.freq, spec.format, spec.channels, spec.freq, ).expect("Could not convert WAV file"); let data = cvt.convert(wav.buffer().to_vec()); audio_queue.clear(); audio_queue.queue(&data); // Start playback audio_queue.resume(); } if let Some(filename) = images.get(&key.name()) { let mut surface = load_image(&filename); let sf = (100f64 / surface.height() as f64); println!("{}", sf ); let surface = surface.rotozoom(0f64, sf, false).unwrap(); let texture = texture_creator .create_texture_from_surface(&surface) .unwrap(); let TextureQuery { width, height, .. } = texture.query(); let target = position_strategy.next_position( rect!(0, 0, width, height), rect!(0, 0, window_width, window_height), ); drawables.push((texture, target)); } } _ => {} } } // Draw the chars canvas.set_draw_color(background_color); canvas.clear(); for &(ref texture, target) in drawables.iter() { canvas.copy(&texture, None, Some(target.clone())).unwrap(); } canvas.present(); ::std::thread::sleep(Duration::new(0, 1_000_000_000u32 / 60)); } }
reset
identifier_name
main.rs
extern crate hsl; extern crate rand; extern crate sdl2; use sdl2::audio::{AudioCVT, AudioSpecDesired, AudioSpecWAV, AudioQueue}; use sdl2::event::Event; use sdl2::image::{INIT_PNG, LoadSurface}; use sdl2::gfx::rotozoom::RotozoomSurface; use sdl2::keyboard::Keycode; use sdl2::pixels::Color; use sdl2::rect::Rect; use sdl2::render::{TextureQuery}; use sdl2::surface::Surface; use sdl2::video::FullscreenType; use hsl::HSL; use rand::{thread_rng, Rng}; use std::collections::{HashSet, HashMap}; use std::borrow::Cow; use std::path::{Path, PathBuf}; use std::time::Duration; macro_rules! rect( ($x:expr, $y:expr, $w:expr, $h:expr) => ( Rect::new($x as i32, $y as i32, $w as u32, $h as u32) ) ); trait PositionStrategy { fn next_position(&mut self, rect: Rect, within_rect: Rect) -> Rect; fn reset(&mut self) { } } struct RandomPositionStrategy {} impl PositionStrategy for RandomPositionStrategy { // Return a random position that fits rect within rect fn next_position(&mut self, rect: Rect, within_rect: Rect) -> Rect { let rx: f64 = thread_rng().gen(); let ry: f64 = thread_rng().gen(); let posx = rx * (within_rect.width() - 1 * rect.width()) as f64; let posy = ry * (within_rect.height() - 1 * rect.height()) as f64; rect!(posx as f64, posy as f64, rect.width(), rect.height()) } } struct LeftToRightStrategy { next_x: u32, next_y: u32, } impl PositionStrategy for LeftToRightStrategy { fn next_position(&mut self, rect: Rect, within_rect: Rect) -> Rect { if self.next_x > within_rect.right() as u32 { self.next_x = 0; self.next_y = self.next_y + rect.height() as u32; } if self.next_y > within_rect.bottom() as u32 { self.next_y = 0; } let y = self.next_y; let x = self.next_x; self.next_x = x + rect.width(); rect!(x, y, rect.width(), rect.height()) } fn reset(&mut self) { self.next_x = 0; self.next_y = 0; } } fn random_colour(c: Color) -> Color { let not_near_hsl = HSL::from_rgb(&[c.r, c.g, c.b]); let mut generated = not_near_hsl.clone(); while (generated.h - not_near_hsl.h).abs() < 40. 
{ let h: f64 = thread_rng().gen(); generated = HSL { h: h * 360.0, s: 1_f64, l: 0.5_f64, }; } let rgb = generated.to_rgb(); return Color::RGB(rgb.0, rgb.1, rgb.2); } fn load_sound(note: &str) -> AudioSpecWAV { // Load a sound let filename = format!("{}.wav", note); let path: PathBuf = ["./sounds", &filename].iter().collect(); let wav_file: Cow<'static, Path> = Cow::from(path); AudioSpecWAV::load_wav(wav_file.clone()) .expect("Could not load test WAV file") } fn load_image(fname: &str) -> Surface { // Load an image let filename = format!("{}.png", fname); let path: PathBuf = ["./images", &filename].iter().collect(); let image_file: Cow<'static, Path> = Cow::from(path); Surface::from_file(image_file.clone()) .expect("Could not load image file") } pub fn main() { let sdl_context = sdl2::init().unwrap(); let video_subsystem = sdl_context.video().unwrap(); let audio_subsystem = sdl_context.audio().unwrap(); let _image_context = sdl2::image::init(INIT_PNG); let (window_width, window_height) = (800, 600); let mut window = video_subsystem .window("Bish Bash Bosh", window_width, window_height) .position_centered() .opengl() .build() .unwrap(); window.set_fullscreen(FullscreenType::Desktop).unwrap(); window.set_grab(true); let (window_width, window_height) = window.size(); let mut canvas = window.into_canvas().build().unwrap(); let texture_creator = canvas.texture_creator(); let mut event_pump = sdl_context.event_pump().unwrap(); let ttf_context = sdl2::ttf::init().unwrap(); // Load a font let mut font = ttf_context.load_font("DejaVuSans-Bold.ttf", 112).unwrap(); font.set_style(sdl2::ttf::STYLE_BOLD); let desired_spec = AudioSpecDesired { freq: Some(44_100), channels: Some(1), // mono samples: None, // default }; let audio_queue: AudioQueue<u8> = audio_subsystem .open_queue(None, &desired_spec) .unwrap(); // let mut position_strategy = RandomPositionStrategy { }; let mut position_strategy = LeftToRightStrategy { next_x: 0 , next_y: window_height / 3}; 
canvas.set_draw_color(Color::RGB(255, 0, 0)); canvas.clear(); canvas.present(); // Keep track of all displayed characters, and their postitions let mut drawables = vec![]; let drawable_keys: HashSet<String> = [ "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", ].iter() .map(|s| s.to_string()) .collect(); let noisy_keys: HashMap<String, String> = [ ("F1", "37a"), ("F2", "38b"), ("F3", "39bb"), ("F4", "40c"), ("F5", "41c"), ("F6", "42d"), ("F7", "43e"), ("F8", "44eb"), ("F9", "45f"), ("F10", "46f"), ("F11", "47g"), ("F12", "48g"), ("A", "alpha-a"), ("B", "alpha-b"), ("C", "alpha-c"), ("D", "alpha-d"), ("E", "alpha-e"), ("F", "alpha-f"), ("G", "alpha-g"), ("H", "alpha-h"), ("I", "alpha-i"), ("J", "alpha-j"), ("K", "alpha-k"), ("L", "alpha-l"), ("M", "alpha-m"), ("N", "alpha-n"), ("O", "alpha-o"), ("P", "alpha-p"), ("Q", "alpha-q"), ("R", "alpha-r"), ("S", "alpha-s"), ("T", "alpha-t"), ("U", "alpha-u"), ("V", "alpha-v"), ("W", "alpha-w"), ("X", "alpha-x"), ("Y", "alpha-y"), ("Z", "alpha-z"), ].iter() .map(|(s1, s2)| (s1.to_string(), s2.to_string())) .collect(); let images: HashMap<String, String> = [ ("T", "T"), ("B", "buzz"), ("C", "chase"), ("D", "dumbo"), ("G", "geo"), ("H", "harrison"), ("I", "igglepiggle"), ("M", "mickey"), ("P", "peppa"), ("S", "simba"), ("U", "upsiedaisy"), ("W", "woody"), ].iter() .map(|(s1, s2)| (s1.to_string(), s2.to_string())) .collect(); let mut background_color = random_colour(Color::RGB(255, 255, 255)); 'running: loop { for event in event_pump.poll_iter() { match event { Event::Quit { .. } | Event::KeyDown { keycode: Some(Keycode::Escape), repeat: true, .. } => break 'running, Event::KeyDown { keycode: Some(Keycode::Return), repeat: false, .. } => { position_strategy.reset(); drawables.clear(); background_color = random_colour(Color::RGB(255, 255, 255)); } Event::KeyDown { keycode: Some(key), repeat: false, .. 
} => { if drawable_keys.contains(&key.name()) { let colour = random_colour(background_color); let surface = font.render(&key.name()).blended(colour).unwrap(); let texture = texture_creator .create_texture_from_surface(&surface) .unwrap(); let TextureQuery { width, height, .. } = texture.query(); let target = position_strategy.next_position( rect!(0, 0, width, height), rect!(0, 0, window_width, window_height), ); //rect!(150, 150, width, height); drawables.push((texture, target)); } if let Some(note) = noisy_keys.get(&key.name()) { let wav = load_sound(&note); let spec = audio_queue.spec(); let cvt = AudioCVT::new( wav.format, wav.channels, wav.freq, spec.format, spec.channels, spec.freq, ).expect("Could not convert WAV file"); let data = cvt.convert(wav.buffer().to_vec()); audio_queue.clear(); audio_queue.queue(&data); // Start playback audio_queue.resume(); } if let Some(filename) = images.get(&key.name())
} _ => {} } } // Draw the chars canvas.set_draw_color(background_color); canvas.clear(); for &(ref texture, target) in drawables.iter() { canvas.copy(&texture, None, Some(target.clone())).unwrap(); } canvas.present(); ::std::thread::sleep(Duration::new(0, 1_000_000_000u32 / 60)); } }
{ let mut surface = load_image(&filename); let sf = (100f64 / surface.height() as f64); println!("{}", sf ); let surface = surface.rotozoom(0f64, sf, false).unwrap(); let texture = texture_creator .create_texture_from_surface(&surface) .unwrap(); let TextureQuery { width, height, .. } = texture.query(); let target = position_strategy.next_position( rect!(0, 0, width, height), rect!(0, 0, window_width, window_height), ); drawables.push((texture, target)); }
conditional_block
system.rs
// This file is part of Substrate. // Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! System manager: Handles all of the top-level stuff; executing block/transaction, setting code //! and depositing logs. use crate::{ AccountId, AuthorityId, Block, BlockNumber, Digest, Extrinsic, Header, Transfer, H256 as Hash, }; use codec::{Decode, Encode, KeyedVec}; use frame_support::storage; use frame_support::{decl_module, decl_storage}; use frame_system::Trait; use sp_core::{storage::well_known_keys, ChangesTrieConfiguration}; use sp_io::{ hashing::blake2_256, storage::changes_root as storage_changes_root, storage::root as storage_root, trie, }; use sp_runtime::{ generic, traits::Header as _, transaction_validity::{ InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, }, ApplyExtrinsicResult, }; use sp_std::prelude::*; const NONCE_OF: &[u8] = b"nonce:"; const BALANCE_OF: &[u8] = b"balance:"; decl_module! { pub struct Module<T: Trait> for enum Call where origin: T::Origin {} } decl_storage! { trait Store for Module<T: Trait> as TestRuntime { ExtrinsicData: map hasher(blake2_128_concat) u32 => Vec<u8>; // The current block number being processed. Set by `execute_block`. 
Number get(fn number): Option<BlockNumber>; ParentHash get(fn parent_hash): Hash; NewAuthorities get(fn new_authorities): Option<Vec<AuthorityId>>; NewChangesTrieConfig get(fn new_changes_trie_config): Option<Option<ChangesTrieConfiguration>>; StorageDigest get(fn storage_digest): Option<Digest>; Authorities get(fn authorities) config(): Vec<AuthorityId>; } } pub fn balance_of_key(who: AccountId) -> Vec<u8> { who.to_keyed_vec(BALANCE_OF) } pub fn balance_of(who: AccountId) -> u64 { storage::hashed::get_or(&blake2_256, &balance_of_key(who), 0) } pub fn nonce_of(who: AccountId) -> u64 { storage::hashed::get_or(&blake2_256, &who.to_keyed_vec(NONCE_OF), 0) } pub fn initialize_block(header: &Header) { // populate environment. <Number>::put(&header.number); <ParentHash>::put(&header.parent_hash); <StorageDigest>::put(header.digest()); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &0u32); // try to read something that depends on current header digest // so that it'll be included in execution proof if let Some(generic::DigestItem::Other(v)) = header.digest().logs().iter().next() { let _: Option<u32> = storage::unhashed::get(&v); } } pub fn authorities() -> Vec<AuthorityId> { Authorities::get() } pub fn get_block_number() -> Option<BlockNumber> { Number::get() } pub fn take_block_number() -> Option<BlockNumber> { Number::take() } #[derive(Copy, Clone)] enum Mode { Verify, Overwrite, } /// Actually execute all transitioning for `block`. 
pub fn polish_block(block: &mut Block) { execute_block_with_state_root_handler(block, Mode::Overwrite); } pub fn execute_block(mut block: Block) { execute_block_with_state_root_handler(&mut block, Mode::Verify); } fn execute_block_with_state_root_handler(block: &mut Block, mode: Mode) { let header = &mut block.header; initialize_block(header); // execute transactions block.extrinsics.iter().for_each(|e| { let _ = execute_transaction(e.clone()).unwrap_or_else(|_| panic!("Invalid transaction")); }); let new_header = finalize_block(); if let Mode::Overwrite = mode { header.state_root = new_header.state_root; } else { info_expect_equal_hash(&new_header.state_root, &header.state_root); assert!( new_header.state_root == header.state_root, "Storage root must match that calculated.", ); } if let Mode::Overwrite = mode { header.extrinsics_root = new_header.extrinsics_root; } else { info_expect_equal_hash(&new_header.extrinsics_root, &header.extrinsics_root); assert!( new_header.extrinsics_root == header.extrinsics_root, "Transaction trie root must be valid.", ); } } /// The block executor. pub struct BlockExecutor; impl frame_executive::ExecuteBlock<Block> for BlockExecutor { fn execute_block(block: Block) { execute_block(block); } } /// Execute a transaction outside of the block execution function. /// This doesn't attempt to validate anything regarding the block. 
pub fn validate_transaction(utx: Extrinsic) -> TransactionValidity { if check_signature(&utx).is_err() { return InvalidTransaction::BadProof.into() } let tx = utx.transfer(); let nonce_key = tx.from.to_keyed_vec(NONCE_OF); let expected_nonce: u64 = storage::hashed::get_or(&blake2_256, &nonce_key, 0); if tx.nonce < expected_nonce { return InvalidTransaction::Stale.into() } if tx.nonce > expected_nonce + 64 { return InvalidTransaction::Future.into() } let encode = |from: &AccountId, nonce: u64| (from, nonce).encode(); let requires = if tx.nonce != expected_nonce && tx.nonce > 0 { vec![encode(&tx.from, tx.nonce - 1)] } else { vec![] }; let provides = vec![encode(&tx.from, tx.nonce)]; Ok(ValidTransaction { priority: tx.amount, requires, provides, longevity: 64, propagate: true }) } /// Execute a transaction outside of the block execution function. /// This doesn't attempt to validate anything regarding the block. pub fn execute_transaction(utx: Extrinsic) -> ApplyExtrinsicResult { let extrinsic_index: u32 = storage::unhashed::get(well_known_keys::EXTRINSIC_INDEX).unwrap(); let result = execute_transaction_backend(&utx, extrinsic_index); ExtrinsicData::insert(extrinsic_index, utx.encode()); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &(extrinsic_index + 1)); result } /// Finalize the block. pub fn finalize_block() -> Header
inline(always)] fn check_signature(utx: &Extrinsic) -> Result<(), TransactionValidityError> { use sp_runtime::traits::BlindCheckable; utx.clone().check().map_err(|_| InvalidTransaction::BadProof.into()).map(|_| ()) } fn execute_transaction_backend(utx: &Extrinsic, extrinsic_index: u32) -> ApplyExtrinsicResult { check_signature(utx)?; match utx { Extrinsic::Transfer { exhaust_resources_when_not_first: true, .. } if extrinsic_index != 0 => Err(InvalidTransaction::ExhaustsResources.into()), Extrinsic::Transfer { ref transfer, .. } => execute_transfer_backend(transfer), Extrinsic::AuthoritiesChange(ref new_auth) => execute_new_authorities_backend(new_auth), Extrinsic::IncludeData(_) => Ok(Ok(())), Extrinsic::StorageChange(key, value) => execute_storage_change(key, value.as_ref().map(|v| &**v)), Extrinsic::ChangesTrieConfigUpdate(ref new_config) => execute_changes_trie_config_update(new_config.clone()), } } fn execute_transfer_backend(tx: &Transfer) -> ApplyExtrinsicResult { // check nonce let nonce_key = tx.from.to_keyed_vec(NONCE_OF); let expected_nonce: u64 = storage::hashed::get_or(&blake2_256, &nonce_key, 0); if !(tx.nonce == expected_nonce) { return Err(InvalidTransaction::Stale.into()) } // increment nonce in storage storage::hashed::put(&blake2_256, &nonce_key, &(expected_nonce + 1)); // check sender balance let from_balance_key = tx.from.to_keyed_vec(BALANCE_OF); let from_balance: u64 = storage::hashed::get_or(&blake2_256, &from_balance_key, 0); // enact transfer if !(tx.amount <= from_balance) { return Err(InvalidTransaction::Payment.into()) } let to_balance_key = tx.to.to_keyed_vec(BALANCE_OF); let to_balance: u64 = storage::hashed::get_or(&blake2_256, &to_balance_key, 0); storage::hashed::put(&blake2_256, &from_balance_key, &(from_balance - tx.amount)); storage::hashed::put(&blake2_256, &to_balance_key, &(to_balance + tx.amount)); Ok(Ok(())) } fn execute_new_authorities_backend(new_authorities: &[AuthorityId]) -> ApplyExtrinsicResult { 
NewAuthorities::put(new_authorities.to_vec()); Ok(Ok(())) } fn execute_storage_change(key: &[u8], value: Option<&[u8]>) -> ApplyExtrinsicResult { match value { Some(value) => storage::unhashed::put_raw(key, value), None => storage::unhashed::kill(key), } Ok(Ok(())) } fn execute_changes_trie_config_update( new_config: Option<ChangesTrieConfiguration>, ) -> ApplyExtrinsicResult { match new_config.clone() { Some(new_config) => storage::unhashed::put_raw(well_known_keys::CHANGES_TRIE_CONFIG, &new_config.encode()), None => storage::unhashed::kill(well_known_keys::CHANGES_TRIE_CONFIG), } <NewChangesTrieConfig>::put(new_config); Ok(Ok(())) } #[cfg(feature = "std")] fn info_expect_equal_hash(given: &Hash, expected: &Hash) { use sp_core::hexdisplay::HexDisplay; if given != expected { println!( "Hash: given={}, expected={}", HexDisplay::from(given.as_fixed_bytes()), HexDisplay::from(expected.as_fixed_bytes()), ); } } #[cfg(not(feature = "std"))] fn info_expect_equal_hash(given: &Hash, expected: &Hash) { if given != expected { sp_runtime::print("Hash not equal"); sp_runtime::print(given.as_bytes()); sp_runtime::print(expected.as_bytes()); } } #[cfg(test)] mod tests { use super::*; use crate::{wasm_binary_unwrap, Header, Transfer}; use sc_executor::{native_executor_instance, NativeExecutor, WasmExecutionMethod}; use sp_core::{ map, traits::{CodeExecutor, RuntimeCode}, NeverNativeValue, }; use sp_io::hashing::twox_128; use sp_io::TestExternalities; use substrate_test_runtime_client::{AccountKeyring, Sr25519Keyring}; // Declare an instance of the native executor dispatch for the test runtime. 
native_executor_instance!(NativeDispatch, crate::api::dispatch, crate::native_version); fn executor() -> NativeExecutor<NativeDispatch> { NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) } fn new_test_ext() -> TestExternalities { let authorities = vec![ Sr25519Keyring::Alice.to_raw_public(), Sr25519Keyring::Bob.to_raw_public(), Sr25519Keyring::Charlie.to_raw_public(), ]; TestExternalities::new_with_code( wasm_binary_unwrap(), sp_core::storage::Storage { top: map![ twox_128(b"latest").to_vec() => vec![69u8; 32], twox_128(b"sys:auth").to_vec() => authorities.encode(), blake2_256(&AccountKeyring::Alice.to_raw_public().to_keyed_vec(b"balance:")).to_vec() => { vec![111u8, 0, 0, 0, 0, 0, 0, 0] } ], children_default: map![], }, ) } fn block_import_works<F>(block_executor: F) where F: Fn(Block, &mut TestExternalities), { let h = Header { parent_hash: [69u8; 32].into(), number: 1, state_root: Default::default(), extrinsics_root: Default::default(), digest: Default::default(), }; let mut b = Block { header: h, extrinsics: vec![] }; new_test_ext().execute_with(|| polish_block(&mut b)); block_executor(b, &mut new_test_ext()); } #[test] fn block_import_works_native() { block_import_works(|b, ext| ext.execute_with(|| execute_block(b))); } #[test] fn block_import_works_wasm() { block_import_works(|b, ext| { let mut ext = ext.ext(); let runtime_code = RuntimeCode { code_fetcher: &sp_core::traits::WrappedRuntimeCode(wasm_binary_unwrap().into()), hash: Vec::new(), heap_pages: None, }; executor() .call::<NeverNativeValue, fn() -> _>( &mut ext, &runtime_code, "Core_execute_block", &b.encode(), false, None, ) .0 .unwrap(); }) } fn block_import_with_transaction_works<F>(block_executor: F) where F: Fn(Block, &mut TestExternalities), { let mut b1 = Block { header: Header { parent_hash: [69u8; 32].into(), number: 1, state_root: Default::default(), extrinsics_root: Default::default(), digest: Default::default(), }, extrinsics: vec![Transfer { from: 
AccountKeyring::Alice.into(), to: AccountKeyring::Bob.into(), amount: 69, nonce: 0, } .into_signed_tx()], }; let mut dummy_ext = new_test_ext(); dummy_ext.execute_with(|| polish_block(&mut b1)); let mut b2 = Block { header: Header { parent_hash: b1.header.hash(), number: 2, state_root: Default::default(), extrinsics_root: Default::default(), digest: Default::default(), }, extrinsics: vec![ Transfer { from: AccountKeyring::Bob.into(), to: AccountKeyring::Alice.into(), amount: 27, nonce: 0, } .into_signed_tx(), Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Charlie.into(), amount: 69, nonce: 1, } .into_signed_tx(), ], }; dummy_ext.execute_with(|| polish_block(&mut b2)); drop(dummy_ext); let mut t = new_test_ext(); t.execute_with(|| { assert_eq!(balance_of(AccountKeyring::Alice.into()), 111); assert_eq!(balance_of(AccountKeyring::Bob.into()), 0); }); block_executor(b1, &mut t); t.execute_with(|| { assert_eq!(balance_of(AccountKeyring::Alice.into()), 42); assert_eq!(balance_of(AccountKeyring::Bob.into()), 69); }); block_executor(b2, &mut t); t.execute_with(|| { assert_eq!(balance_of(AccountKeyring::Alice.into()), 0); assert_eq!(balance_of(AccountKeyring::Bob.into()), 42); assert_eq!(balance_of(AccountKeyring::Charlie.into()), 69); }); } #[test] fn block_import_with_transaction_works_native() { block_import_with_transaction_works(|b, ext| ext.execute_with(|| execute_block(b))); } #[test] fn block_import_with_transaction_works_wasm() { block_import_with_transaction_works(|b, ext| { let mut ext = ext.ext(); let runtime_code = RuntimeCode { code_fetcher: &sp_core::traits::WrappedRuntimeCode(wasm_binary_unwrap().into()), hash: Vec::new(), heap_pages: None, }; executor() .call::<NeverNativeValue, fn() -> _>( &mut ext, &runtime_code, "Core_execute_block", &b.encode(), false, None, ) .0 .unwrap(); }) } }
{ let extrinsic_index: u32 = storage::unhashed::take(well_known_keys::EXTRINSIC_INDEX).unwrap(); let txs: Vec<_> = (0..extrinsic_index).map(ExtrinsicData::take).collect(); let extrinsics_root = trie::blake2_256_ordered_root(txs).into(); let number = <Number>::take().expect("Number is set by `initialize_block`"); let parent_hash = <ParentHash>::take(); let mut digest = <StorageDigest>::take().expect("StorageDigest is set by `initialize_block`"); let o_new_authorities = <NewAuthorities>::take(); let new_changes_trie_config = <NewChangesTrieConfig>::take(); // This MUST come after all changes to storage are done. Otherwise we will fail the // “Storage root does not match that calculated” assertion. let storage_root = Hash::decode(&mut &storage_root()[..]).expect("`storage_root` is a valid hash"); let storage_changes_root = storage_changes_root(&parent_hash.encode()) .map(|r| Hash::decode(&mut &r[..]).expect("`storage_changes_root` is a valid hash")); if let Some(storage_changes_root) = storage_changes_root { digest.push(generic::DigestItem::ChangesTrieRoot(storage_changes_root)); } if let Some(new_authorities) = o_new_authorities { digest.push(generic::DigestItem::Consensus(*b"aura", new_authorities.encode())); digest.push(generic::DigestItem::Consensus(*b"babe", new_authorities.encode())); } if let Some(new_config) = new_changes_trie_config { digest.push(generic::DigestItem::ChangesTrieSignal( generic::ChangesTrieSignal::NewConfiguration(new_config), )); } Header { number, extrinsics_root, state_root: storage_root, parent_hash, digest } } #[
identifier_body
system.rs
// This file is part of Substrate. // Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! System manager: Handles all of the top-level stuff; executing block/transaction, setting code //! and depositing logs. use crate::{ AccountId, AuthorityId, Block, BlockNumber, Digest, Extrinsic, Header, Transfer, H256 as Hash, }; use codec::{Decode, Encode, KeyedVec}; use frame_support::storage; use frame_support::{decl_module, decl_storage}; use frame_system::Trait; use sp_core::{storage::well_known_keys, ChangesTrieConfiguration}; use sp_io::{ hashing::blake2_256, storage::changes_root as storage_changes_root, storage::root as storage_root, trie, }; use sp_runtime::{ generic, traits::Header as _, transaction_validity::{ InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, }, ApplyExtrinsicResult, }; use sp_std::prelude::*; const NONCE_OF: &[u8] = b"nonce:"; const BALANCE_OF: &[u8] = b"balance:"; decl_module! { pub struct Module<T: Trait> for enum Call where origin: T::Origin {} } decl_storage! { trait Store for Module<T: Trait> as TestRuntime { ExtrinsicData: map hasher(blake2_128_concat) u32 => Vec<u8>; // The current block number being processed. Set by `execute_block`. 
Number get(fn number): Option<BlockNumber>; ParentHash get(fn parent_hash): Hash; NewAuthorities get(fn new_authorities): Option<Vec<AuthorityId>>; NewChangesTrieConfig get(fn new_changes_trie_config): Option<Option<ChangesTrieConfiguration>>; StorageDigest get(fn storage_digest): Option<Digest>; Authorities get(fn authorities) config(): Vec<AuthorityId>; } } pub fn balance_of_key(who: AccountId) -> Vec<u8> { who.to_keyed_vec(BALANCE_OF) } pub fn balance_of(who: AccountId) -> u64 { storage::hashed::get_or(&blake2_256, &balance_of_key(who), 0) } pub fn nonce_of(who: AccountId) -> u64 { storage::hashed::get_or(&blake2_256, &who.to_keyed_vec(NONCE_OF), 0) } pub fn initialize_block(header: &Header) { // populate environment. <Number>::put(&header.number); <ParentHash>::put(&header.parent_hash); <StorageDigest>::put(header.digest()); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &0u32); // try to read something that depends on current header digest // so that it'll be included in execution proof if let Some(generic::DigestItem::Other(v)) = header.digest().logs().iter().next() { let _: Option<u32> = storage::unhashed::get(&v); } } pub fn authorities() -> Vec<AuthorityId> { Authorities::get() } pub fn get_block_number() -> Option<BlockNumber> { Number::get() } pub fn take_block_number() -> Option<BlockNumber> { Number::take() } #[derive(Copy, Clone)] enum Mode { Verify, Overwrite, } /// Actually execute all transitioning for `block`. 
pub fn polish_block(block: &mut Block) { execute_block_with_state_root_handler(block, Mode::Overwrite); } pub fn execute_block(mut block: Block) { execute_block_with_state_root_handler(&mut block, Mode::Verify); } fn execute_block_with_state_root_handler(block: &mut Block, mode: Mode) { let header = &mut block.header; initialize_block(header); // execute transactions block.extrinsics.iter().for_each(|e| { let _ = execute_transaction(e.clone()).unwrap_or_else(|_| panic!("Invalid transaction")); }); let new_header = finalize_block(); if let Mode::Overwrite = mode { header.state_root = new_header.state_root; } else { info_expect_equal_hash(&new_header.state_root, &header.state_root); assert!( new_header.state_root == header.state_root, "Storage root must match that calculated.", ); } if let Mode::Overwrite = mode { header.extrinsics_root = new_header.extrinsics_root; } else { info_expect_equal_hash(&new_header.extrinsics_root, &header.extrinsics_root); assert!( new_header.extrinsics_root == header.extrinsics_root, "Transaction trie root must be valid.", ); } } /// The block executor. pub struct BlockExecutor; impl frame_executive::ExecuteBlock<Block> for BlockExecutor { fn execute_block(block: Block) { execute_block(block); } } /// Execute a transaction outside of the block execution function. /// This doesn't attempt to validate anything regarding the block. pub fn validate_transaction(utx: Extrinsic) -> TransactionValidity { if check_signature(&utx).is_err() { return InvalidTransaction::BadProof.into() } let tx = utx.transfer(); let nonce_key = tx.from.to_keyed_vec(NONCE_OF); let expected_nonce: u64 = storage::hashed::get_or(&blake2_256, &nonce_key, 0); if tx.nonce < expected_nonce { return InvalidTransaction::Stale.into() } if tx.nonce > expected_nonce + 64 { return InvalidTransaction::Future.into() } let encode = |from: &AccountId, nonce: u64| (from, nonce).encode(); let requires = if tx.nonce != expected_nonce && tx.nonce > 0
else { vec![] }; let provides = vec![encode(&tx.from, tx.nonce)]; Ok(ValidTransaction { priority: tx.amount, requires, provides, longevity: 64, propagate: true }) } /// Execute a transaction outside of the block execution function. /// This doesn't attempt to validate anything regarding the block. pub fn execute_transaction(utx: Extrinsic) -> ApplyExtrinsicResult { let extrinsic_index: u32 = storage::unhashed::get(well_known_keys::EXTRINSIC_INDEX).unwrap(); let result = execute_transaction_backend(&utx, extrinsic_index); ExtrinsicData::insert(extrinsic_index, utx.encode()); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &(extrinsic_index + 1)); result } /// Finalize the block. pub fn finalize_block() -> Header { let extrinsic_index: u32 = storage::unhashed::take(well_known_keys::EXTRINSIC_INDEX).unwrap(); let txs: Vec<_> = (0..extrinsic_index).map(ExtrinsicData::take).collect(); let extrinsics_root = trie::blake2_256_ordered_root(txs).into(); let number = <Number>::take().expect("Number is set by `initialize_block`"); let parent_hash = <ParentHash>::take(); let mut digest = <StorageDigest>::take().expect("StorageDigest is set by `initialize_block`"); let o_new_authorities = <NewAuthorities>::take(); let new_changes_trie_config = <NewChangesTrieConfig>::take(); // This MUST come after all changes to storage are done. Otherwise we will fail the // “Storage root does not match that calculated” assertion. 
let storage_root = Hash::decode(&mut &storage_root()[..]).expect("`storage_root` is a valid hash"); let storage_changes_root = storage_changes_root(&parent_hash.encode()) .map(|r| Hash::decode(&mut &r[..]).expect("`storage_changes_root` is a valid hash")); if let Some(storage_changes_root) = storage_changes_root { digest.push(generic::DigestItem::ChangesTrieRoot(storage_changes_root)); } if let Some(new_authorities) = o_new_authorities { digest.push(generic::DigestItem::Consensus(*b"aura", new_authorities.encode())); digest.push(generic::DigestItem::Consensus(*b"babe", new_authorities.encode())); } if let Some(new_config) = new_changes_trie_config { digest.push(generic::DigestItem::ChangesTrieSignal( generic::ChangesTrieSignal::NewConfiguration(new_config), )); } Header { number, extrinsics_root, state_root: storage_root, parent_hash, digest } } #[inline(always)] fn check_signature(utx: &Extrinsic) -> Result<(), TransactionValidityError> { use sp_runtime::traits::BlindCheckable; utx.clone().check().map_err(|_| InvalidTransaction::BadProof.into()).map(|_| ()) } fn execute_transaction_backend(utx: &Extrinsic, extrinsic_index: u32) -> ApplyExtrinsicResult { check_signature(utx)?; match utx { Extrinsic::Transfer { exhaust_resources_when_not_first: true, .. } if extrinsic_index != 0 => Err(InvalidTransaction::ExhaustsResources.into()), Extrinsic::Transfer { ref transfer, .. 
} => execute_transfer_backend(transfer), Extrinsic::AuthoritiesChange(ref new_auth) => execute_new_authorities_backend(new_auth), Extrinsic::IncludeData(_) => Ok(Ok(())), Extrinsic::StorageChange(key, value) => execute_storage_change(key, value.as_ref().map(|v| &**v)), Extrinsic::ChangesTrieConfigUpdate(ref new_config) => execute_changes_trie_config_update(new_config.clone()), } } fn execute_transfer_backend(tx: &Transfer) -> ApplyExtrinsicResult { // check nonce let nonce_key = tx.from.to_keyed_vec(NONCE_OF); let expected_nonce: u64 = storage::hashed::get_or(&blake2_256, &nonce_key, 0); if !(tx.nonce == expected_nonce) { return Err(InvalidTransaction::Stale.into()) } // increment nonce in storage storage::hashed::put(&blake2_256, &nonce_key, &(expected_nonce + 1)); // check sender balance let from_balance_key = tx.from.to_keyed_vec(BALANCE_OF); let from_balance: u64 = storage::hashed::get_or(&blake2_256, &from_balance_key, 0); // enact transfer if !(tx.amount <= from_balance) { return Err(InvalidTransaction::Payment.into()) } let to_balance_key = tx.to.to_keyed_vec(BALANCE_OF); let to_balance: u64 = storage::hashed::get_or(&blake2_256, &to_balance_key, 0); storage::hashed::put(&blake2_256, &from_balance_key, &(from_balance - tx.amount)); storage::hashed::put(&blake2_256, &to_balance_key, &(to_balance + tx.amount)); Ok(Ok(())) } fn execute_new_authorities_backend(new_authorities: &[AuthorityId]) -> ApplyExtrinsicResult { NewAuthorities::put(new_authorities.to_vec()); Ok(Ok(())) } fn execute_storage_change(key: &[u8], value: Option<&[u8]>) -> ApplyExtrinsicResult { match value { Some(value) => storage::unhashed::put_raw(key, value), None => storage::unhashed::kill(key), } Ok(Ok(())) } fn execute_changes_trie_config_update( new_config: Option<ChangesTrieConfiguration>, ) -> ApplyExtrinsicResult { match new_config.clone() { Some(new_config) => storage::unhashed::put_raw(well_known_keys::CHANGES_TRIE_CONFIG, &new_config.encode()), None => 
storage::unhashed::kill(well_known_keys::CHANGES_TRIE_CONFIG), } <NewChangesTrieConfig>::put(new_config); Ok(Ok(())) } #[cfg(feature = "std")] fn info_expect_equal_hash(given: &Hash, expected: &Hash) { use sp_core::hexdisplay::HexDisplay; if given != expected { println!( "Hash: given={}, expected={}", HexDisplay::from(given.as_fixed_bytes()), HexDisplay::from(expected.as_fixed_bytes()), ); } } #[cfg(not(feature = "std"))] fn info_expect_equal_hash(given: &Hash, expected: &Hash) { if given != expected { sp_runtime::print("Hash not equal"); sp_runtime::print(given.as_bytes()); sp_runtime::print(expected.as_bytes()); } } #[cfg(test)] mod tests { use super::*; use crate::{wasm_binary_unwrap, Header, Transfer}; use sc_executor::{native_executor_instance, NativeExecutor, WasmExecutionMethod}; use sp_core::{ map, traits::{CodeExecutor, RuntimeCode}, NeverNativeValue, }; use sp_io::hashing::twox_128; use sp_io::TestExternalities; use substrate_test_runtime_client::{AccountKeyring, Sr25519Keyring}; // Declare an instance of the native executor dispatch for the test runtime. 
native_executor_instance!(NativeDispatch, crate::api::dispatch, crate::native_version); fn executor() -> NativeExecutor<NativeDispatch> { NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) } fn new_test_ext() -> TestExternalities { let authorities = vec![ Sr25519Keyring::Alice.to_raw_public(), Sr25519Keyring::Bob.to_raw_public(), Sr25519Keyring::Charlie.to_raw_public(), ]; TestExternalities::new_with_code( wasm_binary_unwrap(), sp_core::storage::Storage { top: map![ twox_128(b"latest").to_vec() => vec![69u8; 32], twox_128(b"sys:auth").to_vec() => authorities.encode(), blake2_256(&AccountKeyring::Alice.to_raw_public().to_keyed_vec(b"balance:")).to_vec() => { vec![111u8, 0, 0, 0, 0, 0, 0, 0] } ], children_default: map![], }, ) } fn block_import_works<F>(block_executor: F) where F: Fn(Block, &mut TestExternalities), { let h = Header { parent_hash: [69u8; 32].into(), number: 1, state_root: Default::default(), extrinsics_root: Default::default(), digest: Default::default(), }; let mut b = Block { header: h, extrinsics: vec![] }; new_test_ext().execute_with(|| polish_block(&mut b)); block_executor(b, &mut new_test_ext()); } #[test] fn block_import_works_native() { block_import_works(|b, ext| ext.execute_with(|| execute_block(b))); } #[test] fn block_import_works_wasm() { block_import_works(|b, ext| { let mut ext = ext.ext(); let runtime_code = RuntimeCode { code_fetcher: &sp_core::traits::WrappedRuntimeCode(wasm_binary_unwrap().into()), hash: Vec::new(), heap_pages: None, }; executor() .call::<NeverNativeValue, fn() -> _>( &mut ext, &runtime_code, "Core_execute_block", &b.encode(), false, None, ) .0 .unwrap(); }) } fn block_import_with_transaction_works<F>(block_executor: F) where F: Fn(Block, &mut TestExternalities), { let mut b1 = Block { header: Header { parent_hash: [69u8; 32].into(), number: 1, state_root: Default::default(), extrinsics_root: Default::default(), digest: Default::default(), }, extrinsics: vec![Transfer { from: 
AccountKeyring::Alice.into(), to: AccountKeyring::Bob.into(), amount: 69, nonce: 0, } .into_signed_tx()], }; let mut dummy_ext = new_test_ext(); dummy_ext.execute_with(|| polish_block(&mut b1)); let mut b2 = Block { header: Header { parent_hash: b1.header.hash(), number: 2, state_root: Default::default(), extrinsics_root: Default::default(), digest: Default::default(), }, extrinsics: vec![ Transfer { from: AccountKeyring::Bob.into(), to: AccountKeyring::Alice.into(), amount: 27, nonce: 0, } .into_signed_tx(), Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Charlie.into(), amount: 69, nonce: 1, } .into_signed_tx(), ], }; dummy_ext.execute_with(|| polish_block(&mut b2)); drop(dummy_ext); let mut t = new_test_ext(); t.execute_with(|| { assert_eq!(balance_of(AccountKeyring::Alice.into()), 111); assert_eq!(balance_of(AccountKeyring::Bob.into()), 0); }); block_executor(b1, &mut t); t.execute_with(|| { assert_eq!(balance_of(AccountKeyring::Alice.into()), 42); assert_eq!(balance_of(AccountKeyring::Bob.into()), 69); }); block_executor(b2, &mut t); t.execute_with(|| { assert_eq!(balance_of(AccountKeyring::Alice.into()), 0); assert_eq!(balance_of(AccountKeyring::Bob.into()), 42); assert_eq!(balance_of(AccountKeyring::Charlie.into()), 69); }); } #[test] fn block_import_with_transaction_works_native() { block_import_with_transaction_works(|b, ext| ext.execute_with(|| execute_block(b))); } #[test] fn block_import_with_transaction_works_wasm() { block_import_with_transaction_works(|b, ext| { let mut ext = ext.ext(); let runtime_code = RuntimeCode { code_fetcher: &sp_core::traits::WrappedRuntimeCode(wasm_binary_unwrap().into()), hash: Vec::new(), heap_pages: None, }; executor() .call::<NeverNativeValue, fn() -> _>( &mut ext, &runtime_code, "Core_execute_block", &b.encode(), false, None, ) .0 .unwrap(); }) } }
{ vec![encode(&tx.from, tx.nonce - 1)] }
conditional_block
system.rs
// This file is part of Substrate. // Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! System manager: Handles all of the top-level stuff; executing block/transaction, setting code //! and depositing logs. use crate::{ AccountId, AuthorityId, Block, BlockNumber, Digest, Extrinsic, Header, Transfer, H256 as Hash, }; use codec::{Decode, Encode, KeyedVec}; use frame_support::storage; use frame_support::{decl_module, decl_storage}; use frame_system::Trait; use sp_core::{storage::well_known_keys, ChangesTrieConfiguration}; use sp_io::{ hashing::blake2_256, storage::changes_root as storage_changes_root, storage::root as storage_root, trie, }; use sp_runtime::{ generic, traits::Header as _, transaction_validity::{ InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, }, ApplyExtrinsicResult, }; use sp_std::prelude::*; const NONCE_OF: &[u8] = b"nonce:"; const BALANCE_OF: &[u8] = b"balance:"; decl_module! { pub struct Module<T: Trait> for enum Call where origin: T::Origin {} } decl_storage! { trait Store for Module<T: Trait> as TestRuntime { ExtrinsicData: map hasher(blake2_128_concat) u32 => Vec<u8>; // The current block number being processed. Set by `execute_block`. 
Number get(fn number): Option<BlockNumber>; ParentHash get(fn parent_hash): Hash; NewAuthorities get(fn new_authorities): Option<Vec<AuthorityId>>; NewChangesTrieConfig get(fn new_changes_trie_config): Option<Option<ChangesTrieConfiguration>>; StorageDigest get(fn storage_digest): Option<Digest>; Authorities get(fn authorities) config(): Vec<AuthorityId>; } } pub fn balance_of_key(who: AccountId) -> Vec<u8> { who.to_keyed_vec(BALANCE_OF) } pub fn balance_of(who: AccountId) -> u64 { storage::hashed::get_or(&blake2_256, &balance_of_key(who), 0) } pub fn nonce_of(who: AccountId) -> u64 { storage::hashed::get_or(&blake2_256, &who.to_keyed_vec(NONCE_OF), 0) } pub fn initialize_block(header: &Header) { // populate environment. <Number>::put(&header.number); <ParentHash>::put(&header.parent_hash); <StorageDigest>::put(header.digest()); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &0u32); // try to read something that depends on current header digest // so that it'll be included in execution proof if let Some(generic::DigestItem::Other(v)) = header.digest().logs().iter().next() { let _: Option<u32> = storage::unhashed::get(&v); } } pub fn authorities() -> Vec<AuthorityId> { Authorities::get() } pub fn get_block_number() -> Option<BlockNumber> { Number::get() } pub fn take_block_number() -> Option<BlockNumber> { Number::take() } #[derive(Copy, Clone)] enum
{ Verify, Overwrite, } /// Actually execute all transitioning for `block`. pub fn polish_block(block: &mut Block) { execute_block_with_state_root_handler(block, Mode::Overwrite); } pub fn execute_block(mut block: Block) { execute_block_with_state_root_handler(&mut block, Mode::Verify); } fn execute_block_with_state_root_handler(block: &mut Block, mode: Mode) { let header = &mut block.header; initialize_block(header); // execute transactions block.extrinsics.iter().for_each(|e| { let _ = execute_transaction(e.clone()).unwrap_or_else(|_| panic!("Invalid transaction")); }); let new_header = finalize_block(); if let Mode::Overwrite = mode { header.state_root = new_header.state_root; } else { info_expect_equal_hash(&new_header.state_root, &header.state_root); assert!( new_header.state_root == header.state_root, "Storage root must match that calculated.", ); } if let Mode::Overwrite = mode { header.extrinsics_root = new_header.extrinsics_root; } else { info_expect_equal_hash(&new_header.extrinsics_root, &header.extrinsics_root); assert!( new_header.extrinsics_root == header.extrinsics_root, "Transaction trie root must be valid.", ); } } /// The block executor. pub struct BlockExecutor; impl frame_executive::ExecuteBlock<Block> for BlockExecutor { fn execute_block(block: Block) { execute_block(block); } } /// Execute a transaction outside of the block execution function. /// This doesn't attempt to validate anything regarding the block. 
pub fn validate_transaction(utx: Extrinsic) -> TransactionValidity { if check_signature(&utx).is_err() { return InvalidTransaction::BadProof.into() } let tx = utx.transfer(); let nonce_key = tx.from.to_keyed_vec(NONCE_OF); let expected_nonce: u64 = storage::hashed::get_or(&blake2_256, &nonce_key, 0); if tx.nonce < expected_nonce { return InvalidTransaction::Stale.into() } if tx.nonce > expected_nonce + 64 { return InvalidTransaction::Future.into() } let encode = |from: &AccountId, nonce: u64| (from, nonce).encode(); let requires = if tx.nonce != expected_nonce && tx.nonce > 0 { vec![encode(&tx.from, tx.nonce - 1)] } else { vec![] }; let provides = vec![encode(&tx.from, tx.nonce)]; Ok(ValidTransaction { priority: tx.amount, requires, provides, longevity: 64, propagate: true }) } /// Execute a transaction outside of the block execution function. /// This doesn't attempt to validate anything regarding the block. pub fn execute_transaction(utx: Extrinsic) -> ApplyExtrinsicResult { let extrinsic_index: u32 = storage::unhashed::get(well_known_keys::EXTRINSIC_INDEX).unwrap(); let result = execute_transaction_backend(&utx, extrinsic_index); ExtrinsicData::insert(extrinsic_index, utx.encode()); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &(extrinsic_index + 1)); result } /// Finalize the block. pub fn finalize_block() -> Header { let extrinsic_index: u32 = storage::unhashed::take(well_known_keys::EXTRINSIC_INDEX).unwrap(); let txs: Vec<_> = (0..extrinsic_index).map(ExtrinsicData::take).collect(); let extrinsics_root = trie::blake2_256_ordered_root(txs).into(); let number = <Number>::take().expect("Number is set by `initialize_block`"); let parent_hash = <ParentHash>::take(); let mut digest = <StorageDigest>::take().expect("StorageDigest is set by `initialize_block`"); let o_new_authorities = <NewAuthorities>::take(); let new_changes_trie_config = <NewChangesTrieConfig>::take(); // This MUST come after all changes to storage are done. 
Otherwise we will fail the // “Storage root does not match that calculated” assertion. let storage_root = Hash::decode(&mut &storage_root()[..]).expect("`storage_root` is a valid hash"); let storage_changes_root = storage_changes_root(&parent_hash.encode()) .map(|r| Hash::decode(&mut &r[..]).expect("`storage_changes_root` is a valid hash")); if let Some(storage_changes_root) = storage_changes_root { digest.push(generic::DigestItem::ChangesTrieRoot(storage_changes_root)); } if let Some(new_authorities) = o_new_authorities { digest.push(generic::DigestItem::Consensus(*b"aura", new_authorities.encode())); digest.push(generic::DigestItem::Consensus(*b"babe", new_authorities.encode())); } if let Some(new_config) = new_changes_trie_config { digest.push(generic::DigestItem::ChangesTrieSignal( generic::ChangesTrieSignal::NewConfiguration(new_config), )); } Header { number, extrinsics_root, state_root: storage_root, parent_hash, digest } } #[inline(always)] fn check_signature(utx: &Extrinsic) -> Result<(), TransactionValidityError> { use sp_runtime::traits::BlindCheckable; utx.clone().check().map_err(|_| InvalidTransaction::BadProof.into()).map(|_| ()) } fn execute_transaction_backend(utx: &Extrinsic, extrinsic_index: u32) -> ApplyExtrinsicResult { check_signature(utx)?; match utx { Extrinsic::Transfer { exhaust_resources_when_not_first: true, .. } if extrinsic_index != 0 => Err(InvalidTransaction::ExhaustsResources.into()), Extrinsic::Transfer { ref transfer, .. 
} => execute_transfer_backend(transfer), Extrinsic::AuthoritiesChange(ref new_auth) => execute_new_authorities_backend(new_auth), Extrinsic::IncludeData(_) => Ok(Ok(())), Extrinsic::StorageChange(key, value) => execute_storage_change(key, value.as_ref().map(|v| &**v)), Extrinsic::ChangesTrieConfigUpdate(ref new_config) => execute_changes_trie_config_update(new_config.clone()), } } fn execute_transfer_backend(tx: &Transfer) -> ApplyExtrinsicResult { // check nonce let nonce_key = tx.from.to_keyed_vec(NONCE_OF); let expected_nonce: u64 = storage::hashed::get_or(&blake2_256, &nonce_key, 0); if !(tx.nonce == expected_nonce) { return Err(InvalidTransaction::Stale.into()) } // increment nonce in storage storage::hashed::put(&blake2_256, &nonce_key, &(expected_nonce + 1)); // check sender balance let from_balance_key = tx.from.to_keyed_vec(BALANCE_OF); let from_balance: u64 = storage::hashed::get_or(&blake2_256, &from_balance_key, 0); // enact transfer if !(tx.amount <= from_balance) { return Err(InvalidTransaction::Payment.into()) } let to_balance_key = tx.to.to_keyed_vec(BALANCE_OF); let to_balance: u64 = storage::hashed::get_or(&blake2_256, &to_balance_key, 0); storage::hashed::put(&blake2_256, &from_balance_key, &(from_balance - tx.amount)); storage::hashed::put(&blake2_256, &to_balance_key, &(to_balance + tx.amount)); Ok(Ok(())) } fn execute_new_authorities_backend(new_authorities: &[AuthorityId]) -> ApplyExtrinsicResult { NewAuthorities::put(new_authorities.to_vec()); Ok(Ok(())) } fn execute_storage_change(key: &[u8], value: Option<&[u8]>) -> ApplyExtrinsicResult { match value { Some(value) => storage::unhashed::put_raw(key, value), None => storage::unhashed::kill(key), } Ok(Ok(())) } fn execute_changes_trie_config_update( new_config: Option<ChangesTrieConfiguration>, ) -> ApplyExtrinsicResult { match new_config.clone() { Some(new_config) => storage::unhashed::put_raw(well_known_keys::CHANGES_TRIE_CONFIG, &new_config.encode()), None => 
storage::unhashed::kill(well_known_keys::CHANGES_TRIE_CONFIG), } <NewChangesTrieConfig>::put(new_config); Ok(Ok(())) } #[cfg(feature = "std")] fn info_expect_equal_hash(given: &Hash, expected: &Hash) { use sp_core::hexdisplay::HexDisplay; if given != expected { println!( "Hash: given={}, expected={}", HexDisplay::from(given.as_fixed_bytes()), HexDisplay::from(expected.as_fixed_bytes()), ); } } #[cfg(not(feature = "std"))] fn info_expect_equal_hash(given: &Hash, expected: &Hash) { if given != expected { sp_runtime::print("Hash not equal"); sp_runtime::print(given.as_bytes()); sp_runtime::print(expected.as_bytes()); } } #[cfg(test)] mod tests { use super::*; use crate::{wasm_binary_unwrap, Header, Transfer}; use sc_executor::{native_executor_instance, NativeExecutor, WasmExecutionMethod}; use sp_core::{ map, traits::{CodeExecutor, RuntimeCode}, NeverNativeValue, }; use sp_io::hashing::twox_128; use sp_io::TestExternalities; use substrate_test_runtime_client::{AccountKeyring, Sr25519Keyring}; // Declare an instance of the native executor dispatch for the test runtime. 
native_executor_instance!(NativeDispatch, crate::api::dispatch, crate::native_version); fn executor() -> NativeExecutor<NativeDispatch> { NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) } fn new_test_ext() -> TestExternalities { let authorities = vec![ Sr25519Keyring::Alice.to_raw_public(), Sr25519Keyring::Bob.to_raw_public(), Sr25519Keyring::Charlie.to_raw_public(), ]; TestExternalities::new_with_code( wasm_binary_unwrap(), sp_core::storage::Storage { top: map![ twox_128(b"latest").to_vec() => vec![69u8; 32], twox_128(b"sys:auth").to_vec() => authorities.encode(), blake2_256(&AccountKeyring::Alice.to_raw_public().to_keyed_vec(b"balance:")).to_vec() => { vec![111u8, 0, 0, 0, 0, 0, 0, 0] } ], children_default: map![], }, ) } fn block_import_works<F>(block_executor: F) where F: Fn(Block, &mut TestExternalities), { let h = Header { parent_hash: [69u8; 32].into(), number: 1, state_root: Default::default(), extrinsics_root: Default::default(), digest: Default::default(), }; let mut b = Block { header: h, extrinsics: vec![] }; new_test_ext().execute_with(|| polish_block(&mut b)); block_executor(b, &mut new_test_ext()); } #[test] fn block_import_works_native() { block_import_works(|b, ext| ext.execute_with(|| execute_block(b))); } #[test] fn block_import_works_wasm() { block_import_works(|b, ext| { let mut ext = ext.ext(); let runtime_code = RuntimeCode { code_fetcher: &sp_core::traits::WrappedRuntimeCode(wasm_binary_unwrap().into()), hash: Vec::new(), heap_pages: None, }; executor() .call::<NeverNativeValue, fn() -> _>( &mut ext, &runtime_code, "Core_execute_block", &b.encode(), false, None, ) .0 .unwrap(); }) } fn block_import_with_transaction_works<F>(block_executor: F) where F: Fn(Block, &mut TestExternalities), { let mut b1 = Block { header: Header { parent_hash: [69u8; 32].into(), number: 1, state_root: Default::default(), extrinsics_root: Default::default(), digest: Default::default(), }, extrinsics: vec![Transfer { from: 
AccountKeyring::Alice.into(), to: AccountKeyring::Bob.into(), amount: 69, nonce: 0, } .into_signed_tx()], }; let mut dummy_ext = new_test_ext(); dummy_ext.execute_with(|| polish_block(&mut b1)); let mut b2 = Block { header: Header { parent_hash: b1.header.hash(), number: 2, state_root: Default::default(), extrinsics_root: Default::default(), digest: Default::default(), }, extrinsics: vec![ Transfer { from: AccountKeyring::Bob.into(), to: AccountKeyring::Alice.into(), amount: 27, nonce: 0, } .into_signed_tx(), Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Charlie.into(), amount: 69, nonce: 1, } .into_signed_tx(), ], }; dummy_ext.execute_with(|| polish_block(&mut b2)); drop(dummy_ext); let mut t = new_test_ext(); t.execute_with(|| { assert_eq!(balance_of(AccountKeyring::Alice.into()), 111); assert_eq!(balance_of(AccountKeyring::Bob.into()), 0); }); block_executor(b1, &mut t); t.execute_with(|| { assert_eq!(balance_of(AccountKeyring::Alice.into()), 42); assert_eq!(balance_of(AccountKeyring::Bob.into()), 69); }); block_executor(b2, &mut t); t.execute_with(|| { assert_eq!(balance_of(AccountKeyring::Alice.into()), 0); assert_eq!(balance_of(AccountKeyring::Bob.into()), 42); assert_eq!(balance_of(AccountKeyring::Charlie.into()), 69); }); } #[test] fn block_import_with_transaction_works_native() { block_import_with_transaction_works(|b, ext| ext.execute_with(|| execute_block(b))); } #[test] fn block_import_with_transaction_works_wasm() { block_import_with_transaction_works(|b, ext| { let mut ext = ext.ext(); let runtime_code = RuntimeCode { code_fetcher: &sp_core::traits::WrappedRuntimeCode(wasm_binary_unwrap().into()), hash: Vec::new(), heap_pages: None, }; executor() .call::<NeverNativeValue, fn() -> _>( &mut ext, &runtime_code, "Core_execute_block", &b.encode(), false, None, ) .0 .unwrap(); }) } }
Mode
identifier_name
system.rs
// This file is part of Substrate. // Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! System manager: Handles all of the top-level stuff; executing block/transaction, setting code //! and depositing logs. use crate::{ AccountId, AuthorityId, Block, BlockNumber, Digest, Extrinsic, Header, Transfer, H256 as Hash, }; use codec::{Decode, Encode, KeyedVec}; use frame_support::storage; use frame_support::{decl_module, decl_storage}; use frame_system::Trait; use sp_core::{storage::well_known_keys, ChangesTrieConfiguration}; use sp_io::{ hashing::blake2_256, storage::changes_root as storage_changes_root, storage::root as storage_root, trie, }; use sp_runtime::{ generic, traits::Header as _, transaction_validity::{ InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, }, ApplyExtrinsicResult, }; use sp_std::prelude::*; const NONCE_OF: &[u8] = b"nonce:"; const BALANCE_OF: &[u8] = b"balance:"; decl_module! { pub struct Module<T: Trait> for enum Call where origin: T::Origin {} } decl_storage! { trait Store for Module<T: Trait> as TestRuntime { ExtrinsicData: map hasher(blake2_128_concat) u32 => Vec<u8>; // The current block number being processed. Set by `execute_block`. Number get(fn number): Option<BlockNumber>; ParentHash get(fn parent_hash): Hash;
} } pub fn balance_of_key(who: AccountId) -> Vec<u8> { who.to_keyed_vec(BALANCE_OF) } pub fn balance_of(who: AccountId) -> u64 { storage::hashed::get_or(&blake2_256, &balance_of_key(who), 0) } pub fn nonce_of(who: AccountId) -> u64 { storage::hashed::get_or(&blake2_256, &who.to_keyed_vec(NONCE_OF), 0) } pub fn initialize_block(header: &Header) { // populate environment. <Number>::put(&header.number); <ParentHash>::put(&header.parent_hash); <StorageDigest>::put(header.digest()); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &0u32); // try to read something that depends on current header digest // so that it'll be included in execution proof if let Some(generic::DigestItem::Other(v)) = header.digest().logs().iter().next() { let _: Option<u32> = storage::unhashed::get(&v); } } pub fn authorities() -> Vec<AuthorityId> { Authorities::get() } pub fn get_block_number() -> Option<BlockNumber> { Number::get() } pub fn take_block_number() -> Option<BlockNumber> { Number::take() } #[derive(Copy, Clone)] enum Mode { Verify, Overwrite, } /// Actually execute all transitioning for `block`. 
pub fn polish_block(block: &mut Block) { execute_block_with_state_root_handler(block, Mode::Overwrite); } pub fn execute_block(mut block: Block) { execute_block_with_state_root_handler(&mut block, Mode::Verify); } fn execute_block_with_state_root_handler(block: &mut Block, mode: Mode) { let header = &mut block.header; initialize_block(header); // execute transactions block.extrinsics.iter().for_each(|e| { let _ = execute_transaction(e.clone()).unwrap_or_else(|_| panic!("Invalid transaction")); }); let new_header = finalize_block(); if let Mode::Overwrite = mode { header.state_root = new_header.state_root; } else { info_expect_equal_hash(&new_header.state_root, &header.state_root); assert!( new_header.state_root == header.state_root, "Storage root must match that calculated.", ); } if let Mode::Overwrite = mode { header.extrinsics_root = new_header.extrinsics_root; } else { info_expect_equal_hash(&new_header.extrinsics_root, &header.extrinsics_root); assert!( new_header.extrinsics_root == header.extrinsics_root, "Transaction trie root must be valid.", ); } } /// The block executor. pub struct BlockExecutor; impl frame_executive::ExecuteBlock<Block> for BlockExecutor { fn execute_block(block: Block) { execute_block(block); } } /// Execute a transaction outside of the block execution function. /// This doesn't attempt to validate anything regarding the block. 
pub fn validate_transaction(utx: Extrinsic) -> TransactionValidity { if check_signature(&utx).is_err() { return InvalidTransaction::BadProof.into() } let tx = utx.transfer(); let nonce_key = tx.from.to_keyed_vec(NONCE_OF); let expected_nonce: u64 = storage::hashed::get_or(&blake2_256, &nonce_key, 0); if tx.nonce < expected_nonce { return InvalidTransaction::Stale.into() } if tx.nonce > expected_nonce + 64 { return InvalidTransaction::Future.into() } let encode = |from: &AccountId, nonce: u64| (from, nonce).encode(); let requires = if tx.nonce != expected_nonce && tx.nonce > 0 { vec![encode(&tx.from, tx.nonce - 1)] } else { vec![] }; let provides = vec![encode(&tx.from, tx.nonce)]; Ok(ValidTransaction { priority: tx.amount, requires, provides, longevity: 64, propagate: true }) } /// Execute a transaction outside of the block execution function. /// This doesn't attempt to validate anything regarding the block. pub fn execute_transaction(utx: Extrinsic) -> ApplyExtrinsicResult { let extrinsic_index: u32 = storage::unhashed::get(well_known_keys::EXTRINSIC_INDEX).unwrap(); let result = execute_transaction_backend(&utx, extrinsic_index); ExtrinsicData::insert(extrinsic_index, utx.encode()); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &(extrinsic_index + 1)); result } /// Finalize the block. pub fn finalize_block() -> Header { let extrinsic_index: u32 = storage::unhashed::take(well_known_keys::EXTRINSIC_INDEX).unwrap(); let txs: Vec<_> = (0..extrinsic_index).map(ExtrinsicData::take).collect(); let extrinsics_root = trie::blake2_256_ordered_root(txs).into(); let number = <Number>::take().expect("Number is set by `initialize_block`"); let parent_hash = <ParentHash>::take(); let mut digest = <StorageDigest>::take().expect("StorageDigest is set by `initialize_block`"); let o_new_authorities = <NewAuthorities>::take(); let new_changes_trie_config = <NewChangesTrieConfig>::take(); // This MUST come after all changes to storage are done. 
Otherwise we will fail the // “Storage root does not match that calculated” assertion. let storage_root = Hash::decode(&mut &storage_root()[..]).expect("`storage_root` is a valid hash"); let storage_changes_root = storage_changes_root(&parent_hash.encode()) .map(|r| Hash::decode(&mut &r[..]).expect("`storage_changes_root` is a valid hash")); if let Some(storage_changes_root) = storage_changes_root { digest.push(generic::DigestItem::ChangesTrieRoot(storage_changes_root)); } if let Some(new_authorities) = o_new_authorities { digest.push(generic::DigestItem::Consensus(*b"aura", new_authorities.encode())); digest.push(generic::DigestItem::Consensus(*b"babe", new_authorities.encode())); } if let Some(new_config) = new_changes_trie_config { digest.push(generic::DigestItem::ChangesTrieSignal( generic::ChangesTrieSignal::NewConfiguration(new_config), )); } Header { number, extrinsics_root, state_root: storage_root, parent_hash, digest } } #[inline(always)] fn check_signature(utx: &Extrinsic) -> Result<(), TransactionValidityError> { use sp_runtime::traits::BlindCheckable; utx.clone().check().map_err(|_| InvalidTransaction::BadProof.into()).map(|_| ()) } fn execute_transaction_backend(utx: &Extrinsic, extrinsic_index: u32) -> ApplyExtrinsicResult { check_signature(utx)?; match utx { Extrinsic::Transfer { exhaust_resources_when_not_first: true, .. } if extrinsic_index != 0 => Err(InvalidTransaction::ExhaustsResources.into()), Extrinsic::Transfer { ref transfer, .. 
} => execute_transfer_backend(transfer), Extrinsic::AuthoritiesChange(ref new_auth) => execute_new_authorities_backend(new_auth), Extrinsic::IncludeData(_) => Ok(Ok(())), Extrinsic::StorageChange(key, value) => execute_storage_change(key, value.as_ref().map(|v| &**v)), Extrinsic::ChangesTrieConfigUpdate(ref new_config) => execute_changes_trie_config_update(new_config.clone()), } } fn execute_transfer_backend(tx: &Transfer) -> ApplyExtrinsicResult { // check nonce let nonce_key = tx.from.to_keyed_vec(NONCE_OF); let expected_nonce: u64 = storage::hashed::get_or(&blake2_256, &nonce_key, 0); if !(tx.nonce == expected_nonce) { return Err(InvalidTransaction::Stale.into()) } // increment nonce in storage storage::hashed::put(&blake2_256, &nonce_key, &(expected_nonce + 1)); // check sender balance let from_balance_key = tx.from.to_keyed_vec(BALANCE_OF); let from_balance: u64 = storage::hashed::get_or(&blake2_256, &from_balance_key, 0); // enact transfer if !(tx.amount <= from_balance) { return Err(InvalidTransaction::Payment.into()) } let to_balance_key = tx.to.to_keyed_vec(BALANCE_OF); let to_balance: u64 = storage::hashed::get_or(&blake2_256, &to_balance_key, 0); storage::hashed::put(&blake2_256, &from_balance_key, &(from_balance - tx.amount)); storage::hashed::put(&blake2_256, &to_balance_key, &(to_balance + tx.amount)); Ok(Ok(())) } fn execute_new_authorities_backend(new_authorities: &[AuthorityId]) -> ApplyExtrinsicResult { NewAuthorities::put(new_authorities.to_vec()); Ok(Ok(())) } fn execute_storage_change(key: &[u8], value: Option<&[u8]>) -> ApplyExtrinsicResult { match value { Some(value) => storage::unhashed::put_raw(key, value), None => storage::unhashed::kill(key), } Ok(Ok(())) } fn execute_changes_trie_config_update( new_config: Option<ChangesTrieConfiguration>, ) -> ApplyExtrinsicResult { match new_config.clone() { Some(new_config) => storage::unhashed::put_raw(well_known_keys::CHANGES_TRIE_CONFIG, &new_config.encode()), None => 
storage::unhashed::kill(well_known_keys::CHANGES_TRIE_CONFIG), } <NewChangesTrieConfig>::put(new_config); Ok(Ok(())) } #[cfg(feature = "std")] fn info_expect_equal_hash(given: &Hash, expected: &Hash) { use sp_core::hexdisplay::HexDisplay; if given != expected { println!( "Hash: given={}, expected={}", HexDisplay::from(given.as_fixed_bytes()), HexDisplay::from(expected.as_fixed_bytes()), ); } } #[cfg(not(feature = "std"))] fn info_expect_equal_hash(given: &Hash, expected: &Hash) { if given != expected { sp_runtime::print("Hash not equal"); sp_runtime::print(given.as_bytes()); sp_runtime::print(expected.as_bytes()); } } #[cfg(test)] mod tests { use super::*; use crate::{wasm_binary_unwrap, Header, Transfer}; use sc_executor::{native_executor_instance, NativeExecutor, WasmExecutionMethod}; use sp_core::{ map, traits::{CodeExecutor, RuntimeCode}, NeverNativeValue, }; use sp_io::hashing::twox_128; use sp_io::TestExternalities; use substrate_test_runtime_client::{AccountKeyring, Sr25519Keyring}; // Declare an instance of the native executor dispatch for the test runtime. 
native_executor_instance!(NativeDispatch, crate::api::dispatch, crate::native_version); fn executor() -> NativeExecutor<NativeDispatch> { NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) } fn new_test_ext() -> TestExternalities { let authorities = vec![ Sr25519Keyring::Alice.to_raw_public(), Sr25519Keyring::Bob.to_raw_public(), Sr25519Keyring::Charlie.to_raw_public(), ]; TestExternalities::new_with_code( wasm_binary_unwrap(), sp_core::storage::Storage { top: map![ twox_128(b"latest").to_vec() => vec![69u8; 32], twox_128(b"sys:auth").to_vec() => authorities.encode(), blake2_256(&AccountKeyring::Alice.to_raw_public().to_keyed_vec(b"balance:")).to_vec() => { vec![111u8, 0, 0, 0, 0, 0, 0, 0] } ], children_default: map![], }, ) } fn block_import_works<F>(block_executor: F) where F: Fn(Block, &mut TestExternalities), { let h = Header { parent_hash: [69u8; 32].into(), number: 1, state_root: Default::default(), extrinsics_root: Default::default(), digest: Default::default(), }; let mut b = Block { header: h, extrinsics: vec![] }; new_test_ext().execute_with(|| polish_block(&mut b)); block_executor(b, &mut new_test_ext()); } #[test] fn block_import_works_native() { block_import_works(|b, ext| ext.execute_with(|| execute_block(b))); } #[test] fn block_import_works_wasm() { block_import_works(|b, ext| { let mut ext = ext.ext(); let runtime_code = RuntimeCode { code_fetcher: &sp_core::traits::WrappedRuntimeCode(wasm_binary_unwrap().into()), hash: Vec::new(), heap_pages: None, }; executor() .call::<NeverNativeValue, fn() -> _>( &mut ext, &runtime_code, "Core_execute_block", &b.encode(), false, None, ) .0 .unwrap(); }) } fn block_import_with_transaction_works<F>(block_executor: F) where F: Fn(Block, &mut TestExternalities), { let mut b1 = Block { header: Header { parent_hash: [69u8; 32].into(), number: 1, state_root: Default::default(), extrinsics_root: Default::default(), digest: Default::default(), }, extrinsics: vec![Transfer { from: 
AccountKeyring::Alice.into(), to: AccountKeyring::Bob.into(), amount: 69, nonce: 0, } .into_signed_tx()], }; let mut dummy_ext = new_test_ext(); dummy_ext.execute_with(|| polish_block(&mut b1)); let mut b2 = Block { header: Header { parent_hash: b1.header.hash(), number: 2, state_root: Default::default(), extrinsics_root: Default::default(), digest: Default::default(), }, extrinsics: vec![ Transfer { from: AccountKeyring::Bob.into(), to: AccountKeyring::Alice.into(), amount: 27, nonce: 0, } .into_signed_tx(), Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Charlie.into(), amount: 69, nonce: 1, } .into_signed_tx(), ], }; dummy_ext.execute_with(|| polish_block(&mut b2)); drop(dummy_ext); let mut t = new_test_ext(); t.execute_with(|| { assert_eq!(balance_of(AccountKeyring::Alice.into()), 111); assert_eq!(balance_of(AccountKeyring::Bob.into()), 0); }); block_executor(b1, &mut t); t.execute_with(|| { assert_eq!(balance_of(AccountKeyring::Alice.into()), 42); assert_eq!(balance_of(AccountKeyring::Bob.into()), 69); }); block_executor(b2, &mut t); t.execute_with(|| { assert_eq!(balance_of(AccountKeyring::Alice.into()), 0); assert_eq!(balance_of(AccountKeyring::Bob.into()), 42); assert_eq!(balance_of(AccountKeyring::Charlie.into()), 69); }); } #[test] fn block_import_with_transaction_works_native() { block_import_with_transaction_works(|b, ext| ext.execute_with(|| execute_block(b))); } #[test] fn block_import_with_transaction_works_wasm() { block_import_with_transaction_works(|b, ext| { let mut ext = ext.ext(); let runtime_code = RuntimeCode { code_fetcher: &sp_core::traits::WrappedRuntimeCode(wasm_binary_unwrap().into()), hash: Vec::new(), heap_pages: None, }; executor() .call::<NeverNativeValue, fn() -> _>( &mut ext, &runtime_code, "Core_execute_block", &b.encode(), false, None, ) .0 .unwrap(); }) } }
NewAuthorities get(fn new_authorities): Option<Vec<AuthorityId>>; NewChangesTrieConfig get(fn new_changes_trie_config): Option<Option<ChangesTrieConfiguration>>; StorageDigest get(fn storage_digest): Option<Digest>; Authorities get(fn authorities) config(): Vec<AuthorityId>;
random_line_split
main.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # from functions import * #import objects here import api import db import comm import stats from config import * from functions import * from traceback import format_tb as traceback_format def
(): cryptowatch_api.close() transaction_api.close() print("") print("-" * 15) print("closed Api connection") database.commit() database.close() print("closed db connection") print(time.ctime()) print("<---- QUITTING ---->") """ start cryptowatch - make an api object - make a db object - query api and insert into db in a loop """ stats_file_name = '/dev/shm/gdax.stats' communicator = comm.Comm('Trade_bot_test_run', 'email@example.com') #leaving as example #database = db.Bitmarket(DB_USER, DB_PASSWORD, dbname='bitmarket', dbhost='aruba') # database = db.Gdax(DBUSER, DBPASSWORD, dbname='gdax', dbhost='localhost') #cryptowatch_api = api.BitmarketPublic() #transaction_api = api.BitmarketPrivate(BITMARKET_KEYS['public'], BITMARKET_KEYS['private']) #cryptowatch_api.add_proxy(PROXIES["sirius"]["ip"], PROXIES["sirius"]["port"]) #transaction_api.add_proxy(PROXIES["sirius"]["ip"], PROXIES["sirius"]["port"]) statistics = stats.Stats(database) statistics_hourly = stats.Hourly(database) print("bringing trade table up to speed...") t0 = time.time() public_apis = (cryptowatch_api,) databases = (database,) calling_frequencies = (0.61,) results = catchup_multi(public_apis, databases, calling_frequencies, True) t1 = time.time() print("new trades pulled: ", results) print("that took", int(t1 - t0), "seconds") error_stats = {"otherErrors": 0, "noErrors": 0} imperative_table = {1: (0, 1), 2: (1, 0), 3: (1, 1), 4: (0, 0)} t0 = time.time() cancellation_trigger = 0 # these might be overridden by setmyvars() # imperative overrides algo signals # use it when a sell or buy is needed no matter the market situation # imperative values: 0 - auto; 1 - buy; 2- sell; 3 - both; 4 - none # auto trades normally - no imperative # buy and sell do only buying and selling respectively # both = janusz # none no trading at all, even if there are signals imperative = 0 exec_buy = 0 exec_sell = 0 trading_timeout = 240 activity_indicator = "" previous_strategy = current_strategy = "none" newline_timeout = 240 
next_newline_time = time.time() orders_save_timeout = 30 next_orders_save_time = 0 bid_placement = 2 # zero-based ask_placement = 2 # zero-based while 100 > max_dict_val(error_stats): try: n = set_my_vars("settings.txt", globals()) trades = catchup_multi(public_apis, databases, calling_frequencies) if trades[tuple(trades.keys())[0]] > 0: activity_indicator = "+" else: activity_indicator = "." # cancel orders if it's time if time.time() > cancellation_trigger: cancellation_trigger = time.time() + trading_timeout cancellation_response = transaction_api.cancel_all_orders() activity_indicator += "x" if len(cancellation_response["success"]) > 0: database.delete_placed_orders(cancellation_response["success"]) # TODO: placed trade history pull here, and matching also here # end cancel orders try: orderbook = cryptowatch_api.get_order_book() database.put_turnaround(orderbook.as_rtt()) # only save orders once every N seconds if next_orders_save_time < time.time(): database.put_orders(orderbook.as_tuples(inner=300)) next_orders_save_time = time.time() + orders_save_timeout # Now place some orders # get the balances available_funds = transaction_api.get_balances() current_strategy = "janusz" exec_sell, exec_buy = 1, 1 # here we override algos to account for imperative settings if imperative in imperative_table: exec_sell, exec_buy = imperative_table[imperative] # calculate the bid and ask price # but if the calculated price makes us the taker, make the price so that we are makers bid_price = round( float(orderbook.get_one(bid_placement, "bids")[0]) - 0.01, 2 ) ask_price = round( float(orderbook.get_one(ask_placement, "asks")[0]) + 0.01, 2 ) if available_funds["BTC"] >= 0.001 and exec_sell == 1: resp = transaction_api.place_order( "BTCPLN", "sell", str(available_funds["BTC"]), ask_price ) if isinstance(resp, dict) and "success" in resp.keys(): activity_indicator += "s" # TODO: logging instead of printing # print('Order placed', resp['side'], resp['product_id'], resp['size'], 
'at', resp['price']) database.put_placed_order( ( resp["order_params"]["order_id"], resp["order_params"]["market"], resp["order_params"]["type"], resp["order_params"]["amount"], resp["order_params"]["rate"], resp["time"], current_strategy, ) ) else: # activity_indicator = 'S' print("No luck selling!", resp) if available_funds["PLN"] > 10 and exec_buy == 1: amt = round((available_funds["PLN"] - 5) / float(bid_price), 6) resp = transaction_api.place_order("BTCPLN", "buy", str(amt), bid_price) if isinstance(resp, dict) and "success" in resp.keys(): activity_indicator += "b" # print('Order placed', resp['side'], resp['product_id'], resp['size'], 'at', resp['price']) database.put_placed_order( ( resp["order_params"]["order_id"], resp["order_params"]["market"], resp["order_params"]["type"], resp["order_params"]["amount"], resp["order_params"]["rate"], resp["time"], current_strategy, ) ) else: # activity_indicator = 'B' print("No luck buying!", resp) # finished placing trades if time.time() > next_newline_time: print("") print(time.ctime(), end=" ") next_newline_time += newline_timeout hi_bid = float(orderbook.get_one(0, "bids")[0]) my_balances = { "PLN": available_funds["PLN"], "BTC": available_funds["BTC"], "TOT": float(available_funds["PLN"]) + float(available_funds["BTC"]) * hi_bid, "calc_price_highest_bid": hi_bid, } save_stats( globals(), stats_file_name, "current_strategy", "ask_price", "bid_price", "my_balances", ) print(activity_indicator, sep="", end="", flush=True) del orderbook except api.data.EmptyData as err: database.put_error(err, int(time.time())) except api.pycurl.error as e: if e.args[0] not in error_stats.keys(): error_stats[e.args[0]] = 1 else: error_stats[e.args[0]] += 1 db_id = database.put_error(e, int(time.time())) print(e) print("error counts:", error_stats) if e.args[0] == 7 and e.args[1].split()[4] in [ p["ip"] for h, p in PROXIES.items() ]: print("PROXY REFUSED CONNECTION - SWITCHING TO OWN ADDRESS") cryptowatch_api.remove_proxy() 
transaction_api.remove_proxy() communicator.send_message( "Proxy at {} refused connection.\n switching to own IP \n".format( e.args[1].split()[4] ) ) elif e.args[0] in (6, 7): pass # wait for the network to get up maybe elif e.args[0] == 28 and error_stats[28] < 10: pass # don't send an email on an occasional timeout elif e.args[0] == 28 and error_stats[28] == 10: cryptowatch_api.reset_connection() transaction_api.reset_connection() communicator.send_message("Timeout occured. Reseting connections") else: preface = "pycurl" tb_str = "\n".join(traceback_format(e.__traceback__)) communicator.send_error( "{} error: {} occurred.\n Error stats are \n{}\nDB log ID is: {}\nHere is traceback: {}".format( preface, str(e), str(error_stats), str(db_id), tb_str ) ) except KeyboardInterrupt: wrap_up() exit(0) except Exception as e: print(e) db_id = database.put_error(e, int(time.time())) error_stats["otherErrors"] += 1 print("error counts:", error_stats) if isinstance(e, api.NotJSON): preface = "Not a JSON" else: preface = "some other" tb_str = "\n".join(traceback_format(e.__traceback__)) communicator.send_error( "{} error: {} occurred.\n Error stats are \n{}\nDB log ID is: {}\nHere is traceback: {}".format( preface, str(e), str(error_stats), str(db_id), tb_str ) ) else: error_stats["noErrors"] += 1 if 100 == error_stats["noErrors"]: error_stats = {"otherErrors": 0, "noErrors": 0, "NotJSON": 0} finally: try: database.commit() time.sleep(10) except KeyboardInterrupt: wrap_up() exit(1) msg = "Too many errors. Stopping script" print(msg) communicator.send_message(msg) wrap_up()
wrap_up
identifier_name
main.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # from functions import * #import objects here import api import db import comm import stats from config import * from functions import * from traceback import format_tb as traceback_format def wrap_up(): cryptowatch_api.close() transaction_api.close() print("") print("-" * 15) print("closed Api connection") database.commit() database.close() print("closed db connection") print(time.ctime()) print("<---- QUITTING ---->") """ start cryptowatch - make an api object - make a db object - query api and insert into db in a loop """ stats_file_name = '/dev/shm/gdax.stats' communicator = comm.Comm('Trade_bot_test_run', 'email@example.com') #leaving as example #database = db.Bitmarket(DB_USER, DB_PASSWORD, dbname='bitmarket', dbhost='aruba') # database = db.Gdax(DBUSER, DBPASSWORD, dbname='gdax', dbhost='localhost') #cryptowatch_api = api.BitmarketPublic() #transaction_api = api.BitmarketPrivate(BITMARKET_KEYS['public'], BITMARKET_KEYS['private']) #cryptowatch_api.add_proxy(PROXIES["sirius"]["ip"], PROXIES["sirius"]["port"]) #transaction_api.add_proxy(PROXIES["sirius"]["ip"], PROXIES["sirius"]["port"]) statistics = stats.Stats(database) statistics_hourly = stats.Hourly(database) print("bringing trade table up to speed...") t0 = time.time() public_apis = (cryptowatch_api,) databases = (database,) calling_frequencies = (0.61,) results = catchup_multi(public_apis, databases, calling_frequencies, True) t1 = time.time() print("new trades pulled: ", results) print("that took", int(t1 - t0), "seconds") error_stats = {"otherErrors": 0, "noErrors": 0} imperative_table = {1: (0, 1), 2: (1, 0), 3: (1, 1), 4: (0, 0)} t0 = time.time() cancellation_trigger = 0 # these might be overridden by setmyvars() # imperative overrides algo signals # use it when a sell or buy is needed no matter the market situation # imperative values: 0 - auto; 1 - buy; 2- sell; 3 - both; 4 - none # auto trades normally - no imperative # buy and sell do only buying 
and selling respectively # both = janusz # none no trading at all, even if there are signals imperative = 0 exec_buy = 0 exec_sell = 0 trading_timeout = 240 activity_indicator = "" previous_strategy = current_strategy = "none" newline_timeout = 240 next_newline_time = time.time() orders_save_timeout = 30 next_orders_save_time = 0 bid_placement = 2 # zero-based ask_placement = 2 # zero-based while 100 > max_dict_val(error_stats): try: n = set_my_vars("settings.txt", globals()) trades = catchup_multi(public_apis, databases, calling_frequencies) if trades[tuple(trades.keys())[0]] > 0: activity_indicator = "+" else: activity_indicator = "." # cancel orders if it's time if time.time() > cancellation_trigger: cancellation_trigger = time.time() + trading_timeout cancellation_response = transaction_api.cancel_all_orders() activity_indicator += "x" if len(cancellation_response["success"]) > 0: database.delete_placed_orders(cancellation_response["success"]) # TODO: placed trade history pull here, and matching also here # end cancel orders try: orderbook = cryptowatch_api.get_order_book() database.put_turnaround(orderbook.as_rtt()) # only save orders once every N seconds if next_orders_save_time < time.time(): database.put_orders(orderbook.as_tuples(inner=300)) next_orders_save_time = time.time() + orders_save_timeout # Now place some orders # get the balances available_funds = transaction_api.get_balances() current_strategy = "janusz" exec_sell, exec_buy = 1, 1 # here we override algos to account for imperative settings if imperative in imperative_table: exec_sell, exec_buy = imperative_table[imperative] # calculate the bid and ask price # but if the calculated price makes us the taker, make the price so that we are makers bid_price = round( float(orderbook.get_one(bid_placement, "bids")[0]) - 0.01, 2 ) ask_price = round( float(orderbook.get_one(ask_placement, "asks")[0]) + 0.01, 2 ) if available_funds["BTC"] >= 0.001 and exec_sell == 1: resp = transaction_api.place_order( 
"BTCPLN", "sell", str(available_funds["BTC"]), ask_price ) if isinstance(resp, dict) and "success" in resp.keys(): activity_indicator += "s" # TODO: logging instead of printing # print('Order placed', resp['side'], resp['product_id'], resp['size'], 'at', resp['price']) database.put_placed_order( ( resp["order_params"]["order_id"], resp["order_params"]["market"], resp["order_params"]["type"], resp["order_params"]["amount"], resp["order_params"]["rate"], resp["time"], current_strategy, ) ) else: # activity_indicator = 'S' print("No luck selling!", resp) if available_funds["PLN"] > 10 and exec_buy == 1: amt = round((available_funds["PLN"] - 5) / float(bid_price), 6) resp = transaction_api.place_order("BTCPLN", "buy", str(amt), bid_price) if isinstance(resp, dict) and "success" in resp.keys(): activity_indicator += "b" # print('Order placed', resp['side'], resp['product_id'], resp['size'], 'at', resp['price']) database.put_placed_order( ( resp["order_params"]["order_id"], resp["order_params"]["market"], resp["order_params"]["type"], resp["order_params"]["amount"], resp["order_params"]["rate"], resp["time"], current_strategy, ) ) else: # activity_indicator = 'B' print("No luck buying!", resp) # finished placing trades if time.time() > next_newline_time: print("") print(time.ctime(), end=" ") next_newline_time += newline_timeout hi_bid = float(orderbook.get_one(0, "bids")[0]) my_balances = { "PLN": available_funds["PLN"], "BTC": available_funds["BTC"], "TOT": float(available_funds["PLN"]) + float(available_funds["BTC"]) * hi_bid, "calc_price_highest_bid": hi_bid, } save_stats( globals(), stats_file_name, "current_strategy", "ask_price", "bid_price", "my_balances", ) print(activity_indicator, sep="", end="", flush=True) del orderbook except api.data.EmptyData as err: database.put_error(err, int(time.time())) except api.pycurl.error as e: if e.args[0] not in error_stats.keys(): error_stats[e.args[0]] = 1 else: error_stats[e.args[0]] += 1 db_id = database.put_error(e, 
int(time.time())) print(e) print("error counts:", error_stats) if e.args[0] == 7 and e.args[1].split()[4] in [ p["ip"] for h, p in PROXIES.items() ]: print("PROXY REFUSED CONNECTION - SWITCHING TO OWN ADDRESS") cryptowatch_api.remove_proxy() transaction_api.remove_proxy() communicator.send_message( "Proxy at {} refused connection.\n switching to own IP \n".format( e.args[1].split()[4] ) ) elif e.args[0] in (6, 7):
elif e.args[0] == 28 and error_stats[28] < 10: pass # don't send an email on an occasional timeout elif e.args[0] == 28 and error_stats[28] == 10: cryptowatch_api.reset_connection() transaction_api.reset_connection() communicator.send_message("Timeout occured. Reseting connections") else: preface = "pycurl" tb_str = "\n".join(traceback_format(e.__traceback__)) communicator.send_error( "{} error: {} occurred.\n Error stats are \n{}\nDB log ID is: {}\nHere is traceback: {}".format( preface, str(e), str(error_stats), str(db_id), tb_str ) ) except KeyboardInterrupt: wrap_up() exit(0) except Exception as e: print(e) db_id = database.put_error(e, int(time.time())) error_stats["otherErrors"] += 1 print("error counts:", error_stats) if isinstance(e, api.NotJSON): preface = "Not a JSON" else: preface = "some other" tb_str = "\n".join(traceback_format(e.__traceback__)) communicator.send_error( "{} error: {} occurred.\n Error stats are \n{}\nDB log ID is: {}\nHere is traceback: {}".format( preface, str(e), str(error_stats), str(db_id), tb_str ) ) else: error_stats["noErrors"] += 1 if 100 == error_stats["noErrors"]: error_stats = {"otherErrors": 0, "noErrors": 0, "NotJSON": 0} finally: try: database.commit() time.sleep(10) except KeyboardInterrupt: wrap_up() exit(1) msg = "Too many errors. Stopping script" print(msg) communicator.send_message(msg) wrap_up()
pass # wait for the network to get up maybe
conditional_block
main.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # from functions import * #import objects here import api import db import comm import stats from config import * from functions import * from traceback import format_tb as traceback_format def wrap_up(): cryptowatch_api.close() transaction_api.close() print("") print("-" * 15) print("closed Api connection") database.commit() database.close() print("closed db connection") print(time.ctime()) print("<---- QUITTING ---->") """ start cryptowatch - make an api object - make a db object - query api and insert into db in a loop """ stats_file_name = '/dev/shm/gdax.stats' communicator = comm.Comm('Trade_bot_test_run', 'email@example.com') #leaving as example #database = db.Bitmarket(DB_USER, DB_PASSWORD, dbname='bitmarket', dbhost='aruba') # database = db.Gdax(DBUSER, DBPASSWORD, dbname='gdax', dbhost='localhost') #cryptowatch_api = api.BitmarketPublic() #transaction_api = api.BitmarketPrivate(BITMARKET_KEYS['public'], BITMARKET_KEYS['private']) #cryptowatch_api.add_proxy(PROXIES["sirius"]["ip"], PROXIES["sirius"]["port"]) #transaction_api.add_proxy(PROXIES["sirius"]["ip"], PROXIES["sirius"]["port"]) statistics = stats.Stats(database) statistics_hourly = stats.Hourly(database) print("bringing trade table up to speed...") t0 = time.time() public_apis = (cryptowatch_api,) databases = (database,) calling_frequencies = (0.61,) results = catchup_multi(public_apis, databases, calling_frequencies, True) t1 = time.time() print("new trades pulled: ", results) print("that took", int(t1 - t0), "seconds") error_stats = {"otherErrors": 0, "noErrors": 0} imperative_table = {1: (0, 1), 2: (1, 0), 3: (1, 1), 4: (0, 0)} t0 = time.time() cancellation_trigger = 0 # these might be overridden by setmyvars() # imperative overrides algo signals # use it when a sell or buy is needed no matter the market situation # imperative values: 0 - auto; 1 - buy; 2- sell; 3 - both; 4 - none # auto trades normally - no imperative # buy and sell do only buying 
and selling respectively # both = janusz # none no trading at all, even if there are signals imperative = 0 exec_buy = 0 exec_sell = 0 trading_timeout = 240 activity_indicator = "" previous_strategy = current_strategy = "none" newline_timeout = 240 next_newline_time = time.time() orders_save_timeout = 30 next_orders_save_time = 0 bid_placement = 2 # zero-based ask_placement = 2 # zero-based while 100 > max_dict_val(error_stats): try: n = set_my_vars("settings.txt", globals()) trades = catchup_multi(public_apis, databases, calling_frequencies) if trades[tuple(trades.keys())[0]] > 0: activity_indicator = "+" else: activity_indicator = "." # cancel orders if it's time if time.time() > cancellation_trigger: cancellation_trigger = time.time() + trading_timeout cancellation_response = transaction_api.cancel_all_orders() activity_indicator += "x" if len(cancellation_response["success"]) > 0: database.delete_placed_orders(cancellation_response["success"]) # TODO: placed trade history pull here, and matching also here # end cancel orders try: orderbook = cryptowatch_api.get_order_book() database.put_turnaround(orderbook.as_rtt()) # only save orders once every N seconds if next_orders_save_time < time.time(): database.put_orders(orderbook.as_tuples(inner=300)) next_orders_save_time = time.time() + orders_save_timeout # Now place some orders # get the balances available_funds = transaction_api.get_balances() current_strategy = "janusz" exec_sell, exec_buy = 1, 1 # here we override algos to account for imperative settings if imperative in imperative_table: exec_sell, exec_buy = imperative_table[imperative] # calculate the bid and ask price # but if the calculated price makes us the taker, make the price so that we are makers bid_price = round( float(orderbook.get_one(bid_placement, "bids")[0]) - 0.01, 2 ) ask_price = round( float(orderbook.get_one(ask_placement, "asks")[0]) + 0.01, 2 ) if available_funds["BTC"] >= 0.001 and exec_sell == 1: resp = transaction_api.place_order( 
"BTCPLN", "sell", str(available_funds["BTC"]), ask_price ) if isinstance(resp, dict) and "success" in resp.keys(): activity_indicator += "s" # TODO: logging instead of printing # print('Order placed', resp['side'], resp['product_id'], resp['size'], 'at', resp['price']) database.put_placed_order( ( resp["order_params"]["order_id"], resp["order_params"]["market"], resp["order_params"]["type"], resp["order_params"]["amount"], resp["order_params"]["rate"], resp["time"], current_strategy, ) ) else: # activity_indicator = 'S' print("No luck selling!", resp) if available_funds["PLN"] > 10 and exec_buy == 1: amt = round((available_funds["PLN"] - 5) / float(bid_price), 6) resp = transaction_api.place_order("BTCPLN", "buy", str(amt), bid_price) if isinstance(resp, dict) and "success" in resp.keys(): activity_indicator += "b" # print('Order placed', resp['side'], resp['product_id'], resp['size'], 'at', resp['price']) database.put_placed_order( ( resp["order_params"]["order_id"], resp["order_params"]["market"], resp["order_params"]["type"], resp["order_params"]["amount"], resp["order_params"]["rate"], resp["time"], current_strategy, ) ) else: # activity_indicator = 'B' print("No luck buying!", resp) # finished placing trades if time.time() > next_newline_time: print("") print(time.ctime(), end=" ") next_newline_time += newline_timeout hi_bid = float(orderbook.get_one(0, "bids")[0]) my_balances = { "PLN": available_funds["PLN"], "BTC": available_funds["BTC"], "TOT": float(available_funds["PLN"]) + float(available_funds["BTC"]) * hi_bid, "calc_price_highest_bid": hi_bid, } save_stats( globals(), stats_file_name, "current_strategy", "ask_price", "bid_price", "my_balances", ) print(activity_indicator, sep="", end="", flush=True) del orderbook except api.data.EmptyData as err: database.put_error(err, int(time.time())) except api.pycurl.error as e:
error_stats[e.args[0]] = 1 else: error_stats[e.args[0]] += 1 db_id = database.put_error(e, int(time.time())) print(e) print("error counts:", error_stats) if e.args[0] == 7 and e.args[1].split()[4] in [ p["ip"] for h, p in PROXIES.items() ]: print("PROXY REFUSED CONNECTION - SWITCHING TO OWN ADDRESS") cryptowatch_api.remove_proxy() transaction_api.remove_proxy() communicator.send_message( "Proxy at {} refused connection.\n switching to own IP \n".format( e.args[1].split()[4] ) ) elif e.args[0] in (6, 7): pass # wait for the network to get up maybe elif e.args[0] == 28 and error_stats[28] < 10: pass # don't send an email on an occasional timeout elif e.args[0] == 28 and error_stats[28] == 10: cryptowatch_api.reset_connection() transaction_api.reset_connection() communicator.send_message("Timeout occured. Reseting connections") else: preface = "pycurl" tb_str = "\n".join(traceback_format(e.__traceback__)) communicator.send_error( "{} error: {} occurred.\n Error stats are \n{}\nDB log ID is: {}\nHere is traceback: {}".format( preface, str(e), str(error_stats), str(db_id), tb_str ) ) except KeyboardInterrupt: wrap_up() exit(0) except Exception as e: print(e) db_id = database.put_error(e, int(time.time())) error_stats["otherErrors"] += 1 print("error counts:", error_stats) if isinstance(e, api.NotJSON): preface = "Not a JSON" else: preface = "some other" tb_str = "\n".join(traceback_format(e.__traceback__)) communicator.send_error( "{} error: {} occurred.\n Error stats are \n{}\nDB log ID is: {}\nHere is traceback: {}".format( preface, str(e), str(error_stats), str(db_id), tb_str ) ) else: error_stats["noErrors"] += 1 if 100 == error_stats["noErrors"]: error_stats = {"otherErrors": 0, "noErrors": 0, "NotJSON": 0} finally: try: database.commit() time.sleep(10) except KeyboardInterrupt: wrap_up() exit(1) msg = "Too many errors. Stopping script" print(msg) communicator.send_message(msg) wrap_up()
if e.args[0] not in error_stats.keys():
random_line_split
main.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # from functions import * #import objects here import api import db import comm import stats from config import * from functions import * from traceback import format_tb as traceback_format def wrap_up():
""" start cryptowatch - make an api object - make a db object - query api and insert into db in a loop """ stats_file_name = '/dev/shm/gdax.stats' communicator = comm.Comm('Trade_bot_test_run', 'email@example.com') #leaving as example #database = db.Bitmarket(DB_USER, DB_PASSWORD, dbname='bitmarket', dbhost='aruba') # database = db.Gdax(DBUSER, DBPASSWORD, dbname='gdax', dbhost='localhost') #cryptowatch_api = api.BitmarketPublic() #transaction_api = api.BitmarketPrivate(BITMARKET_KEYS['public'], BITMARKET_KEYS['private']) #cryptowatch_api.add_proxy(PROXIES["sirius"]["ip"], PROXIES["sirius"]["port"]) #transaction_api.add_proxy(PROXIES["sirius"]["ip"], PROXIES["sirius"]["port"]) statistics = stats.Stats(database) statistics_hourly = stats.Hourly(database) print("bringing trade table up to speed...") t0 = time.time() public_apis = (cryptowatch_api,) databases = (database,) calling_frequencies = (0.61,) results = catchup_multi(public_apis, databases, calling_frequencies, True) t1 = time.time() print("new trades pulled: ", results) print("that took", int(t1 - t0), "seconds") error_stats = {"otherErrors": 0, "noErrors": 0} imperative_table = {1: (0, 1), 2: (1, 0), 3: (1, 1), 4: (0, 0)} t0 = time.time() cancellation_trigger = 0 # these might be overridden by setmyvars() # imperative overrides algo signals # use it when a sell or buy is needed no matter the market situation # imperative values: 0 - auto; 1 - buy; 2- sell; 3 - both; 4 - none # auto trades normally - no imperative # buy and sell do only buying and selling respectively # both = janusz # none no trading at all, even if there are signals imperative = 0 exec_buy = 0 exec_sell = 0 trading_timeout = 240 activity_indicator = "" previous_strategy = current_strategy = "none" newline_timeout = 240 next_newline_time = time.time() orders_save_timeout = 30 next_orders_save_time = 0 bid_placement = 2 # zero-based ask_placement = 2 # zero-based while 100 > max_dict_val(error_stats): try: n = set_my_vars("settings.txt", 
globals()) trades = catchup_multi(public_apis, databases, calling_frequencies) if trades[tuple(trades.keys())[0]] > 0: activity_indicator = "+" else: activity_indicator = "." # cancel orders if it's time if time.time() > cancellation_trigger: cancellation_trigger = time.time() + trading_timeout cancellation_response = transaction_api.cancel_all_orders() activity_indicator += "x" if len(cancellation_response["success"]) > 0: database.delete_placed_orders(cancellation_response["success"]) # TODO: placed trade history pull here, and matching also here # end cancel orders try: orderbook = cryptowatch_api.get_order_book() database.put_turnaround(orderbook.as_rtt()) # only save orders once every N seconds if next_orders_save_time < time.time(): database.put_orders(orderbook.as_tuples(inner=300)) next_orders_save_time = time.time() + orders_save_timeout # Now place some orders # get the balances available_funds = transaction_api.get_balances() current_strategy = "janusz" exec_sell, exec_buy = 1, 1 # here we override algos to account for imperative settings if imperative in imperative_table: exec_sell, exec_buy = imperative_table[imperative] # calculate the bid and ask price # but if the calculated price makes us the taker, make the price so that we are makers bid_price = round( float(orderbook.get_one(bid_placement, "bids")[0]) - 0.01, 2 ) ask_price = round( float(orderbook.get_one(ask_placement, "asks")[0]) + 0.01, 2 ) if available_funds["BTC"] >= 0.001 and exec_sell == 1: resp = transaction_api.place_order( "BTCPLN", "sell", str(available_funds["BTC"]), ask_price ) if isinstance(resp, dict) and "success" in resp.keys(): activity_indicator += "s" # TODO: logging instead of printing # print('Order placed', resp['side'], resp['product_id'], resp['size'], 'at', resp['price']) database.put_placed_order( ( resp["order_params"]["order_id"], resp["order_params"]["market"], resp["order_params"]["type"], resp["order_params"]["amount"], resp["order_params"]["rate"], resp["time"], 
current_strategy, ) ) else: # activity_indicator = 'S' print("No luck selling!", resp) if available_funds["PLN"] > 10 and exec_buy == 1: amt = round((available_funds["PLN"] - 5) / float(bid_price), 6) resp = transaction_api.place_order("BTCPLN", "buy", str(amt), bid_price) if isinstance(resp, dict) and "success" in resp.keys(): activity_indicator += "b" # print('Order placed', resp['side'], resp['product_id'], resp['size'], 'at', resp['price']) database.put_placed_order( ( resp["order_params"]["order_id"], resp["order_params"]["market"], resp["order_params"]["type"], resp["order_params"]["amount"], resp["order_params"]["rate"], resp["time"], current_strategy, ) ) else: # activity_indicator = 'B' print("No luck buying!", resp) # finished placing trades if time.time() > next_newline_time: print("") print(time.ctime(), end=" ") next_newline_time += newline_timeout hi_bid = float(orderbook.get_one(0, "bids")[0]) my_balances = { "PLN": available_funds["PLN"], "BTC": available_funds["BTC"], "TOT": float(available_funds["PLN"]) + float(available_funds["BTC"]) * hi_bid, "calc_price_highest_bid": hi_bid, } save_stats( globals(), stats_file_name, "current_strategy", "ask_price", "bid_price", "my_balances", ) print(activity_indicator, sep="", end="", flush=True) del orderbook except api.data.EmptyData as err: database.put_error(err, int(time.time())) except api.pycurl.error as e: if e.args[0] not in error_stats.keys(): error_stats[e.args[0]] = 1 else: error_stats[e.args[0]] += 1 db_id = database.put_error(e, int(time.time())) print(e) print("error counts:", error_stats) if e.args[0] == 7 and e.args[1].split()[4] in [ p["ip"] for h, p in PROXIES.items() ]: print("PROXY REFUSED CONNECTION - SWITCHING TO OWN ADDRESS") cryptowatch_api.remove_proxy() transaction_api.remove_proxy() communicator.send_message( "Proxy at {} refused connection.\n switching to own IP \n".format( e.args[1].split()[4] ) ) elif e.args[0] in (6, 7): pass # wait for the network to get up maybe elif e.args[0] 
== 28 and error_stats[28] < 10: pass # don't send an email on an occasional timeout elif e.args[0] == 28 and error_stats[28] == 10: cryptowatch_api.reset_connection() transaction_api.reset_connection() communicator.send_message("Timeout occured. Reseting connections") else: preface = "pycurl" tb_str = "\n".join(traceback_format(e.__traceback__)) communicator.send_error( "{} error: {} occurred.\n Error stats are \n{}\nDB log ID is: {}\nHere is traceback: {}".format( preface, str(e), str(error_stats), str(db_id), tb_str ) ) except KeyboardInterrupt: wrap_up() exit(0) except Exception as e: print(e) db_id = database.put_error(e, int(time.time())) error_stats["otherErrors"] += 1 print("error counts:", error_stats) if isinstance(e, api.NotJSON): preface = "Not a JSON" else: preface = "some other" tb_str = "\n".join(traceback_format(e.__traceback__)) communicator.send_error( "{} error: {} occurred.\n Error stats are \n{}\nDB log ID is: {}\nHere is traceback: {}".format( preface, str(e), str(error_stats), str(db_id), tb_str ) ) else: error_stats["noErrors"] += 1 if 100 == error_stats["noErrors"]: error_stats = {"otherErrors": 0, "noErrors": 0, "NotJSON": 0} finally: try: database.commit() time.sleep(10) except KeyboardInterrupt: wrap_up() exit(1) msg = "Too many errors. Stopping script" print(msg) communicator.send_message(msg) wrap_up()
cryptowatch_api.close() transaction_api.close() print("") print("-" * 15) print("closed Api connection") database.commit() database.close() print("closed db connection") print(time.ctime()) print("<---- QUITTING ---->")
identifier_body
delaunay_triangulation.rs
/*! The following code has been modified from the original delaunator-rs project: https://github.com/mourner/delaunator-rs For a description of the data structure, including the halfedge connectivity, see: https://mapbox.github.io/delaunator/ # Description A very fast 2D [Delaunay Triangulation](https://en.wikipedia.org/wiki/Delaunay_triangulation) library for Rust. A port of [Delaunator](https://github.com/mapbox/delaunator). A triangle edge may be shared with another triangle. Instead of thinking about each edge A↔︎B, we will use two half-edges A→B and B→A. Having two half-edges is the key to everything this library provides. Half-edges e are the indices into both of delaunator’s outputs: delaunay.triangles[e] returns the point id where the half-edge starts delaunay.halfedges[e] returns the opposite half-edge in the adjacent triangle, or -1 if there is no adjacent triangle Triangle ids and half-edge ids are related. The half-edges of triangle t are 3*t, 3*t + 1, and 3*t + 2. The triangle of half-edge id e is floor(e/3 # Example ```rust use delaunator::triangulate; use structures::Point2D let points = vec![ Point2D { x: 0., y: 0. }, Point2D { x: 1., y: 0. }, Point2D { x: 1., y: 1. }, Point2D { x: 0., y: 1. }, ]; let result = triangulate(&points).expect("No triangulation exists."); println!("{:?}", result.triangles); // [0, 2, 1, 0, 3, 2] ``` */ use crate::structures::Point2D; use std::collections::HashSet; use std::f64; /// Represents the area outside of the triangulation. /// Halfedges on the convex hull (which don't have an adjacent halfedge) /// will have this value. pub const EMPTY: usize = usize::max_value(); /// A data structure used to perform Delaunay triangulation on /// a set of input vector points. 
Connectivity between points, /// triangles, and halfedges is as follows: /// /// - edge → edges: next_halfedge, prevHalfedge, halfedges[] /// - edge → points: triangles[] /// - edge → triangle: triangle_of_edge /// - triangle → edges: edges_of_triangle /// - triangle → points: points_of_triangle /// - triangle → triangles: triangles_adjacent_to_triangle /// - point → incoming edges: edges_around_point /// - point → outgoing edges: edges_around_point + halfedge[] /// - point → points: edges_around_point + triangles[] /// - point → triangles: edges_around_point + triangle_of_edge pub struct Triangulation { /// A vector of point indices where each triple represents a Delaunay triangle. /// All triangles are directed counter-clockwise. pub triangles: Vec<usize>, /// A vector of adjacent halfedge indices that allows traversing the triangulation graph. /// /// `i`-th half-edge in the array corresponds to vertex `triangles[i]` /// the half-edge is coming from. `halfedges[i]` is the index of a twin half-edge /// in an adjacent triangle (or `EMPTY` for outer half-edges on the convex hull). pub halfedges: Vec<usize>, /// A vector of indices that reference points on the convex hull of the triangulation, /// counter-clockwise. pub hull: Vec<usize>, } impl Triangulation { /// Constructs a new *Triangulation*. fn new(n: usize) -> Self { let max_triangles = 2 * n - 5; Self { triangles: Vec::with_capacity(max_triangles * 3), halfedges: Vec::with_capacity(max_triangles * 3), hull: Vec::new(), } } /// The number of triangles in the triangulation. pub fn len(&self) -> usize { self.triangles.len() / 3 } /// Next halfedge in a triangle. pub fn next_halfedge(&self, edge: usize) -> usize { if edge % 3 == 2 { edge - 2 } else { edge + 1 } } /// Previous halfedge in a triangle. pub fn prev_halfedge(&self, edge: usize) -> usize { if edge % 3 == 0 { edge + 2 } else { edge - 1 } } /// Returns the triangle of an edge. 
pub fn triangle_of_edge(&self, edge: usize) -> usize { edge / 3 } /// Returns the edges of a triangle. pub fn edges_of_triangle(&self, triangle: usize) -> [usize; 3] { [3 * triangle, 3 * triangle + 1, 3 * triangle + 2] } /// Returns the points of a triangle. pub fn points_of_triangle(&self, triangle: usize) -> [usize; 3] { // self.edges_of_tri
r. pub fn triangle_center(&self, points: &[Point2D], triangle: usize) -> Point2D { let p = self.points_of_triangle(triangle); points[p[0]].circumcenter(&points[p[1]], &points[p[2]]) } /// Returns the edges around a point connected to halfedge '*start*'. pub fn edges_around_point(&self, start: usize) -> Vec<usize> { let mut result = vec![]; let mut incoming = start; let mut outgoing: usize; // let mut i = 0; loop { if result.contains(&incoming) { break; } result.push(incoming); outgoing = self.next_halfedge(incoming); incoming = self.halfedges[outgoing]; if incoming == EMPTY { break; } else if incoming == start { result.push(incoming); break; } // i += 1; // if i > 100 { // // println!("{} {} {}", outgoing, incoming, start); // break; // } } result } pub fn natural_neighbours_from_incoming_edge(&self, start: usize) -> Vec<usize> { let mut result = vec![]; //result.push(self.triangles[self.next_halfedge(start)]); let mut incoming = start; let mut outgoing: usize; loop { result.push(self.triangles[incoming]); outgoing = self.next_halfedge(incoming); incoming = self.halfedges[outgoing]; if incoming == EMPTY { break; } else if incoming == start { break; } } result } pub fn natural_neighbours_2nd_order(&self, start: usize) -> Vec<usize> { let mut set = HashSet::new(); let mut edges = vec![]; // result.push(self.triangles[self.next_halfedge(start)]); // set.insert(self.triangles[self.next_halfedge(start)]); let mut incoming = start; let mut outgoing: usize; loop { set.insert(self.triangles[incoming]); outgoing = self.next_halfedge(incoming); incoming = self.halfedges[outgoing]; edges.push(outgoing); if incoming == EMPTY { break; } else if incoming == start { break; } } for start in edges { incoming = start; loop { set.insert(self.triangles[incoming]); outgoing = self.next_halfedge(incoming); incoming = self.halfedges[outgoing]; if incoming == EMPTY { break; } else if incoming == start { break; } } } set.into_iter().map(|i| i).collect() } /// Returns the indices of the 
adjacent triangles to a triangle. pub fn triangles_adjacent_to_triangle(&self, triangle: usize) -> Vec<usize> { let mut adjacent_triangles: Vec<usize> = vec![]; let mut opposite: usize; for e in self.edges_of_triangle(triangle).iter() { opposite = self.halfedges[*e]; if opposite != EMPTY { adjacent_triangles.push(self.triangle_of_edge(opposite)); } } adjacent_triangles } fn add_triangle( &mut self, i0: usize, i1: usize, i2: usize, a: usize, b: usize, c: usize, ) -> usize { let t = self.triangles.len(); self.triangles.push(i0); self.triangles.push(i1); self.triangles.push(i2); self.halfedges.push(a); self.halfedges.push(b); self.halfedges.push(c); if a != EMPTY { self.halfedges[a] = t; } if b != EMPTY { self.halfedges[b] = t + 1; } if c != EMPTY { self.halfedges[c] = t + 2; } t } fn legalize(&mut self, a: usize, points: &[Point2D], hull: &mut Hull) -> usize { let b = self.halfedges[a]; // if the pair of triangles doesn't satisfy the Delaunay condition // (p1 is inside the circumcircle of [p0, pl, pr]), flip them, // then do the same check/flip recursively for the new pair of triangles // // pl pl // /||\ / \ // al/ || \bl al/ \a // / || \ / \ // / a||b \ flip /___ar___\ // p0\ || /p1 => p0\---bl---/p1 // \ || / \ / // ar\ || /br b\ /br // \||/ \ / // pr pr // let ar = self.prev_halfedge(a); if b == EMPTY { return ar; } let al = self.next_halfedge(a); let bl = self.prev_halfedge(b); let p0 = self.triangles[ar]; let pr = self.triangles[a]; let pl = self.triangles[al]; let p1 = self.triangles[bl]; let illegal = (&points[p0]).in_circle(&points[pr], &points[pl], &points[p1]); if illegal { self.triangles[a] = p1; self.triangles[b] = p0; let hbl = self.halfedges[bl]; let har = self.halfedges[ar]; // edge swapped on the other side of the hull (rare); fix the halfedge reference if hbl == EMPTY { let mut e = hull.start; loop { if hull.tri[e] == bl { hull.tri[e] = a; break; } e = hull.next[e]; if e == hull.start || e == EMPTY { // notice, I added the || e == EMPTY after // 
finding a bug. I don't know about this. break; } } } self.halfedges[a] = hbl; self.halfedges[b] = har; self.halfedges[ar] = bl; if hbl != EMPTY { self.halfedges[hbl] = a; } if har != EMPTY { self.halfedges[har] = b; } if bl != EMPTY { self.halfedges[bl] = ar; } let br = self.next_halfedge(b); self.legalize(a, points, hull); return self.legalize(br, points, hull); } ar } } // data structure for tracking the edges of the advancing convex hull struct Hull { prev: Vec<usize>, next: Vec<usize>, tri: Vec<usize>, hash: Vec<usize>, start: usize, center: Point2D, } impl Hull { fn new(n: usize, center: Point2D, i0: usize, i1: usize, i2: usize, points: &[Point2D]) -> Self { let hash_len = (n as f64).sqrt() as usize; let mut hull = Self { prev: vec![0; n], // edge to prev edge next: vec![0; n], // edge to next edge tri: vec![0; n], // edge to adjacent halfedge hash: vec![EMPTY; hash_len], // angular edge hash start: i0, center, }; hull.next[i0] = i1; hull.prev[i2] = i1; hull.next[i1] = i2; hull.prev[i0] = i2; hull.next[i2] = i0; hull.prev[i1] = i0; hull.tri[i0] = 0; hull.tri[i1] = 1; hull.tri[i2] = 2; hull.hash_edge(&points[i0], i0); hull.hash_edge(&points[i1], i1); hull.hash_edge(&points[i2], i2); hull } fn hash_key(&self, p: &Point2D) -> usize { let dx = p.x - self.center.x; let dy = p.y - self.center.y; let p = dx / (dx.abs() + dy.abs()); let a = (if dy > 0.0 { 3.0 - p } else { 1.0 + p }) / 4.0; // [0..1] let len = self.hash.len(); (((len as f64) * a).floor() as usize) % len } fn hash_edge(&mut self, p: &Point2D, i: usize) { let key = self.hash_key(p); self.hash[key] = i; } fn find_visible_edge(&self, p: &Point2D, points: &[Point2D]) -> (usize, bool) { let mut start: usize = 0; let key = self.hash_key(p); let len = self.hash.len(); for j in 0..len { start = self.hash[(key + j) % len]; if start != EMPTY && self.next[start] != EMPTY { break; } } start = self.prev[start]; let mut e = start; while !p.orient(&points[e], &points[self.next[e]]) { e = self.next[e]; if e == start { 
return (EMPTY, false); } } (e, e == start) } } fn calc_bbox_center(points: &[Point2D]) -> Point2D { let mut min_x = f64::INFINITY; let mut min_y = f64::INFINITY; let mut max_x = f64::NEG_INFINITY; let mut max_y = f64::NEG_INFINITY; for p in points.iter() { min_x = min_x.min(p.x); min_y = min_y.min(p.y); max_x = max_x.max(p.x); max_y = max_y.max(p.y); } Point2D { x: (min_x + max_x) / 2.0, y: (min_y + max_y) / 2.0, } } fn find_closest_point(points: &[Point2D], p0: &Point2D) -> Option<usize> { let mut min_dist = f64::INFINITY; let mut k: usize = 0; for (i, p) in points.iter().enumerate() { let d = p0.distance_squared(p); if d > 0.0 && d < min_dist { k = i; min_dist = d; } } if min_dist == f64::INFINITY { None } else { Some(k) } } fn find_seed_triangle(points: &[Point2D]) -> Option<(usize, usize, usize)> { // pick a seed point close to the center let bbox_center = calc_bbox_center(points); let i0 = find_closest_point(points, &bbox_center)?; let p0 = &points[i0]; // find the point closest to the seed let i1 = find_closest_point(points, p0)?; let p1 = &points[i1]; // find the third point which forms the smallest circumcircle with the first two let mut min_radius = f64::INFINITY; let mut i2: usize = 0; for (i, p) in points.iter().enumerate() { if i == i0 || i == i1 { continue; } let r = p0.circumradius2(p1, p); if r < min_radius { i2 = i; min_radius = r; } } if min_radius == f64::INFINITY { None } else { // swap the order of the seed points for counter-clockwise orientation Some(if p0.orient(p1, &points[i2]) { (i0, i2, i1) } else { (i0, i1, i2) }) } } /// Triangulate a set of 2D points. /// Returns `None` if no triangulation exists for the input (e.g. all points are collinear). 
pub fn triangulate(points: &[Point2D]) -> Option<Triangulation> { let n = points.len(); let (i0, i1, i2) = find_seed_triangle(points)?; let center = (&points[i0]).circumcenter(&points[i1], &points[i2]); let mut triangulation = Triangulation::new(n); triangulation.add_triangle(i0, i1, i2, EMPTY, EMPTY, EMPTY); // sort the points by distance from the seed triangle circumcenter let mut dists: Vec<_> = points .iter() .enumerate() .map(|(i, point)| (i, center.distance_squared(point))) .collect(); dists.sort_unstable_by(|&(_, da), &(_, db)| da.partial_cmp(&db).unwrap()); let mut hull = Hull::new(n, center, i0, i1, i2, points); for (k, &(i, _)) in dists.iter().enumerate() { let p = &points[i]; // skip near-duplicates if k > 0 && p.nearly_equals(&points[dists[k - 1].0]) { continue; } // skip seed triangle points if i == i0 || i == i1 || i == i2 { continue; } // find a visible edge on the convex hull using edge hash let (mut e, walk_back) = hull.find_visible_edge(p, points); if e == EMPTY { continue; // likely a near-duplicate point; skip it } // add the first triangle from the point let t = triangulation.add_triangle(e, i, hull.next[e], EMPTY, EMPTY, hull.tri[e]); // recursively flip triangles from the point until they satisfy the Delaunay condition hull.tri[i] = triangulation.legalize(t + 2, points, &mut hull); hull.tri[e] = t; // keep track of boundary triangles on the hull // walk forward through the hull, adding more triangles and flipping recursively let mut n = hull.next[e]; loop { let q = hull.next[n]; if !p.orient(&points[n], &points[q]) { break; } let t = triangulation.add_triangle(n, i, q, hull.tri[i], EMPTY, hull.tri[n]); hull.tri[i] = triangulation.legalize(t + 2, points, &mut hull); hull.next[n] = EMPTY; // mark as removed n = q; } // walk backward from the other side, adding more triangles and flipping if walk_back { loop { let q = hull.prev[e]; if !p.orient(&points[q], &points[e]) { break; } let t = triangulation.add_triangle(q, i, e, EMPTY, hull.tri[e], 
hull.tri[q]); triangulation.legalize(t + 2, points, &mut hull); hull.tri[q] = t; hull.next[e] = EMPTY; // mark as removed e = q; } } // update the hull indices hull.prev[i] = e; hull.next[i] = n; hull.prev[n] = i; hull.next[e] = i; hull.start = e; // save the two new edges in the hash table hull.hash_edge(p, i); hull.hash_edge(&points[e], e); } // expose hull as a vector of point indices let mut e = hull.start; loop { triangulation.hull.push(e); e = hull.next[e]; if e == hull.start { break; } } triangulation.triangles.shrink_to_fit(); triangulation.halfedges.shrink_to_fit(); Some(triangulation) }
angle(t) // .into_iter() // .map(|e| self.triangles[*e]) // .collect() let e = self.edges_of_triangle(triangle); [ self.triangles[e[0]], self.triangles[e[1]], self.triangles[e[2]], ] } /// Triangle circumcente
identifier_body
delaunay_triangulation.rs
/*! The following code has been modified from the original delaunator-rs project: https://github.com/mourner/delaunator-rs For a description of the data structure, including the halfedge connectivity, see: https://mapbox.github.io/delaunator/ # Description A very fast 2D [Delaunay Triangulation](https://en.wikipedia.org/wiki/Delaunay_triangulation) library for Rust. A port of [Delaunator](https://github.com/mapbox/delaunator). A triangle edge may be shared with another triangle. Instead of thinking about each edge A↔︎B, we will use two half-edges A→B and B→A. Having two half-edges is the key to everything this library provides. Half-edges e are the indices into both of delaunator’s outputs: delaunay.triangles[e] returns the point id where the half-edge starts delaunay.halfedges[e] returns the opposite half-edge in the adjacent triangle, or -1 if there is no adjacent triangle Triangle ids and half-edge ids are related. The half-edges of triangle t are 3*t, 3*t + 1, and 3*t + 2. The triangle of half-edge id e is floor(e/3 # Example ```rust use delaunator::triangulate; use structures::Point2D let points = vec![ Point2D { x: 0., y: 0. }, Point2D { x: 1., y: 0. }, Point2D { x: 1., y: 1. }, Point2D { x: 0., y: 1. }, ]; let result = triangulate(&points).expect("No triangulation exists."); println!("{:?}", result.triangles); // [0, 2, 1, 0, 3, 2] ``` */ use crate::structures::Point2D; use std::collections::HashSet; use std::f64; /// Represents the area outside of the triangulation. /// Halfedges on the convex hull (which don't have an adjacent halfedge) /// will have this value. pub const EMPTY: usize = usize::max_value(); /// A data structure used to perform Delaunay triangulation on /// a set of input vector points. 
Connectivity between points, /// triangles, and halfedges is as follows: /// /// - edge → edges: next_halfedge, prevHalfedge, halfedges[] /// - edge → points: triangles[] /// - edge → triangle: triangle_of_edge /// - triangle → edges: edges_of_triangle /// - triangle → points: points_of_triangle /// - triangle → triangles: triangles_adjacent_to_triangle /// - point → incoming edges: edges_around_point /// - point → outgoing edges: edges_around_point + halfedge[] /// - point → points: edges_around_point + triangles[] /// - point → triangles: edges_around_point + triangle_of_edge pub struct Triangulation { /// A vector of point indices where each triple represents a Delaunay triangle. /// All triangles are directed counter-clockwise. pub triangles: Vec<usize>, /// A vector of adjacent halfedge indices that allows traversing the triangulation graph. /// /// `i`-th half-edge in the array corresponds to vertex `triangles[i]` /// the half-edge is coming from. `halfedges[i]` is the index of a twin half-edge /// in an adjacent triangle (or `EMPTY` for outer half-edges on the convex hull). pub halfedges: Vec<usize>, /// A vector of indices that reference points on the convex hull of the triangulation, /// counter-clockwise. pub hull: Vec<usize>, } impl Triangulation { /// Constructs a new *Triangulation*. fn new(n: usize) -> Self { let max_triangles = 2 * n - 5; Self { triangles: Vec::with_capacity(max_triangles * 3), halfedges: Vec::with_capacity(max_triangles * 3), hull: Vec::new(), } } /// The number of triangles in the triangulation. pub fn len(&self) -> usize { self.triangles.len() / 3 } /// Next halfedge in a triangle. pub fn next_halfedge(&self, edge: usize) -> usize { if edge % 3 == 2 { edge - 2 } else { edge + 1 } } /// Previous halfedge in a triangle. pub fn prev_halfedge(&self, edge: usize) -> usize { if edge % 3 == 0 { edge + 2 } else { edge - 1 } } /// Returns the triangle of an edge. 
pub fn triangle_of_edge(&self, edge: usize) -> usize { edge / 3 } /// Returns the edges of a triangle. pub fn edges_of_triangle(&self, triangle: usize) -> [usize; 3] { [3 * triangle, 3 * triangle + 1, 3 * triangle + 2] } /// Returns the points of a triangle. pub fn points_of_triangle(&self, triangle: usize) -> [usize; 3] { // self.edges_of_triangle(t) // .into_iter() // .map(|e| self.triangles[*e]) // .collect() let e = self.edges_of_triangle(triangle); [ self.triangles[e[0]], self.triangles[e[1]], self.triangles[e[2]], ] } /// Triangle circumcenter. pub fn triangle_center(&self, points: &[Point2D], triangle: usize) -> Point2D { let p = self.points_of_triangle(triangle); points[p[0]].circumcenter(&points[p[1]], &points[p[2]]) } /// Returns the edges around a point connected to halfedge '*start*'. pub fn edges_around_point(&self, start: usize) -> Vec<usize> { let mut result = vec![]; let mut incoming = start; let mut outgoing: usize; // let mut i = 0; loop { if result.contains(&incoming) { break; } result.push(incoming); outgoing = self.next_halfedge(incoming); incoming = self.halfedges[outgoing]; if incoming == EMPTY { break;
result.push(incoming); break; } // i += 1; // if i > 100 { // // println!("{} {} {}", outgoing, incoming, start); // break; // } } result } pub fn natural_neighbours_from_incoming_edge(&self, start: usize) -> Vec<usize> { let mut result = vec![]; //result.push(self.triangles[self.next_halfedge(start)]); let mut incoming = start; let mut outgoing: usize; loop { result.push(self.triangles[incoming]); outgoing = self.next_halfedge(incoming); incoming = self.halfedges[outgoing]; if incoming == EMPTY { break; } else if incoming == start { break; } } result } pub fn natural_neighbours_2nd_order(&self, start: usize) -> Vec<usize> { let mut set = HashSet::new(); let mut edges = vec![]; // result.push(self.triangles[self.next_halfedge(start)]); // set.insert(self.triangles[self.next_halfedge(start)]); let mut incoming = start; let mut outgoing: usize; loop { set.insert(self.triangles[incoming]); outgoing = self.next_halfedge(incoming); incoming = self.halfedges[outgoing]; edges.push(outgoing); if incoming == EMPTY { break; } else if incoming == start { break; } } for start in edges { incoming = start; loop { set.insert(self.triangles[incoming]); outgoing = self.next_halfedge(incoming); incoming = self.halfedges[outgoing]; if incoming == EMPTY { break; } else if incoming == start { break; } } } set.into_iter().map(|i| i).collect() } /// Returns the indices of the adjacent triangles to a triangle. 
pub fn triangles_adjacent_to_triangle(&self, triangle: usize) -> Vec<usize> { let mut adjacent_triangles: Vec<usize> = vec![]; let mut opposite: usize; for e in self.edges_of_triangle(triangle).iter() { opposite = self.halfedges[*e]; if opposite != EMPTY { adjacent_triangles.push(self.triangle_of_edge(opposite)); } } adjacent_triangles } fn add_triangle( &mut self, i0: usize, i1: usize, i2: usize, a: usize, b: usize, c: usize, ) -> usize { let t = self.triangles.len(); self.triangles.push(i0); self.triangles.push(i1); self.triangles.push(i2); self.halfedges.push(a); self.halfedges.push(b); self.halfedges.push(c); if a != EMPTY { self.halfedges[a] = t; } if b != EMPTY { self.halfedges[b] = t + 1; } if c != EMPTY { self.halfedges[c] = t + 2; } t } fn legalize(&mut self, a: usize, points: &[Point2D], hull: &mut Hull) -> usize { let b = self.halfedges[a]; // if the pair of triangles doesn't satisfy the Delaunay condition // (p1 is inside the circumcircle of [p0, pl, pr]), flip them, // then do the same check/flip recursively for the new pair of triangles // // pl pl // /||\ / \ // al/ || \bl al/ \a // / || \ / \ // / a||b \ flip /___ar___\ // p0\ || /p1 => p0\---bl---/p1 // \ || / \ / // ar\ || /br b\ /br // \||/ \ / // pr pr // let ar = self.prev_halfedge(a); if b == EMPTY { return ar; } let al = self.next_halfedge(a); let bl = self.prev_halfedge(b); let p0 = self.triangles[ar]; let pr = self.triangles[a]; let pl = self.triangles[al]; let p1 = self.triangles[bl]; let illegal = (&points[p0]).in_circle(&points[pr], &points[pl], &points[p1]); if illegal { self.triangles[a] = p1; self.triangles[b] = p0; let hbl = self.halfedges[bl]; let har = self.halfedges[ar]; // edge swapped on the other side of the hull (rare); fix the halfedge reference if hbl == EMPTY { let mut e = hull.start; loop { if hull.tri[e] == bl { hull.tri[e] = a; break; } e = hull.next[e]; if e == hull.start || e == EMPTY { // notice, I added the || e == EMPTY after // finding a bug. 
I don't know about this. break; } } } self.halfedges[a] = hbl; self.halfedges[b] = har; self.halfedges[ar] = bl; if hbl != EMPTY { self.halfedges[hbl] = a; } if har != EMPTY { self.halfedges[har] = b; } if bl != EMPTY { self.halfedges[bl] = ar; } let br = self.next_halfedge(b); self.legalize(a, points, hull); return self.legalize(br, points, hull); } ar } } // data structure for tracking the edges of the advancing convex hull struct Hull { prev: Vec<usize>, next: Vec<usize>, tri: Vec<usize>, hash: Vec<usize>, start: usize, center: Point2D, } impl Hull { fn new(n: usize, center: Point2D, i0: usize, i1: usize, i2: usize, points: &[Point2D]) -> Self { let hash_len = (n as f64).sqrt() as usize; let mut hull = Self { prev: vec![0; n], // edge to prev edge next: vec![0; n], // edge to next edge tri: vec![0; n], // edge to adjacent halfedge hash: vec![EMPTY; hash_len], // angular edge hash start: i0, center, }; hull.next[i0] = i1; hull.prev[i2] = i1; hull.next[i1] = i2; hull.prev[i0] = i2; hull.next[i2] = i0; hull.prev[i1] = i0; hull.tri[i0] = 0; hull.tri[i1] = 1; hull.tri[i2] = 2; hull.hash_edge(&points[i0], i0); hull.hash_edge(&points[i1], i1); hull.hash_edge(&points[i2], i2); hull } fn hash_key(&self, p: &Point2D) -> usize { let dx = p.x - self.center.x; let dy = p.y - self.center.y; let p = dx / (dx.abs() + dy.abs()); let a = (if dy > 0.0 { 3.0 - p } else { 1.0 + p }) / 4.0; // [0..1] let len = self.hash.len(); (((len as f64) * a).floor() as usize) % len } fn hash_edge(&mut self, p: &Point2D, i: usize) { let key = self.hash_key(p); self.hash[key] = i; } fn find_visible_edge(&self, p: &Point2D, points: &[Point2D]) -> (usize, bool) { let mut start: usize = 0; let key = self.hash_key(p); let len = self.hash.len(); for j in 0..len { start = self.hash[(key + j) % len]; if start != EMPTY && self.next[start] != EMPTY { break; } } start = self.prev[start]; let mut e = start; while !p.orient(&points[e], &points[self.next[e]]) { e = self.next[e]; if e == start { return (EMPTY, 
false); } } (e, e == start) } } fn calc_bbox_center(points: &[Point2D]) -> Point2D { let mut min_x = f64::INFINITY; let mut min_y = f64::INFINITY; let mut max_x = f64::NEG_INFINITY; let mut max_y = f64::NEG_INFINITY; for p in points.iter() { min_x = min_x.min(p.x); min_y = min_y.min(p.y); max_x = max_x.max(p.x); max_y = max_y.max(p.y); } Point2D { x: (min_x + max_x) / 2.0, y: (min_y + max_y) / 2.0, } } fn find_closest_point(points: &[Point2D], p0: &Point2D) -> Option<usize> { let mut min_dist = f64::INFINITY; let mut k: usize = 0; for (i, p) in points.iter().enumerate() { let d = p0.distance_squared(p); if d > 0.0 && d < min_dist { k = i; min_dist = d; } } if min_dist == f64::INFINITY { None } else { Some(k) } } fn find_seed_triangle(points: &[Point2D]) -> Option<(usize, usize, usize)> { // pick a seed point close to the center let bbox_center = calc_bbox_center(points); let i0 = find_closest_point(points, &bbox_center)?; let p0 = &points[i0]; // find the point closest to the seed let i1 = find_closest_point(points, p0)?; let p1 = &points[i1]; // find the third point which forms the smallest circumcircle with the first two let mut min_radius = f64::INFINITY; let mut i2: usize = 0; for (i, p) in points.iter().enumerate() { if i == i0 || i == i1 { continue; } let r = p0.circumradius2(p1, p); if r < min_radius { i2 = i; min_radius = r; } } if min_radius == f64::INFINITY { None } else { // swap the order of the seed points for counter-clockwise orientation Some(if p0.orient(p1, &points[i2]) { (i0, i2, i1) } else { (i0, i1, i2) }) } } /// Triangulate a set of 2D points. /// Returns `None` if no triangulation exists for the input (e.g. all points are collinear). 
pub fn triangulate(points: &[Point2D]) -> Option<Triangulation> { let n = points.len(); let (i0, i1, i2) = find_seed_triangle(points)?; let center = (&points[i0]).circumcenter(&points[i1], &points[i2]); let mut triangulation = Triangulation::new(n); triangulation.add_triangle(i0, i1, i2, EMPTY, EMPTY, EMPTY); // sort the points by distance from the seed triangle circumcenter let mut dists: Vec<_> = points .iter() .enumerate() .map(|(i, point)| (i, center.distance_squared(point))) .collect(); dists.sort_unstable_by(|&(_, da), &(_, db)| da.partial_cmp(&db).unwrap()); let mut hull = Hull::new(n, center, i0, i1, i2, points); for (k, &(i, _)) in dists.iter().enumerate() { let p = &points[i]; // skip near-duplicates if k > 0 && p.nearly_equals(&points[dists[k - 1].0]) { continue; } // skip seed triangle points if i == i0 || i == i1 || i == i2 { continue; } // find a visible edge on the convex hull using edge hash let (mut e, walk_back) = hull.find_visible_edge(p, points); if e == EMPTY { continue; // likely a near-duplicate point; skip it } // add the first triangle from the point let t = triangulation.add_triangle(e, i, hull.next[e], EMPTY, EMPTY, hull.tri[e]); // recursively flip triangles from the point until they satisfy the Delaunay condition hull.tri[i] = triangulation.legalize(t + 2, points, &mut hull); hull.tri[e] = t; // keep track of boundary triangles on the hull // walk forward through the hull, adding more triangles and flipping recursively let mut n = hull.next[e]; loop { let q = hull.next[n]; if !p.orient(&points[n], &points[q]) { break; } let t = triangulation.add_triangle(n, i, q, hull.tri[i], EMPTY, hull.tri[n]); hull.tri[i] = triangulation.legalize(t + 2, points, &mut hull); hull.next[n] = EMPTY; // mark as removed n = q; } // walk backward from the other side, adding more triangles and flipping if walk_back { loop { let q = hull.prev[e]; if !p.orient(&points[q], &points[e]) { break; } let t = triangulation.add_triangle(q, i, e, EMPTY, hull.tri[e], 
hull.tri[q]); triangulation.legalize(t + 2, points, &mut hull); hull.tri[q] = t; hull.next[e] = EMPTY; // mark as removed e = q; } } // update the hull indices hull.prev[i] = e; hull.next[i] = n; hull.prev[n] = i; hull.next[e] = i; hull.start = e; // save the two new edges in the hash table hull.hash_edge(p, i); hull.hash_edge(&points[e], e); } // expose hull as a vector of point indices let mut e = hull.start; loop { triangulation.hull.push(e); e = hull.next[e]; if e == hull.start { break; } } triangulation.triangles.shrink_to_fit(); triangulation.halfedges.shrink_to_fit(); Some(triangulation) }
} else if incoming == start {
conditional_block
delaunay_triangulation.rs
/*! The following code has been modified from the original delaunator-rs project: https://github.com/mourner/delaunator-rs For a description of the data structure, including the halfedge connectivity, see: https://mapbox.github.io/delaunator/ # Description A very fast 2D [Delaunay Triangulation](https://en.wikipedia.org/wiki/Delaunay_triangulation) library for Rust. A port of [Delaunator](https://github.com/mapbox/delaunator). A triangle edge may be shared with another triangle. Instead of thinking about each edge A↔︎B, we will use two half-edges A→B and B→A. Having two half-edges is the key to everything this library provides. Half-edges e are the indices into both of delaunator’s outputs: delaunay.triangles[e] returns the point id where the half-edge starts delaunay.halfedges[e] returns the opposite half-edge in the adjacent triangle, or -1 if there is no adjacent triangle Triangle ids and half-edge ids are related. The half-edges of triangle t are 3*t, 3*t + 1, and 3*t + 2. The triangle of half-edge id e is floor(e/3 # Example ```rust use delaunator::triangulate; use structures::Point2D let points = vec![ Point2D { x: 0., y: 0. }, Point2D { x: 1., y: 0. }, Point2D { x: 1., y: 1. }, Point2D { x: 0., y: 1. }, ]; let result = triangulate(&points).expect("No triangulation exists."); println!("{:?}", result.triangles); // [0, 2, 1, 0, 3, 2] ``` */ use crate::structures::Point2D; use std::collections::HashSet; use std::f64; /// Represents the area outside of the triangulation. /// Halfedges on the convex hull (which don't have an adjacent halfedge) /// will have this value. pub const EMPTY: usize = usize::max_value(); /// A data structure used to perform Delaunay triangulation on /// a set of input vector points. 
Connectivity between points, /// triangles, and halfedges is as follows: /// /// - edge → edges: next_halfedge, prevHalfedge, halfedges[] /// - edge → points: triangles[] /// - edge → triangle: triangle_of_edge /// - triangle → edges: edges_of_triangle /// - triangle → points: points_of_triangle /// - triangle → triangles: triangles_adjacent_to_triangle /// - point → incoming edges: edges_around_point /// - point → outgoing edges: edges_around_point + halfedge[] /// - point → points: edges_around_point + triangles[] /// - point → triangles: edges_around_point + triangle_of_edge pub struct Triangulation { /// A vector of point indices where each triple represents a Delaunay triangle. /// All triangles are directed counter-clockwise. pub triangles: Vec<usize>, /// A vector of adjacent halfedge indices that allows traversing the triangulation graph. /// /// `i`-th half-edge in the array corresponds to vertex `triangles[i]` /// the half-edge is coming from. `halfedges[i]` is the index of a twin half-edge /// in an adjacent triangle (or `EMPTY` for outer half-edges on the convex hull). pub halfedges: Vec<usize>, /// A vector of indices that reference points on the convex hull of the triangulation,
} impl Triangulation { /// Constructs a new *Triangulation*. fn new(n: usize) -> Self { let max_triangles = 2 * n - 5; Self { triangles: Vec::with_capacity(max_triangles * 3), halfedges: Vec::with_capacity(max_triangles * 3), hull: Vec::new(), } } /// The number of triangles in the triangulation. pub fn len(&self) -> usize { self.triangles.len() / 3 } /// Next halfedge in a triangle. pub fn next_halfedge(&self, edge: usize) -> usize { if edge % 3 == 2 { edge - 2 } else { edge + 1 } } /// Previous halfedge in a triangle. pub fn prev_halfedge(&self, edge: usize) -> usize { if edge % 3 == 0 { edge + 2 } else { edge - 1 } } /// Returns the triangle of an edge. pub fn triangle_of_edge(&self, edge: usize) -> usize { edge / 3 } /// Returns the edges of a triangle. pub fn edges_of_triangle(&self, triangle: usize) -> [usize; 3] { [3 * triangle, 3 * triangle + 1, 3 * triangle + 2] } /// Returns the points of a triangle. pub fn points_of_triangle(&self, triangle: usize) -> [usize; 3] { // self.edges_of_triangle(t) // .into_iter() // .map(|e| self.triangles[*e]) // .collect() let e = self.edges_of_triangle(triangle); [ self.triangles[e[0]], self.triangles[e[1]], self.triangles[e[2]], ] } /// Triangle circumcenter. pub fn triangle_center(&self, points: &[Point2D], triangle: usize) -> Point2D { let p = self.points_of_triangle(triangle); points[p[0]].circumcenter(&points[p[1]], &points[p[2]]) } /// Returns the edges around a point connected to halfedge '*start*'. 
pub fn edges_around_point(&self, start: usize) -> Vec<usize> { let mut result = vec![]; let mut incoming = start; let mut outgoing: usize; // let mut i = 0; loop { if result.contains(&incoming) { break; } result.push(incoming); outgoing = self.next_halfedge(incoming); incoming = self.halfedges[outgoing]; if incoming == EMPTY { break; } else if incoming == start { result.push(incoming); break; } // i += 1; // if i > 100 { // // println!("{} {} {}", outgoing, incoming, start); // break; // } } result } pub fn natural_neighbours_from_incoming_edge(&self, start: usize) -> Vec<usize> { let mut result = vec![]; //result.push(self.triangles[self.next_halfedge(start)]); let mut incoming = start; let mut outgoing: usize; loop { result.push(self.triangles[incoming]); outgoing = self.next_halfedge(incoming); incoming = self.halfedges[outgoing]; if incoming == EMPTY { break; } else if incoming == start { break; } } result } pub fn natural_neighbours_2nd_order(&self, start: usize) -> Vec<usize> { let mut set = HashSet::new(); let mut edges = vec![]; // result.push(self.triangles[self.next_halfedge(start)]); // set.insert(self.triangles[self.next_halfedge(start)]); let mut incoming = start; let mut outgoing: usize; loop { set.insert(self.triangles[incoming]); outgoing = self.next_halfedge(incoming); incoming = self.halfedges[outgoing]; edges.push(outgoing); if incoming == EMPTY { break; } else if incoming == start { break; } } for start in edges { incoming = start; loop { set.insert(self.triangles[incoming]); outgoing = self.next_halfedge(incoming); incoming = self.halfedges[outgoing]; if incoming == EMPTY { break; } else if incoming == start { break; } } } set.into_iter().map(|i| i).collect() } /// Returns the indices of the adjacent triangles to a triangle. 
pub fn triangles_adjacent_to_triangle(&self, triangle: usize) -> Vec<usize> { let mut adjacent_triangles: Vec<usize> = vec![]; let mut opposite: usize; for e in self.edges_of_triangle(triangle).iter() { opposite = self.halfedges[*e]; if opposite != EMPTY { adjacent_triangles.push(self.triangle_of_edge(opposite)); } } adjacent_triangles } fn add_triangle( &mut self, i0: usize, i1: usize, i2: usize, a: usize, b: usize, c: usize, ) -> usize { let t = self.triangles.len(); self.triangles.push(i0); self.triangles.push(i1); self.triangles.push(i2); self.halfedges.push(a); self.halfedges.push(b); self.halfedges.push(c); if a != EMPTY { self.halfedges[a] = t; } if b != EMPTY { self.halfedges[b] = t + 1; } if c != EMPTY { self.halfedges[c] = t + 2; } t } fn legalize(&mut self, a: usize, points: &[Point2D], hull: &mut Hull) -> usize { let b = self.halfedges[a]; // if the pair of triangles doesn't satisfy the Delaunay condition // (p1 is inside the circumcircle of [p0, pl, pr]), flip them, // then do the same check/flip recursively for the new pair of triangles // // pl pl // /||\ / \ // al/ || \bl al/ \a // / || \ / \ // / a||b \ flip /___ar___\ // p0\ || /p1 => p0\---bl---/p1 // \ || / \ / // ar\ || /br b\ /br // \||/ \ / // pr pr // let ar = self.prev_halfedge(a); if b == EMPTY { return ar; } let al = self.next_halfedge(a); let bl = self.prev_halfedge(b); let p0 = self.triangles[ar]; let pr = self.triangles[a]; let pl = self.triangles[al]; let p1 = self.triangles[bl]; let illegal = (&points[p0]).in_circle(&points[pr], &points[pl], &points[p1]); if illegal { self.triangles[a] = p1; self.triangles[b] = p0; let hbl = self.halfedges[bl]; let har = self.halfedges[ar]; // edge swapped on the other side of the hull (rare); fix the halfedge reference if hbl == EMPTY { let mut e = hull.start; loop { if hull.tri[e] == bl { hull.tri[e] = a; break; } e = hull.next[e]; if e == hull.start || e == EMPTY { // notice, I added the || e == EMPTY after // finding a bug. 
I don't know about this. break; } } } self.halfedges[a] = hbl; self.halfedges[b] = har; self.halfedges[ar] = bl; if hbl != EMPTY { self.halfedges[hbl] = a; } if har != EMPTY { self.halfedges[har] = b; } if bl != EMPTY { self.halfedges[bl] = ar; } let br = self.next_halfedge(b); self.legalize(a, points, hull); return self.legalize(br, points, hull); } ar } } // data structure for tracking the edges of the advancing convex hull struct Hull { prev: Vec<usize>, next: Vec<usize>, tri: Vec<usize>, hash: Vec<usize>, start: usize, center: Point2D, } impl Hull { fn new(n: usize, center: Point2D, i0: usize, i1: usize, i2: usize, points: &[Point2D]) -> Self { let hash_len = (n as f64).sqrt() as usize; let mut hull = Self { prev: vec![0; n], // edge to prev edge next: vec![0; n], // edge to next edge tri: vec![0; n], // edge to adjacent halfedge hash: vec![EMPTY; hash_len], // angular edge hash start: i0, center, }; hull.next[i0] = i1; hull.prev[i2] = i1; hull.next[i1] = i2; hull.prev[i0] = i2; hull.next[i2] = i0; hull.prev[i1] = i0; hull.tri[i0] = 0; hull.tri[i1] = 1; hull.tri[i2] = 2; hull.hash_edge(&points[i0], i0); hull.hash_edge(&points[i1], i1); hull.hash_edge(&points[i2], i2); hull } fn hash_key(&self, p: &Point2D) -> usize { let dx = p.x - self.center.x; let dy = p.y - self.center.y; let p = dx / (dx.abs() + dy.abs()); let a = (if dy > 0.0 { 3.0 - p } else { 1.0 + p }) / 4.0; // [0..1] let len = self.hash.len(); (((len as f64) * a).floor() as usize) % len } fn hash_edge(&mut self, p: &Point2D, i: usize) { let key = self.hash_key(p); self.hash[key] = i; } fn find_visible_edge(&self, p: &Point2D, points: &[Point2D]) -> (usize, bool) { let mut start: usize = 0; let key = self.hash_key(p); let len = self.hash.len(); for j in 0..len { start = self.hash[(key + j) % len]; if start != EMPTY && self.next[start] != EMPTY { break; } } start = self.prev[start]; let mut e = start; while !p.orient(&points[e], &points[self.next[e]]) { e = self.next[e]; if e == start { return (EMPTY, 
false); } } (e, e == start) } } fn calc_bbox_center(points: &[Point2D]) -> Point2D { let mut min_x = f64::INFINITY; let mut min_y = f64::INFINITY; let mut max_x = f64::NEG_INFINITY; let mut max_y = f64::NEG_INFINITY; for p in points.iter() { min_x = min_x.min(p.x); min_y = min_y.min(p.y); max_x = max_x.max(p.x); max_y = max_y.max(p.y); } Point2D { x: (min_x + max_x) / 2.0, y: (min_y + max_y) / 2.0, } } fn find_closest_point(points: &[Point2D], p0: &Point2D) -> Option<usize> { let mut min_dist = f64::INFINITY; let mut k: usize = 0; for (i, p) in points.iter().enumerate() { let d = p0.distance_squared(p); if d > 0.0 && d < min_dist { k = i; min_dist = d; } } if min_dist == f64::INFINITY { None } else { Some(k) } } fn find_seed_triangle(points: &[Point2D]) -> Option<(usize, usize, usize)> { // pick a seed point close to the center let bbox_center = calc_bbox_center(points); let i0 = find_closest_point(points, &bbox_center)?; let p0 = &points[i0]; // find the point closest to the seed let i1 = find_closest_point(points, p0)?; let p1 = &points[i1]; // find the third point which forms the smallest circumcircle with the first two let mut min_radius = f64::INFINITY; let mut i2: usize = 0; for (i, p) in points.iter().enumerate() { if i == i0 || i == i1 { continue; } let r = p0.circumradius2(p1, p); if r < min_radius { i2 = i; min_radius = r; } } if min_radius == f64::INFINITY { None } else { // swap the order of the seed points for counter-clockwise orientation Some(if p0.orient(p1, &points[i2]) { (i0, i2, i1) } else { (i0, i1, i2) }) } } /// Triangulate a set of 2D points. /// Returns `None` if no triangulation exists for the input (e.g. all points are collinear). 
pub fn triangulate(points: &[Point2D]) -> Option<Triangulation> { let n = points.len(); let (i0, i1, i2) = find_seed_triangle(points)?; let center = (&points[i0]).circumcenter(&points[i1], &points[i2]); let mut triangulation = Triangulation::new(n); triangulation.add_triangle(i0, i1, i2, EMPTY, EMPTY, EMPTY); // sort the points by distance from the seed triangle circumcenter let mut dists: Vec<_> = points .iter() .enumerate() .map(|(i, point)| (i, center.distance_squared(point))) .collect(); dists.sort_unstable_by(|&(_, da), &(_, db)| da.partial_cmp(&db).unwrap()); let mut hull = Hull::new(n, center, i0, i1, i2, points); for (k, &(i, _)) in dists.iter().enumerate() { let p = &points[i]; // skip near-duplicates if k > 0 && p.nearly_equals(&points[dists[k - 1].0]) { continue; } // skip seed triangle points if i == i0 || i == i1 || i == i2 { continue; } // find a visible edge on the convex hull using edge hash let (mut e, walk_back) = hull.find_visible_edge(p, points); if e == EMPTY { continue; // likely a near-duplicate point; skip it } // add the first triangle from the point let t = triangulation.add_triangle(e, i, hull.next[e], EMPTY, EMPTY, hull.tri[e]); // recursively flip triangles from the point until they satisfy the Delaunay condition hull.tri[i] = triangulation.legalize(t + 2, points, &mut hull); hull.tri[e] = t; // keep track of boundary triangles on the hull // walk forward through the hull, adding more triangles and flipping recursively let mut n = hull.next[e]; loop { let q = hull.next[n]; if !p.orient(&points[n], &points[q]) { break; } let t = triangulation.add_triangle(n, i, q, hull.tri[i], EMPTY, hull.tri[n]); hull.tri[i] = triangulation.legalize(t + 2, points, &mut hull); hull.next[n] = EMPTY; // mark as removed n = q; } // walk backward from the other side, adding more triangles and flipping if walk_back { loop { let q = hull.prev[e]; if !p.orient(&points[q], &points[e]) { break; } let t = triangulation.add_triangle(q, i, e, EMPTY, hull.tri[e], 
hull.tri[q]); triangulation.legalize(t + 2, points, &mut hull); hull.tri[q] = t; hull.next[e] = EMPTY; // mark as removed e = q; } } // update the hull indices hull.prev[i] = e; hull.next[i] = n; hull.prev[n] = i; hull.next[e] = i; hull.start = e; // save the two new edges in the hash table hull.hash_edge(p, i); hull.hash_edge(&points[e], e); } // expose hull as a vector of point indices let mut e = hull.start; loop { triangulation.hull.push(e); e = hull.next[e]; if e == hull.start { break; } } triangulation.triangles.shrink_to_fit(); triangulation.halfedges.shrink_to_fit(); Some(triangulation) }
/// counter-clockwise. pub hull: Vec<usize>,
random_line_split
delaunay_triangulation.rs
/*! The following code has been modified from the original delaunator-rs project: https://github.com/mourner/delaunator-rs For a description of the data structure, including the halfedge connectivity, see: https://mapbox.github.io/delaunator/ # Description A very fast 2D [Delaunay Triangulation](https://en.wikipedia.org/wiki/Delaunay_triangulation) library for Rust. A port of [Delaunator](https://github.com/mapbox/delaunator). A triangle edge may be shared with another triangle. Instead of thinking about each edge A↔︎B, we will use two half-edges A→B and B→A. Having two half-edges is the key to everything this library provides. Half-edges e are the indices into both of delaunator’s outputs: delaunay.triangles[e] returns the point id where the half-edge starts delaunay.halfedges[e] returns the opposite half-edge in the adjacent triangle, or -1 if there is no adjacent triangle Triangle ids and half-edge ids are related. The half-edges of triangle t are 3*t, 3*t + 1, and 3*t + 2. The triangle of half-edge id e is floor(e/3 # Example ```rust use delaunator::triangulate; use structures::Point2D let points = vec![ Point2D { x: 0., y: 0. }, Point2D { x: 1., y: 0. }, Point2D { x: 1., y: 1. }, Point2D { x: 0., y: 1. }, ]; let result = triangulate(&points).expect("No triangulation exists."); println!("{:?}", result.triangles); // [0, 2, 1, 0, 3, 2] ``` */ use crate::structures::Point2D; use std::collections::HashSet; use std::f64; /// Represents the area outside of the triangulation. /// Halfedges on the convex hull (which don't have an adjacent halfedge) /// will have this value. pub const EMPTY: usize = usize::max_value(); /// A data structure used to perform Delaunay triangulation on /// a set of input vector points. 
Connectivity between points, /// triangles, and halfedges is as follows: /// /// - edge → edges: next_halfedge, prevHalfedge, halfedges[] /// - edge → points: triangles[] /// - edge → triangle: triangle_of_edge /// - triangle → edges: edges_of_triangle /// - triangle → points: points_of_triangle /// - triangle → triangles: triangles_adjacent_to_triangle /// - point → incoming edges: edges_around_point /// - point → outgoing edges: edges_around_point + halfedge[] /// - point → points: edges_around_point + triangles[] /// - point → triangles: edges_around_point + triangle_of_edge pub struct Triangulation { /// A vector of point indices where each triple represents a Delaunay triangle. /// All triangles are directed counter-clockwise. pub triangles: Vec<usize>, /// A vector of adjacent halfedge indices that allows traversing the triangulation graph. /// /// `i`-th half-edge in the array corresponds to vertex `triangles[i]` /// the half-edge is coming from. `halfedges[i]` is the index of a twin half-edge /// in an adjacent triangle (or `EMPTY` for outer half-edges on the convex hull). pub halfedges: Vec<usize>, /// A vector of indices that reference points on the convex hull of the triangulation, /// counter-clockwise. pub hull: Vec<usize>, } impl Triangulation { /// Constructs a new *Triangulation*. fn new(n: usize) -> Self { let max_triangles = 2 * n - 5; Self { triangles: Vec::with_capacity(max_triangles * 3), halfedges: Vec::with_capacity(max_triangles * 3), hull: Vec::new(), } } /// The number of triangles in the triangulation. pub fn len(&self) -> usize { self.triangles.len() / 3 } /// Next halfedge in a triangle. pub fn next_halfedge(&self, edge: usize) -> usize { if edge % 3 == 2 { edge - 2 } else { edge + 1 } } /// Previous halfedge in a triangle. pub fn prev_halfedge(&self, edge: usize) -> usize { if edge % 3 == 0 { edge + 2 } else { edge - 1 } } /// Returns the triangle of an edge. 
pub fn triangle_of_edge(&self, edge: usize) -> usize { edge / 3 } /// Returns the edges of a triangle. pub fn edges_of_triangle(&self, triangle: usize) -> [usize; 3] { [3 * triangle, 3 * triangle + 1, 3 * triangle + 2] } /// Returns the points of a triangle. pub fn points_of_triangle(&self, triangle: usize) -> [usize; 3] { // self.edges_of_triangle(t) // .into_iter() // .map(|e| self.triangles[*e]) // .collect() let e = self.edges_of_triangle(triangle); [ self.triangles[e[0]], self.triangles[e[1]], self.triangles[e[2]], ] } /// Triangle circumcenter. pub fn triangle_center(&self, points: &[Point2D], triangle: usize) -> Point2D { let p = self.points_of_triangle(triangle); points[p[0]].circumcenter(&points[p[1]], &points[p[2]]) } /// Returns the edges around a point connected to halfedge '*start*'. pub fn edges_around_point(&self, start: usize) -> Vec<usize> { let mut result = vec![]; let mut incoming = start; let mut outgoing: usize; // let mut i = 0; loop { if result.contains(&incoming) { break; } result.push(incoming); outgoing = self.next_halfedge(incoming); incoming = self.halfedges[outgoing]; if incoming == EMPTY { break; } else if incoming == start { result.push(incoming); break; } // i += 1; // if i > 100 { // // println!("{} {} {}", outgoing, incoming, start); // break; // } } result } pub fn natural_neighbours_from_incoming_edge(&self, start: usize) -> Vec<usize> { let mut result = vec![]; //result.push(self.triangles[self.next_halfedge(start)]); let mut incoming = start; let mut outgoing: usize; loop { result.push(self.triangles[incoming]); outgoing = self.next_halfedge(incoming); incoming = self.halfedges[outgoing]; if incoming == EMPTY { break; } else if incoming == start { break; } } result } pub fn natural_neighbours_2nd_order(&self, start: usize) -> Vec<usize> { let mut set = HashSet::new(); let mut edges = vec![]; // result.push(self.triangles[self.next_halfedge(start)]); // set.insert(self.triangles[self.next_halfedge(start)]); let mut incoming = 
start; let mut outgoing: usize; loop { set.insert(self.triangles[incoming]); outgoing = self.next_halfedge(incoming); incoming = self.halfedges[outgoing]; edges.push(outgoing); if incoming == EMPTY { break; } else if incoming == start { break; } } for start in edges { incoming = start; loop { set.insert(self.triangles[incoming]); outgoing = self.next_halfedge(incoming); incoming = self.halfedges[outgoing]; if incoming == EMPTY { break; } else if incoming == start { break; } } } set.into_iter().map(|i| i).collect() } /// Returns the indices of the adjacent triangles to a triangle. pub fn triangles_adjacent_to_triangle(&self, triangle: usize) -> Vec<usize> { let mut adjacent_triangles: Vec<usize> = vec![]; let mut opposite: usize; for e in self.edges_of_triangle(triangle).iter() { opposite = self.halfedges[*e]; if opposite != EMPTY { adjacent_triangles.push(self.triangle_of_edge(opposite)); } } adjacent_triangles } fn add_triangle( &mut sel
0: usize, i1: usize, i2: usize, a: usize, b: usize, c: usize, ) -> usize { let t = self.triangles.len(); self.triangles.push(i0); self.triangles.push(i1); self.triangles.push(i2); self.halfedges.push(a); self.halfedges.push(b); self.halfedges.push(c); if a != EMPTY { self.halfedges[a] = t; } if b != EMPTY { self.halfedges[b] = t + 1; } if c != EMPTY { self.halfedges[c] = t + 2; } t } fn legalize(&mut self, a: usize, points: &[Point2D], hull: &mut Hull) -> usize { let b = self.halfedges[a]; // if the pair of triangles doesn't satisfy the Delaunay condition // (p1 is inside the circumcircle of [p0, pl, pr]), flip them, // then do the same check/flip recursively for the new pair of triangles // // pl pl // /||\ / \ // al/ || \bl al/ \a // / || \ / \ // / a||b \ flip /___ar___\ // p0\ || /p1 => p0\---bl---/p1 // \ || / \ / // ar\ || /br b\ /br // \||/ \ / // pr pr // let ar = self.prev_halfedge(a); if b == EMPTY { return ar; } let al = self.next_halfedge(a); let bl = self.prev_halfedge(b); let p0 = self.triangles[ar]; let pr = self.triangles[a]; let pl = self.triangles[al]; let p1 = self.triangles[bl]; let illegal = (&points[p0]).in_circle(&points[pr], &points[pl], &points[p1]); if illegal { self.triangles[a] = p1; self.triangles[b] = p0; let hbl = self.halfedges[bl]; let har = self.halfedges[ar]; // edge swapped on the other side of the hull (rare); fix the halfedge reference if hbl == EMPTY { let mut e = hull.start; loop { if hull.tri[e] == bl { hull.tri[e] = a; break; } e = hull.next[e]; if e == hull.start || e == EMPTY { // notice, I added the || e == EMPTY after // finding a bug. I don't know about this. 
break; } } } self.halfedges[a] = hbl; self.halfedges[b] = har; self.halfedges[ar] = bl; if hbl != EMPTY { self.halfedges[hbl] = a; } if har != EMPTY { self.halfedges[har] = b; } if bl != EMPTY { self.halfedges[bl] = ar; } let br = self.next_halfedge(b); self.legalize(a, points, hull); return self.legalize(br, points, hull); } ar } } // data structure for tracking the edges of the advancing convex hull struct Hull { prev: Vec<usize>, next: Vec<usize>, tri: Vec<usize>, hash: Vec<usize>, start: usize, center: Point2D, } impl Hull { fn new(n: usize, center: Point2D, i0: usize, i1: usize, i2: usize, points: &[Point2D]) -> Self { let hash_len = (n as f64).sqrt() as usize; let mut hull = Self { prev: vec![0; n], // edge to prev edge next: vec![0; n], // edge to next edge tri: vec![0; n], // edge to adjacent halfedge hash: vec![EMPTY; hash_len], // angular edge hash start: i0, center, }; hull.next[i0] = i1; hull.prev[i2] = i1; hull.next[i1] = i2; hull.prev[i0] = i2; hull.next[i2] = i0; hull.prev[i1] = i0; hull.tri[i0] = 0; hull.tri[i1] = 1; hull.tri[i2] = 2; hull.hash_edge(&points[i0], i0); hull.hash_edge(&points[i1], i1); hull.hash_edge(&points[i2], i2); hull } fn hash_key(&self, p: &Point2D) -> usize { let dx = p.x - self.center.x; let dy = p.y - self.center.y; let p = dx / (dx.abs() + dy.abs()); let a = (if dy > 0.0 { 3.0 - p } else { 1.0 + p }) / 4.0; // [0..1] let len = self.hash.len(); (((len as f64) * a).floor() as usize) % len } fn hash_edge(&mut self, p: &Point2D, i: usize) { let key = self.hash_key(p); self.hash[key] = i; } fn find_visible_edge(&self, p: &Point2D, points: &[Point2D]) -> (usize, bool) { let mut start: usize = 0; let key = self.hash_key(p); let len = self.hash.len(); for j in 0..len { start = self.hash[(key + j) % len]; if start != EMPTY && self.next[start] != EMPTY { break; } } start = self.prev[start]; let mut e = start; while !p.orient(&points[e], &points[self.next[e]]) { e = self.next[e]; if e == start { return (EMPTY, false); } } (e, e == 
start) } } fn calc_bbox_center(points: &[Point2D]) -> Point2D { let mut min_x = f64::INFINITY; let mut min_y = f64::INFINITY; let mut max_x = f64::NEG_INFINITY; let mut max_y = f64::NEG_INFINITY; for p in points.iter() { min_x = min_x.min(p.x); min_y = min_y.min(p.y); max_x = max_x.max(p.x); max_y = max_y.max(p.y); } Point2D { x: (min_x + max_x) / 2.0, y: (min_y + max_y) / 2.0, } } fn find_closest_point(points: &[Point2D], p0: &Point2D) -> Option<usize> { let mut min_dist = f64::INFINITY; let mut k: usize = 0; for (i, p) in points.iter().enumerate() { let d = p0.distance_squared(p); if d > 0.0 && d < min_dist { k = i; min_dist = d; } } if min_dist == f64::INFINITY { None } else { Some(k) } } fn find_seed_triangle(points: &[Point2D]) -> Option<(usize, usize, usize)> { // pick a seed point close to the center let bbox_center = calc_bbox_center(points); let i0 = find_closest_point(points, &bbox_center)?; let p0 = &points[i0]; // find the point closest to the seed let i1 = find_closest_point(points, p0)?; let p1 = &points[i1]; // find the third point which forms the smallest circumcircle with the first two let mut min_radius = f64::INFINITY; let mut i2: usize = 0; for (i, p) in points.iter().enumerate() { if i == i0 || i == i1 { continue; } let r = p0.circumradius2(p1, p); if r < min_radius { i2 = i; min_radius = r; } } if min_radius == f64::INFINITY { None } else { // swap the order of the seed points for counter-clockwise orientation Some(if p0.orient(p1, &points[i2]) { (i0, i2, i1) } else { (i0, i1, i2) }) } } /// Triangulate a set of 2D points. /// Returns `None` if no triangulation exists for the input (e.g. all points are collinear). 
pub fn triangulate(points: &[Point2D]) -> Option<Triangulation> { let n = points.len(); let (i0, i1, i2) = find_seed_triangle(points)?; let center = (&points[i0]).circumcenter(&points[i1], &points[i2]); let mut triangulation = Triangulation::new(n); triangulation.add_triangle(i0, i1, i2, EMPTY, EMPTY, EMPTY); // sort the points by distance from the seed triangle circumcenter let mut dists: Vec<_> = points .iter() .enumerate() .map(|(i, point)| (i, center.distance_squared(point))) .collect(); dists.sort_unstable_by(|&(_, da), &(_, db)| da.partial_cmp(&db).unwrap()); let mut hull = Hull::new(n, center, i0, i1, i2, points); for (k, &(i, _)) in dists.iter().enumerate() { let p = &points[i]; // skip near-duplicates if k > 0 && p.nearly_equals(&points[dists[k - 1].0]) { continue; } // skip seed triangle points if i == i0 || i == i1 || i == i2 { continue; } // find a visible edge on the convex hull using edge hash let (mut e, walk_back) = hull.find_visible_edge(p, points); if e == EMPTY { continue; // likely a near-duplicate point; skip it } // add the first triangle from the point let t = triangulation.add_triangle(e, i, hull.next[e], EMPTY, EMPTY, hull.tri[e]); // recursively flip triangles from the point until they satisfy the Delaunay condition hull.tri[i] = triangulation.legalize(t + 2, points, &mut hull); hull.tri[e] = t; // keep track of boundary triangles on the hull // walk forward through the hull, adding more triangles and flipping recursively let mut n = hull.next[e]; loop { let q = hull.next[n]; if !p.orient(&points[n], &points[q]) { break; } let t = triangulation.add_triangle(n, i, q, hull.tri[i], EMPTY, hull.tri[n]); hull.tri[i] = triangulation.legalize(t + 2, points, &mut hull); hull.next[n] = EMPTY; // mark as removed n = q; } // walk backward from the other side, adding more triangles and flipping if walk_back { loop { let q = hull.prev[e]; if !p.orient(&points[q], &points[e]) { break; } let t = triangulation.add_triangle(q, i, e, EMPTY, hull.tri[e], 
hull.tri[q]); triangulation.legalize(t + 2, points, &mut hull); hull.tri[q] = t; hull.next[e] = EMPTY; // mark as removed e = q; } } // update the hull indices hull.prev[i] = e; hull.next[i] = n; hull.prev[n] = i; hull.next[e] = i; hull.start = e; // save the two new edges in the hash table hull.hash_edge(p, i); hull.hash_edge(&points[e], e); } // expose hull as a vector of point indices let mut e = hull.start; loop { triangulation.hull.push(e); e = hull.next[e]; if e == hull.start { break; } } triangulation.triangles.shrink_to_fit(); triangulation.halfedges.shrink_to_fit(); Some(triangulation) }
f, i
identifier_name
api.py
import requests from requests import HTTPError from pbincli.utils import PBinCLIError def _config_requests(settings=None, shortener=False): if settings['no_insecure_warning']: from requests.packages.urllib3.exceptions import InsecureRequestWarning requests.packages.urllib3.disable_warnings(InsecureRequestWarning) session = requests.Session() session.verify = not settings['no_check_certificate'] if settings['auth'] and not shortener: # do not leak PrivateBin authorization to shortener services if settings['auth'] == 'basic' and settings['auth_user'] and settings['auth_pass']: session.auth = (settings['auth_user'], settings['auth_pass']) elif settings['auth'] == 'custom' and settings['auth_custom']: from json import loads as json_loads auth = json_loads(settings['auth_custom']) session.headers.update(auth) else: PBinCLIError("Incorrect authorization configuration") if settings['proxy']: scheme = settings['proxy'].split('://')[0] if (scheme.startswith("socks")): session.proxies.update({ "http": settings['proxy'], "https": settings['proxy'] }) else: session.proxies.update({scheme: settings['proxy']}) return session class PrivateBin:
class Shortener: """Some parts of this class was taken from python-yourls (https://github.com/tflink/python-yourls/) library """ def __init__(self, settings=None): self.api = settings['short_api'] if self.api is None: PBinCLIError("Unable to activate link shortener without short_api.") # we checking which service is used, because some services doesn't require # any authentication, or have only one domain on which it working if self.api == 'yourls': self._yourls_init(settings) elif self.api == 'isgd' or self.api == 'vgd': self._gd_init() elif self.api == 'custom': self.apiurl = settings['short_url'] self.session = _config_requests(settings, True) def _yourls_init(self, settings): if not settings['short_url']: PBinCLIError("YOURLS: An API URL is required") # setting API URL apiurl = settings['short_url'] if apiurl.endswith('/yourls-api.php'): self.apiurl = apiurl elif apiurl.endswith('/'): self.apiurl = apiurl + 'yourls-api.php' else: PBinCLIError("YOURLS: Incorrect URL is provided.\n" + "It must contain full address to 'yourls-api.php' script (like https://example.com/yourls-api.php)\n" + "or just contain instance URL with '/' at the end (like https://example.com/)") # validating for required credentials if settings['short_user'] and settings['short_pass'] and settings['short_token'] is None: self.auth_args = {'username': settings['short_user'], 'password': settings['short_pass']} elif settings['short_user'] is None and settings['short_pass'] is None and settings['short_token']: self.auth_args = {'signature': settings['short_token']} elif settings['short_user'] is None and settings['short_pass'] is None and settings['short_token'] is None: self.auth_args = {} else: PBinCLIError("YOURLS: either username and password or token are required. 
Otherwise set to default (None)") def _gd_init(self): if self.api == 'isgd': self.apiurl = 'https://is.gd/' else: self.apiurl = 'https://v.gd/' self.useragent = 'Mozilla/5.0 (compatible; pbincli - https://github.com/r4sas/pbincli/)' def getlink(self, url): # that is api -> function mapper for running service-related function when getlink() used servicesList = { 'yourls': self._yourls, 'clckru': self._clckru, 'tinyurl': self._tinyurl, 'isgd': self._gd, 'vgd': self._gd, 'cuttly': self._cuttly, 'custom': self._custom } # run function selected by choosen API servicesList[self.api](url) def _yourls(self,url): request = {'action': 'shorturl', 'format': 'json', 'url': url} request.update(self.auth_args) result = self.session.post( url = self.apiurl, data = request) try: result.raise_for_status() except HTTPError: try: response = result.json() except ValueError: PBinCLIError("YOURLS: Unable parse response. Received (size = {}):\n{}".format(len(result.text), result.text)) else: PBinCLIError("YOURLS: Received error from API: {} with JSON {}".format(result, response)) else: response = result.json() if {'status', 'statusCode', 'message'} <= set(response.keys()): if response['status'] == 'fail': PBinCLIError("YOURLS: Received error from API: {}".format(response['message'])) if not 'shorturl' in response: PBinCLIError("YOURLS: Unknown error: {}".format(response['message'])) else: print("Short Link:\t{}".format(response['shorturl'])) else: PBinCLIError("YOURLS: No status, statusCode or message fields in response! 
Received:\n{}".format(response)) def _clckru(self, url): request = {'url': url} try: result = self.session.post( url = "https://clck.ru/--", data = request) print("Short Link:\t{}".format(result.text)) except Exception as ex: PBinCLIError("clck.ru: unexcepted behavior: {}".format(ex)) def _tinyurl(self, url): request = {'url': url} try: result = self.session.post( url = "https://tinyurl.com/api-create.php", data = request) print("Short Link:\t{}".format(result.text)) except Exception as ex: PBinCLIError("TinyURL: unexcepted behavior: {}".format(ex)) def _gd(self, url): request = { 'format': 'json', 'url': url, 'logstats': 0 # we don't want use any statistics } headers = { 'User-Agent': self.useragent} try: result = self.session.post( url = self.apiurl + "create.php", headers = headers, data = request) response = result.json() if 'shorturl' in response: print("Short Link:\t{}".format(response['shorturl'])) else: PBinCLIError("{}: got error {} from API: {}".format( "is.gd" if self.api == 'isgd' else 'v.gd', response['errorcode'], response['errormessage'])) except Exception as ex: PBinCLIError("{}: unexcepted behavior: {}".format( "is.gd" if self.api == 'isgd' else 'v.gd', ex)) def _cuttly(self, url): request = { 'url': url, 'domain': 0 } try: result = self.session.post( url = "https://cutt.ly/scripts/shortenUrl.php", data = request) print("Short Link:\t{}".format(result.text)) except Exception as ex: PBinCLIError("cutt.ly: unexcepted behavior: {}".format(ex)) def _custom(self, url): if self.apiurl is None: PBinCLIError("No short_url specified - link will not be shortened.") from urllib.parse import quote qUrl = quote(url, safe="") # urlencoded paste url rUrl = self.apiurl.replace("{{url}}", qUrl) try: result = self.session.get( url = rUrl) print("Short Link:\t{}".format(result.text)) except Exception as ex: PBinCLIError("Shorter: unexcepted behavior: {}".format(ex))
def __init__(self, settings=None): self.server = settings['server'] self.headers = {'X-Requested-With': 'JSONHttpRequest'} self.session = _config_requests(settings, False) def post(self, request): result = self.session.post( url = self.server, headers = self.headers, data = request) try: return result.json() except ValueError: PBinCLIError("Unable parse response as json. Received (size = {}):\n{}".format(len(result.text), result.text)) def get(self, request): return self.session.get( url = self.server + "?" + request, headers = self.headers).json() def delete(self, request): # using try as workaround for versions < 1.3 due to we cant detect # if server used version 1.2, where auto-deletion is added try: result = self.session.post( url = self.server, headers = self.headers, data = request).json() except ValueError: # unable parse response as json because it can be empty (1.2), so simulate correct answer print("NOTICE: Received empty response. We interpret that as our paste has already been deleted.") from json import loads as json_loads result = json_loads('{"status":0}') if not result['status']: print("Paste successfully deleted!") elif result['status']: PBinCLIError("Something went wrong...\nError:\t\t{}".format(result['message'])) else: PBinCLIError("Something went wrong...\nError: Empty response.") def getVersion(self): result = self.session.get( url = self.server + '?jsonld=paste', headers = self.headers) try: jsonldSchema = result.json() return jsonldSchema['@context']['v']['@value'] \ if ('@context' in jsonldSchema and 'v' in jsonldSchema['@context'] and '@value' in jsonldSchema['@context']['v']) \ else 1 except ValueError: PBinCLIError("Unable parse response as json. Received (size = {}):\n{}".format(len(result.text), result.text)) def getServer(self): return self.server
identifier_body
api.py
import requests from requests import HTTPError from pbincli.utils import PBinCLIError def _config_requests(settings=None, shortener=False): if settings['no_insecure_warning']: from requests.packages.urllib3.exceptions import InsecureRequestWarning requests.packages.urllib3.disable_warnings(InsecureRequestWarning) session = requests.Session() session.verify = not settings['no_check_certificate'] if settings['auth'] and not shortener: # do not leak PrivateBin authorization to shortener services if settings['auth'] == 'basic' and settings['auth_user'] and settings['auth_pass']: session.auth = (settings['auth_user'], settings['auth_pass']) elif settings['auth'] == 'custom' and settings['auth_custom']: from json import loads as json_loads auth = json_loads(settings['auth_custom']) session.headers.update(auth) else: PBinCLIError("Incorrect authorization configuration") if settings['proxy']: scheme = settings['proxy'].split('://')[0] if (scheme.startswith("socks")): session.proxies.update({ "http": settings['proxy'], "https": settings['proxy'] }) else: session.proxies.update({scheme: settings['proxy']}) return session class PrivateBin: def __init__(self, settings=None): self.server = settings['server'] self.headers = {'X-Requested-With': 'JSONHttpRequest'} self.session = _config_requests(settings, False) def post(self, request): result = self.session.post( url = self.server, headers = self.headers, data = request) try: return result.json() except ValueError: PBinCLIError("Unable parse response as json. Received (size = {}):\n{}".format(len(result.text), result.text)) def get(self, request): return self.session.get( url = self.server + "?" 
+ request, headers = self.headers).json() def delete(self, request): # using try as workaround for versions < 1.3 due to we cant detect # if server used version 1.2, where auto-deletion is added try: result = self.session.post( url = self.server, headers = self.headers, data = request).json() except ValueError: # unable parse response as json because it can be empty (1.2), so simulate correct answer print("NOTICE: Received empty response. We interpret that as our paste has already been deleted.") from json import loads as json_loads result = json_loads('{"status":0}') if not result['status']: print("Paste successfully deleted!") elif result['status']: PBinCLIError("Something went wrong...\nError:\t\t{}".format(result['message'])) else: PBinCLIError("Something went wrong...\nError: Empty response.") def getVersion(self): result = self.session.get( url = self.server + '?jsonld=paste', headers = self.headers) try: jsonldSchema = result.json() return jsonldSchema['@context']['v']['@value'] \ if ('@context' in jsonldSchema and 'v' in jsonldSchema['@context'] and '@value' in jsonldSchema['@context']['v']) \ else 1 except ValueError: PBinCLIError("Unable parse response as json. 
Received (size = {}):\n{}".format(len(result.text), result.text)) def getServer(self): return self.server class Shortener: """Some parts of this class was taken from python-yourls (https://github.com/tflink/python-yourls/) library """ def __init__(self, settings=None): self.api = settings['short_api'] if self.api is None: PBinCLIError("Unable to activate link shortener without short_api.") # we checking which service is used, because some services doesn't require # any authentication, or have only one domain on which it working if self.api == 'yourls': self._yourls_init(settings) elif self.api == 'isgd' or self.api == 'vgd': self._gd_init() elif self.api == 'custom': self.apiurl = settings['short_url'] self.session = _config_requests(settings, True) def _yourls_init(self, settings): if not settings['short_url']: PBinCLIError("YOURLS: An API URL is required") # setting API URL apiurl = settings['short_url'] if apiurl.endswith('/yourls-api.php'): self.apiurl = apiurl elif apiurl.endswith('/'): self.apiurl = apiurl + 'yourls-api.php' else: PBinCLIError("YOURLS: Incorrect URL is provided.\n" + "It must contain full address to 'yourls-api.php' script (like https://example.com/yourls-api.php)\n" + "or just contain instance URL with '/' at the end (like https://example.com/)") # validating for required credentials if settings['short_user'] and settings['short_pass'] and settings['short_token'] is None: self.auth_args = {'username': settings['short_user'], 'password': settings['short_pass']} elif settings['short_user'] is None and settings['short_pass'] is None and settings['short_token']: self.auth_args = {'signature': settings['short_token']} elif settings['short_user'] is None and settings['short_pass'] is None and settings['short_token'] is None: self.auth_args = {} else: PBinCLIError("YOURLS: either username and password or token are required. 
Otherwise set to default (None)") def _gd_init(self): if self.api == 'isgd': self.apiurl = 'https://is.gd/' else: self.apiurl = 'https://v.gd/' self.useragent = 'Mozilla/5.0 (compatible; pbincli - https://github.com/r4sas/pbincli/)' def getlink(self, url): # that is api -> function mapper for running service-related function when getlink() used servicesList = { 'yourls': self._yourls, 'clckru': self._clckru, 'tinyurl': self._tinyurl, 'isgd': self._gd, 'vgd': self._gd, 'cuttly': self._cuttly, 'custom': self._custom } # run function selected by choosen API servicesList[self.api](url) def _yourls(self,url): request = {'action': 'shorturl', 'format': 'json', 'url': url} request.update(self.auth_args) result = self.session.post( url = self.apiurl, data = request) try: result.raise_for_status() except HTTPError: try: response = result.json() except ValueError: PBinCLIError("YOURLS: Unable parse response. Received (size = {}):\n{}".format(len(result.text), result.text)) else: PBinCLIError("YOURLS: Received error from API: {} with JSON {}".format(result, response)) else: response = result.json() if {'status', 'statusCode', 'message'} <= set(response.keys()): if response['status'] == 'fail': PBinCLIError("YOURLS: Received error from API: {}".format(response['message'])) if not 'shorturl' in response: PBinCLIError("YOURLS: Unknown error: {}".format(response['message'])) else: print("Short Link:\t{}".format(response['shorturl'])) else: PBinCLIError("YOURLS: No status, statusCode or message fields in response! 
Received:\n{}".format(response)) def _clckru(self, url): request = {'url': url} try: result = self.session.post( url = "https://clck.ru/--", data = request) print("Short Link:\t{}".format(result.text)) except Exception as ex: PBinCLIError("clck.ru: unexcepted behavior: {}".format(ex)) def _tinyurl(self, url): request = {'url': url} try: result = self.session.post( url = "https://tinyurl.com/api-create.php", data = request) print("Short Link:\t{}".format(result.text)) except Exception as ex: PBinCLIError("TinyURL: unexcepted behavior: {}".format(ex)) def _gd(self, url): request = { 'format': 'json', 'url': url, 'logstats': 0 # we don't want use any statistics } headers = { 'User-Agent': self.useragent} try: result = self.session.post( url = self.apiurl + "create.php", headers = headers, data = request) response = result.json() if 'shorturl' in response: print("Short Link:\t{}".format(response['shorturl'])) else: PBinCLIError("{}: got error {} from API: {}".format( "is.gd" if self.api == 'isgd' else 'v.gd', response['errorcode'], response['errormessage'])) except Exception as ex: PBinCLIError("{}: unexcepted behavior: {}".format( "is.gd" if self.api == 'isgd' else 'v.gd', ex)) def _cuttly(self, url): request = { 'url': url, 'domain': 0 } try: result = self.session.post( url = "https://cutt.ly/scripts/shortenUrl.php", data = request) print("Short Link:\t{}".format(result.text)) except Exception as ex: PBinCLIError("cutt.ly: unexcepted behavior: {}".format(ex)) def _custom(self, url): if self.apiurl is None: PBinCLIError("No short_url specified - link will not be shortened.") from urllib.parse import quote qUrl = quote(url, safe="") # urlencoded paste url rUrl = self.apiurl.replace("{{url}}", qUrl) try: result = self.session.get( url = rUrl)
except Exception as ex: PBinCLIError("Shorter: unexcepted behavior: {}".format(ex))
print("Short Link:\t{}".format(result.text))
random_line_split
api.py
import requests from requests import HTTPError from pbincli.utils import PBinCLIError def _config_requests(settings=None, shortener=False): if settings['no_insecure_warning']: from requests.packages.urllib3.exceptions import InsecureRequestWarning requests.packages.urllib3.disable_warnings(InsecureRequestWarning) session = requests.Session() session.verify = not settings['no_check_certificate'] if settings['auth'] and not shortener: # do not leak PrivateBin authorization to shortener services if settings['auth'] == 'basic' and settings['auth_user'] and settings['auth_pass']: session.auth = (settings['auth_user'], settings['auth_pass']) elif settings['auth'] == 'custom' and settings['auth_custom']: from json import loads as json_loads auth = json_loads(settings['auth_custom']) session.headers.update(auth) else: PBinCLIError("Incorrect authorization configuration") if settings['proxy']: scheme = settings['proxy'].split('://')[0] if (scheme.startswith("socks")): session.proxies.update({ "http": settings['proxy'], "https": settings['proxy'] }) else: session.proxies.update({scheme: settings['proxy']}) return session class PrivateBin: def __init__(self, settings=None): self.server = settings['server'] self.headers = {'X-Requested-With': 'JSONHttpRequest'} self.session = _config_requests(settings, False) def post(self, request): result = self.session.post( url = self.server, headers = self.headers, data = request) try: return result.json() except ValueError: PBinCLIError("Unable parse response as json. Received (size = {}):\n{}".format(len(result.text), result.text)) def get(self, request): return self.session.get( url = self.server + "?" 
+ request, headers = self.headers).json() def delete(self, request): # using try as workaround for versions < 1.3 due to we cant detect # if server used version 1.2, where auto-deletion is added try: result = self.session.post( url = self.server, headers = self.headers, data = request).json() except ValueError: # unable parse response as json because it can be empty (1.2), so simulate correct answer print("NOTICE: Received empty response. We interpret that as our paste has already been deleted.") from json import loads as json_loads result = json_loads('{"status":0}') if not result['status']: print("Paste successfully deleted!") elif result['status']: PBinCLIError("Something went wrong...\nError:\t\t{}".format(result['message'])) else:
def getVersion(self): result = self.session.get( url = self.server + '?jsonld=paste', headers = self.headers) try: jsonldSchema = result.json() return jsonldSchema['@context']['v']['@value'] \ if ('@context' in jsonldSchema and 'v' in jsonldSchema['@context'] and '@value' in jsonldSchema['@context']['v']) \ else 1 except ValueError: PBinCLIError("Unable parse response as json. Received (size = {}):\n{}".format(len(result.text), result.text)) def getServer(self): return self.server class Shortener: """Some parts of this class was taken from python-yourls (https://github.com/tflink/python-yourls/) library """ def __init__(self, settings=None): self.api = settings['short_api'] if self.api is None: PBinCLIError("Unable to activate link shortener without short_api.") # we checking which service is used, because some services doesn't require # any authentication, or have only one domain on which it working if self.api == 'yourls': self._yourls_init(settings) elif self.api == 'isgd' or self.api == 'vgd': self._gd_init() elif self.api == 'custom': self.apiurl = settings['short_url'] self.session = _config_requests(settings, True) def _yourls_init(self, settings): if not settings['short_url']: PBinCLIError("YOURLS: An API URL is required") # setting API URL apiurl = settings['short_url'] if apiurl.endswith('/yourls-api.php'): self.apiurl = apiurl elif apiurl.endswith('/'): self.apiurl = apiurl + 'yourls-api.php' else: PBinCLIError("YOURLS: Incorrect URL is provided.\n" + "It must contain full address to 'yourls-api.php' script (like https://example.com/yourls-api.php)\n" + "or just contain instance URL with '/' at the end (like https://example.com/)") # validating for required credentials if settings['short_user'] and settings['short_pass'] and settings['short_token'] is None: self.auth_args = {'username': settings['short_user'], 'password': settings['short_pass']} elif settings['short_user'] is None and settings['short_pass'] is None and settings['short_token']: 
self.auth_args = {'signature': settings['short_token']} elif settings['short_user'] is None and settings['short_pass'] is None and settings['short_token'] is None: self.auth_args = {} else: PBinCLIError("YOURLS: either username and password or token are required. Otherwise set to default (None)") def _gd_init(self): if self.api == 'isgd': self.apiurl = 'https://is.gd/' else: self.apiurl = 'https://v.gd/' self.useragent = 'Mozilla/5.0 (compatible; pbincli - https://github.com/r4sas/pbincli/)' def getlink(self, url): # that is api -> function mapper for running service-related function when getlink() used servicesList = { 'yourls': self._yourls, 'clckru': self._clckru, 'tinyurl': self._tinyurl, 'isgd': self._gd, 'vgd': self._gd, 'cuttly': self._cuttly, 'custom': self._custom } # run function selected by choosen API servicesList[self.api](url) def _yourls(self,url): request = {'action': 'shorturl', 'format': 'json', 'url': url} request.update(self.auth_args) result = self.session.post( url = self.apiurl, data = request) try: result.raise_for_status() except HTTPError: try: response = result.json() except ValueError: PBinCLIError("YOURLS: Unable parse response. Received (size = {}):\n{}".format(len(result.text), result.text)) else: PBinCLIError("YOURLS: Received error from API: {} with JSON {}".format(result, response)) else: response = result.json() if {'status', 'statusCode', 'message'} <= set(response.keys()): if response['status'] == 'fail': PBinCLIError("YOURLS: Received error from API: {}".format(response['message'])) if not 'shorturl' in response: PBinCLIError("YOURLS: Unknown error: {}".format(response['message'])) else: print("Short Link:\t{}".format(response['shorturl'])) else: PBinCLIError("YOURLS: No status, statusCode or message fields in response! 
Received:\n{}".format(response)) def _clckru(self, url): request = {'url': url} try: result = self.session.post( url = "https://clck.ru/--", data = request) print("Short Link:\t{}".format(result.text)) except Exception as ex: PBinCLIError("clck.ru: unexcepted behavior: {}".format(ex)) def _tinyurl(self, url): request = {'url': url} try: result = self.session.post( url = "https://tinyurl.com/api-create.php", data = request) print("Short Link:\t{}".format(result.text)) except Exception as ex: PBinCLIError("TinyURL: unexcepted behavior: {}".format(ex)) def _gd(self, url): request = { 'format': 'json', 'url': url, 'logstats': 0 # we don't want use any statistics } headers = { 'User-Agent': self.useragent} try: result = self.session.post( url = self.apiurl + "create.php", headers = headers, data = request) response = result.json() if 'shorturl' in response: print("Short Link:\t{}".format(response['shorturl'])) else: PBinCLIError("{}: got error {} from API: {}".format( "is.gd" if self.api == 'isgd' else 'v.gd', response['errorcode'], response['errormessage'])) except Exception as ex: PBinCLIError("{}: unexcepted behavior: {}".format( "is.gd" if self.api == 'isgd' else 'v.gd', ex)) def _cuttly(self, url): request = { 'url': url, 'domain': 0 } try: result = self.session.post( url = "https://cutt.ly/scripts/shortenUrl.php", data = request) print("Short Link:\t{}".format(result.text)) except Exception as ex: PBinCLIError("cutt.ly: unexcepted behavior: {}".format(ex)) def _custom(self, url): if self.apiurl is None: PBinCLIError("No short_url specified - link will not be shortened.") from urllib.parse import quote qUrl = quote(url, safe="") # urlencoded paste url rUrl = self.apiurl.replace("{{url}}", qUrl) try: result = self.session.get( url = rUrl) print("Short Link:\t{}".format(result.text)) except Exception as ex: PBinCLIError("Shorter: unexcepted behavior: {}".format(ex))
PBinCLIError("Something went wrong...\nError: Empty response.")
conditional_block
api.py
import requests from requests import HTTPError from pbincli.utils import PBinCLIError def _config_requests(settings=None, shortener=False): if settings['no_insecure_warning']: from requests.packages.urllib3.exceptions import InsecureRequestWarning requests.packages.urllib3.disable_warnings(InsecureRequestWarning) session = requests.Session() session.verify = not settings['no_check_certificate'] if settings['auth'] and not shortener: # do not leak PrivateBin authorization to shortener services if settings['auth'] == 'basic' and settings['auth_user'] and settings['auth_pass']: session.auth = (settings['auth_user'], settings['auth_pass']) elif settings['auth'] == 'custom' and settings['auth_custom']: from json import loads as json_loads auth = json_loads(settings['auth_custom']) session.headers.update(auth) else: PBinCLIError("Incorrect authorization configuration") if settings['proxy']: scheme = settings['proxy'].split('://')[0] if (scheme.startswith("socks")): session.proxies.update({ "http": settings['proxy'], "https": settings['proxy'] }) else: session.proxies.update({scheme: settings['proxy']}) return session class PrivateBin: def __init__(self, settings=None): self.server = settings['server'] self.headers = {'X-Requested-With': 'JSONHttpRequest'} self.session = _config_requests(settings, False) def post(self, request): result = self.session.post( url = self.server, headers = self.headers, data = request) try: return result.json() except ValueError: PBinCLIError("Unable parse response as json. Received (size = {}):\n{}".format(len(result.text), result.text)) def get(self, request): return self.session.get( url = self.server + "?" 
+ request, headers = self.headers).json() def delete(self, request): # using try as workaround for versions < 1.3 due to we cant detect # if server used version 1.2, where auto-deletion is added try: result = self.session.post( url = self.server, headers = self.headers, data = request).json() except ValueError: # unable parse response as json because it can be empty (1.2), so simulate correct answer print("NOTICE: Received empty response. We interpret that as our paste has already been deleted.") from json import loads as json_loads result = json_loads('{"status":0}') if not result['status']: print("Paste successfully deleted!") elif result['status']: PBinCLIError("Something went wrong...\nError:\t\t{}".format(result['message'])) else: PBinCLIError("Something went wrong...\nError: Empty response.") def getVersion(self): result = self.session.get( url = self.server + '?jsonld=paste', headers = self.headers) try: jsonldSchema = result.json() return jsonldSchema['@context']['v']['@value'] \ if ('@context' in jsonldSchema and 'v' in jsonldSchema['@context'] and '@value' in jsonldSchema['@context']['v']) \ else 1 except ValueError: PBinCLIError("Unable parse response as json. 
Received (size = {}):\n{}".format(len(result.text), result.text)) def getServer(self): return self.server class Shortener: """Some parts of this class was taken from python-yourls (https://github.com/tflink/python-yourls/) library """ def __init__(self, settings=None): self.api = settings['short_api'] if self.api is None: PBinCLIError("Unable to activate link shortener without short_api.") # we checking which service is used, because some services doesn't require # any authentication, or have only one domain on which it working if self.api == 'yourls': self._yourls_init(settings) elif self.api == 'isgd' or self.api == 'vgd': self._gd_init() elif self.api == 'custom': self.apiurl = settings['short_url'] self.session = _config_requests(settings, True) def _yourls_init(self, settings): if not settings['short_url']: PBinCLIError("YOURLS: An API URL is required") # setting API URL apiurl = settings['short_url'] if apiurl.endswith('/yourls-api.php'): self.apiurl = apiurl elif apiurl.endswith('/'): self.apiurl = apiurl + 'yourls-api.php' else: PBinCLIError("YOURLS: Incorrect URL is provided.\n" + "It must contain full address to 'yourls-api.php' script (like https://example.com/yourls-api.php)\n" + "or just contain instance URL with '/' at the end (like https://example.com/)") # validating for required credentials if settings['short_user'] and settings['short_pass'] and settings['short_token'] is None: self.auth_args = {'username': settings['short_user'], 'password': settings['short_pass']} elif settings['short_user'] is None and settings['short_pass'] is None and settings['short_token']: self.auth_args = {'signature': settings['short_token']} elif settings['short_user'] is None and settings['short_pass'] is None and settings['short_token'] is None: self.auth_args = {} else: PBinCLIError("YOURLS: either username and password or token are required. 
Otherwise set to default (None)") def _gd_init(self): if self.api == 'isgd': self.apiurl = 'https://is.gd/' else: self.apiurl = 'https://v.gd/' self.useragent = 'Mozilla/5.0 (compatible; pbincli - https://github.com/r4sas/pbincli/)' def getlink(self, url): # that is api -> function mapper for running service-related function when getlink() used servicesList = { 'yourls': self._yourls, 'clckru': self._clckru, 'tinyurl': self._tinyurl, 'isgd': self._gd, 'vgd': self._gd, 'cuttly': self._cuttly, 'custom': self._custom } # run function selected by choosen API servicesList[self.api](url) def _yourls(self,url): request = {'action': 'shorturl', 'format': 'json', 'url': url} request.update(self.auth_args) result = self.session.post( url = self.apiurl, data = request) try: result.raise_for_status() except HTTPError: try: response = result.json() except ValueError: PBinCLIError("YOURLS: Unable parse response. Received (size = {}):\n{}".format(len(result.text), result.text)) else: PBinCLIError("YOURLS: Received error from API: {} with JSON {}".format(result, response)) else: response = result.json() if {'status', 'statusCode', 'message'} <= set(response.keys()): if response['status'] == 'fail': PBinCLIError("YOURLS: Received error from API: {}".format(response['message'])) if not 'shorturl' in response: PBinCLIError("YOURLS: Unknown error: {}".format(response['message'])) else: print("Short Link:\t{}".format(response['shorturl'])) else: PBinCLIError("YOURLS: No status, statusCode or message fields in response! Received:\n{}".format(response)) def
(self, url): request = {'url': url} try: result = self.session.post( url = "https://clck.ru/--", data = request) print("Short Link:\t{}".format(result.text)) except Exception as ex: PBinCLIError("clck.ru: unexcepted behavior: {}".format(ex)) def _tinyurl(self, url): request = {'url': url} try: result = self.session.post( url = "https://tinyurl.com/api-create.php", data = request) print("Short Link:\t{}".format(result.text)) except Exception as ex: PBinCLIError("TinyURL: unexcepted behavior: {}".format(ex)) def _gd(self, url): request = { 'format': 'json', 'url': url, 'logstats': 0 # we don't want use any statistics } headers = { 'User-Agent': self.useragent} try: result = self.session.post( url = self.apiurl + "create.php", headers = headers, data = request) response = result.json() if 'shorturl' in response: print("Short Link:\t{}".format(response['shorturl'])) else: PBinCLIError("{}: got error {} from API: {}".format( "is.gd" if self.api == 'isgd' else 'v.gd', response['errorcode'], response['errormessage'])) except Exception as ex: PBinCLIError("{}: unexcepted behavior: {}".format( "is.gd" if self.api == 'isgd' else 'v.gd', ex)) def _cuttly(self, url): request = { 'url': url, 'domain': 0 } try: result = self.session.post( url = "https://cutt.ly/scripts/shortenUrl.php", data = request) print("Short Link:\t{}".format(result.text)) except Exception as ex: PBinCLIError("cutt.ly: unexcepted behavior: {}".format(ex)) def _custom(self, url): if self.apiurl is None: PBinCLIError("No short_url specified - link will not be shortened.") from urllib.parse import quote qUrl = quote(url, safe="") # urlencoded paste url rUrl = self.apiurl.replace("{{url}}", qUrl) try: result = self.session.get( url = rUrl) print("Short Link:\t{}".format(result.text)) except Exception as ex: PBinCLIError("Shorter: unexcepted behavior: {}".format(ex))
_clckru
identifier_name
txn_process.go
// Copyright (c) 2018 Cisco and/or its affiliates. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package kvscheduler import ( "reflect" "time" "github.com/gogo/protobuf/proto" "github.com/ligato/cn-infra/logging" kvs "github.com/ligato/vpp-agent/plugins/kvscheduler/api" "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/graph" "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/utils" ) // preProcessedTxn appends un-marshalled (or filtered retry) values to a queued // transaction and sets the sequence number. type preProcessedTxn struct { seqNum uint64 values []kvForTxn args *queuedTxn } // kvForTxn represents a new value for a given key to be applied in a transaction. type kvForTxn struct { key string value proto.Message metadata kvs.Metadata origin kvs.ValueOrigin isRevert bool } // consumeTransactions pulls the oldest queued transaction and starts the processing. func (s *Scheduler) consumeTransactions() { defer s.wg.Done() for { txn, canceled := s.dequeueTxn() if canceled { return } s.processTransaction(txn) } } // processTransaction processes transaction in 6 steps: // 1. Pre-processing: transaction parameters are initialized, retry operations // are filtered from the obsolete ones and for the resync the graph is refreshed // 2. Simulation (skipped for SB notification): simulating transaction without // actually executing any of the Add/Delete/Modify/Update operations in order // to obtain the "execution plan" // 3. 
Pre-recording: logging transaction arguments + plan before execution to // persist some information in case there is a crash during execution // 4. Execution: executing the transaction, collecting errors // 5. Recording: recording the finalized transaction (log + in-memory) // 6. Post-processing: scheduling retry for failed operations, propagating errors // to the subscribers and to the caller of blocking commit func (s *Scheduler) processTransaction(qTxn *queuedTxn) { var ( simulatedOps kvs.RecordedTxnOps executedOps kvs.RecordedTxnOps failed map[string]bool startTime time.Time stopTime time.Time ) s.txnLock.Lock() defer s.txnLock.Unlock() // 1. Pre-processing: startTime = time.Now() txn, preErrors := s.preProcessTransaction(qTxn) eligibleForExec := len(txn.values) > 0 && len(preErrors) == 0 // 2. Ordering: txn.values = s.orderValuesByOp(txn.values) // 3. Simulation: if eligibleForExec { simulatedOps, _ = s.executeTransaction(txn, true) } // 4. Pre-recording preTxnRecord := s.preRecordTransaction(txn, simulatedOps, preErrors) // 5. Execution: if eligibleForExec { executedOps, failed = s.executeTransaction(txn, false) } stopTime = time.Now() // 6. Recording: s.recordTransaction(preTxnRecord, executedOps, startTime, stopTime) // 7. Post-processing: s.postProcessTransaction(txn, executedOps, failed, preErrors) } // preProcessTransaction initializes transaction parameters, filters obsolete retry // operations and refreshes the graph for resync. 
func (s *Scheduler) preProcessTransaction(qTxn *queuedTxn) (txn *preProcessedTxn, errors []kvs.KeyWithError) { // allocate new transaction sequence number preTxn := &preProcessedTxn{seqNum: s.txnSeqNumber, args: qTxn} s.txnSeqNumber++ switch qTxn.txnType { case kvs.SBNotification: s.preProcessNotification(qTxn, preTxn) case kvs.NBTransaction: errors = s.preProcessNBTransaction(qTxn, preTxn) case kvs.RetryFailedOps: s.preProcessRetryTxn(qTxn, preTxn) } return preTxn, errors } // preProcessNotification filters out non-valid SB notification. func (s *Scheduler) preProcessNotification(qTxn *queuedTxn, preTxn *preProcessedTxn) { graphR := s.graph.Read() defer graphR.Release() if !s.validTxnValue(graphR, qTxn.sb.value.Key, qTxn.sb.value.Value, kvs.FromSB, preTxn.seqNum) { return } preTxn.values = append(preTxn.values, kvForTxn{ key: qTxn.sb.value.Key, value: qTxn.sb.value.Value, metadata: qTxn.sb.metadata, origin: kvs.FromSB, }) } // preProcessNBTransaction unmarshalls transaction values and for resync also refreshes the graph. 
func (s *Scheduler) preProcessNBTransaction(qTxn *queuedTxn, preTxn *preProcessedTxn) (errors []kvs.KeyWithError) { // unmarshall all values graphR := s.graph.Read() for key, lazyValue := range qTxn.nb.value { descriptor := s.registry.GetDescriptorForKey(key) if descriptor == nil { // unimplemented base value errors = append(errors, kvs.KeyWithError{Key: key, TxnOperation: kvs.PreProcess, Error: kvs.ErrUnimplementedKey}) continue } var value proto.Message if lazyValue != nil { // create an instance of the target proto.Message type valueType := proto.MessageType(descriptor.ValueTypeName) if valueType == nil { errors = append(errors, kvs.KeyWithError{Key: key, TxnOperation: kvs.PreProcess, Error: kvs.ErrUnregisteredValueType}) continue } value = reflect.New(valueType.Elem()).Interface().(proto.Message) // try to deserialize the value err := lazyValue.GetValue(value) if err != nil { errors = append(errors, kvs.KeyWithError{Key: key, TxnOperation: kvs.PreProcess, Error: err}) continue } } if !s.validTxnValue(graphR, key, value, kvs.FromNB, preTxn.seqNum) { continue } preTxn.values = append(preTxn.values, kvForTxn{ key: key, value: value, origin: kvs.FromNB, }) } graphR.Release() // for resync refresh the graph + collect deletes if len(errors) == 0 && qTxn.nb.resyncType != kvs.NotResync { graphW := s.graph.Write(false) defer graphW.Release() defer graphW.Save() s.resyncCount++ if qTxn.nb.resyncType == kvs.DownstreamResync { // for downstream resync it is assumed that scheduler is in-sync with NB currentNodes := graphW.GetNodes(nil, graph.WithFlags(&OriginFlag{kvs.FromNB}), graph.WithoutFlags(&DerivedFlag{})) for _, node := range currentNodes { lastChange := getNodeLastChange(node) preTxn.values = append(preTxn.values, kvForTxn{ key: node.GetKey(), value: lastChange.value, origin: kvs.FromNB, isRevert: lastChange.revert, }) } } // build the set of keys currently in NB nbKeys := utils.NewMapBasedKeySet() for _, kv := range preTxn.values { nbKeys.Add(kv.key) } // unless 
this is only UpstreamResync, refresh the graph with the current // state of SB if qTxn.nb.resyncType != kvs.UpstreamResync { s.refreshGraph(graphW, nil, &resyncData{ first: s.resyncCount == 1, values: preTxn.values, verbose: qTxn.nb.verboseRefresh}) } // collect deletes for obsolete values currentNodes := graphW.GetNodes(nil, graph.WithFlags(&OriginFlag{kvs.FromNB}), graph.WithoutFlags(&DerivedFlag{})) for _, node := range currentNodes { if nbKey := nbKeys.Has(node.GetKey()); nbKey { continue } preTxn.values = append(preTxn.values, kvForTxn{ key: node.GetKey(), value: nil, // remove origin: kvs.FromNB, }) } // update (record) SB values sbNodes := graphW.GetNodes(nil, graph.WithFlags(&OriginFlag{kvs.FromSB}), graph.WithoutFlags(&DerivedFlag{})) for _, node := range sbNodes { if nbKey := nbKeys.Has(node.GetKey()); nbKey { continue } preTxn.values = append(preTxn.values, kvForTxn{ key: node.GetKey(), value: node.GetValue(), origin: kvs.FromSB, }) } } return errors } // preProcessRetryTxn filters out obsolete retry operations. func (s *Scheduler) preProcessRetryTxn(qTxn *queuedTxn, preTxn *preProcessedTxn) { graphR := s.graph.Read() defer graphR.Release() for _, key := range qTxn.retry.keys.Iterate() { node := graphR.GetNode(key) if node == nil { continue } lastChange := getNodeLastChange(node) if lastChange.txnSeqNum > qTxn.retry.txnSeqNum { // obsolete retry, the value has been changed since the failure continue } preTxn.values = append(preTxn.values, kvForTxn{ key: key, value: lastChange.value, origin: lastChange.origin, // FromNB isRevert: lastChange.revert, }) } } // postProcessTransaction schedules retry for failed operations and propagates // errors to the subscribers and to the caller of a blocking commit. 
func (s *Scheduler) postProcessTransaction(txn *preProcessedTxn, executed kvs.RecordedTxnOps, failed map[string]bool, preErrors []kvs.KeyWithError) { // refresh base values with error or with a derived value that has an error if len(failed) > 0 { graphW := s.graph.Write(false) toRefresh := utils.NewMapBasedKeySet() for key := range failed { toRefresh.Add(key) } s.refreshGraph(graphW, toRefresh, nil) graphW.Save() // split failed values based on transactions that performed the last change retryTxns := make(map[uint64]*retryOps) for retryKey, retriable := range failed { if !retriable { continue } node := graphW.GetNode(retryKey) if node == nil { // delete returned error, but refresh showed that it is not in SB anymore anyway continue } lastChange := getNodeLastChange(node) seqNum := lastChange.txnSeqNum if lastChange.retryEnabled { if _, has := retryTxns[seqNum]; !has { period := lastChange.retryPeriod if seqNum == txn.seqNum && txn.args.txnType == kvs.RetryFailedOps && lastChange.retryExpBackoff { period = txn.args.retry.period * 2 } retryTxns[seqNum] = &retryOps{ txnSeqNum: seqNum, period: period, keys: utils.NewMapBasedKeySet(), } } retryTxns[seqNum].keys.Add(retryKey) } } // schedule a series of re-try transactions for failed values for _, retryTxn := range retryTxns { s.enqueueRetry(retryTxn) } graphW.Release() } // collect errors var txnErrors []kvs.KeyWithError txnErrors = append(txnErrors, preErrors...) 
for _, txnOp := range executed { if txnOp.PrevErr == nil && txnOp.NewErr == nil { continue } txnErrors = append(txnErrors, kvs.KeyWithError{ Key: txnOp.Key, TxnOperation: txnOp.Operation, Error: txnOp.NewErr, }) } // for blocking txn, send non-nil errors to the resultChan if txn.args.txnType == kvs.NBTransaction && txn.args.nb.isBlocking { var ( errors []kvs.KeyWithError txnErr error ) for _, kvWithError := range txnErrors { if kvWithError.Error != nil { errors = append(errors, kvWithError) } } if len(errors) > 0 { txnErr = kvs.NewTransactionError(nil, errors) } select { case txn.args.nb.resultChan <- txnResult{txnSeqNum: txn.seqNum, err: txnErr}: default: s.Log.WithField("txnSeq", txn.seqNum). Warn("Failed to deliver transaction result to the caller") } } // send errors to the subscribers for _, errSub := range s.errorSubs { for _, kvWithError := range txnErrors { if errSub.selector == nil || errSub.selector(kvWithError.Key) { select { case errSub.channel <- kvWithError: default: s.Log.WithField("txnSeq", txn.seqNum). Warn("Failed to deliver transaction error to a subscriber") } } } } } // validTxnValue checks validity of a kv-pair to be applied in a transaction. func (s *Scheduler) validTxnValue(graphR graph.ReadAccess, key string, value proto.Message, origin kvs.ValueOrigin, txnSeqNum uint64) bool { if key == "" { s.Log.WithFields(logging.Fields{ "txnSeqNum": txnSeqNum, }).Warn("Empty key for a value in the transaction") return false } if origin == kvs.FromSB { descriptor := s.registry.GetDescriptorForKey(key) if descriptor == nil { s.Log.WithFields(logging.Fields{ "txnSeqNum": txnSeqNum, "key": key, }).Debug("Ignoring unimplemented notification") return false } } node := graphR.GetNode(key) if node != nil { if isNodeDerived(node)
if origin == kvs.FromSB && getNodeOrigin(node) == kvs.FromNB { s.Log.WithFields(logging.Fields{ "txnSeqNum": txnSeqNum, "key": key, }).Debug("Ignoring notification for a NB-managed value") return false } } return true }
{ s.Log.WithFields(logging.Fields{ "txnSeqNum": txnSeqNum, "key": key, }).Warn("Transaction attempting to change a derived value") return false }
conditional_block
txn_process.go
// Copyright (c) 2018 Cisco and/or its affiliates. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package kvscheduler import ( "reflect" "time" "github.com/gogo/protobuf/proto" "github.com/ligato/cn-infra/logging" kvs "github.com/ligato/vpp-agent/plugins/kvscheduler/api" "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/graph" "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/utils" ) // preProcessedTxn appends un-marshalled (or filtered retry) values to a queued // transaction and sets the sequence number. type preProcessedTxn struct { seqNum uint64 values []kvForTxn args *queuedTxn } // kvForTxn represents a new value for a given key to be applied in a transaction. type kvForTxn struct { key string value proto.Message metadata kvs.Metadata origin kvs.ValueOrigin isRevert bool } // consumeTransactions pulls the oldest queued transaction and starts the processing. func (s *Scheduler) consumeTransactions() { defer s.wg.Done() for { txn, canceled := s.dequeueTxn() if canceled { return } s.processTransaction(txn) } } // processTransaction processes transaction in 6 steps: // 1. Pre-processing: transaction parameters are initialized, retry operations // are filtered from the obsolete ones and for the resync the graph is refreshed // 2. Simulation (skipped for SB notification): simulating transaction without // actually executing any of the Add/Delete/Modify/Update operations in order // to obtain the "execution plan" // 3. 
Pre-recording: logging transaction arguments + plan before execution to // persist some information in case there is a crash during execution // 4. Execution: executing the transaction, collecting errors // 5. Recording: recording the finalized transaction (log + in-memory) // 6. Post-processing: scheduling retry for failed operations, propagating errors // to the subscribers and to the caller of blocking commit func (s *Scheduler) processTransaction(qTxn *queuedTxn) { var ( simulatedOps kvs.RecordedTxnOps executedOps kvs.RecordedTxnOps failed map[string]bool startTime time.Time stopTime time.Time ) s.txnLock.Lock() defer s.txnLock.Unlock() // 1. Pre-processing: startTime = time.Now() txn, preErrors := s.preProcessTransaction(qTxn) eligibleForExec := len(txn.values) > 0 && len(preErrors) == 0 // 2. Ordering: txn.values = s.orderValuesByOp(txn.values) // 3. Simulation: if eligibleForExec { simulatedOps, _ = s.executeTransaction(txn, true) } // 4. Pre-recording preTxnRecord := s.preRecordTransaction(txn, simulatedOps, preErrors) // 5. Execution: if eligibleForExec { executedOps, failed = s.executeTransaction(txn, false) } stopTime = time.Now() // 6. Recording: s.recordTransaction(preTxnRecord, executedOps, startTime, stopTime) // 7. Post-processing: s.postProcessTransaction(txn, executedOps, failed, preErrors) } // preProcessTransaction initializes transaction parameters, filters obsolete retry // operations and refreshes the graph for resync. 
func (s *Scheduler) preProcessTransaction(qTxn *queuedTxn) (txn *preProcessedTxn, errors []kvs.KeyWithError) { // allocate new transaction sequence number preTxn := &preProcessedTxn{seqNum: s.txnSeqNumber, args: qTxn} s.txnSeqNumber++ switch qTxn.txnType { case kvs.SBNotification: s.preProcessNotification(qTxn, preTxn) case kvs.NBTransaction: errors = s.preProcessNBTransaction(qTxn, preTxn) case kvs.RetryFailedOps: s.preProcessRetryTxn(qTxn, preTxn) } return preTxn, errors } // preProcessNotification filters out non-valid SB notification. func (s *Scheduler) preProcessNotification(qTxn *queuedTxn, preTxn *preProcessedTxn) { graphR := s.graph.Read() defer graphR.Release() if !s.validTxnValue(graphR, qTxn.sb.value.Key, qTxn.sb.value.Value, kvs.FromSB, preTxn.seqNum) { return } preTxn.values = append(preTxn.values, kvForTxn{ key: qTxn.sb.value.Key, value: qTxn.sb.value.Value, metadata: qTxn.sb.metadata, origin: kvs.FromSB, }) } // preProcessNBTransaction unmarshalls transaction values and for resync also refreshes the graph. 
func (s *Scheduler) preProcessNBTransaction(qTxn *queuedTxn, preTxn *preProcessedTxn) (errors []kvs.KeyWithError) { // unmarshall all values graphR := s.graph.Read() for key, lazyValue := range qTxn.nb.value { descriptor := s.registry.GetDescriptorForKey(key) if descriptor == nil { // unimplemented base value errors = append(errors, kvs.KeyWithError{Key: key, TxnOperation: kvs.PreProcess, Error: kvs.ErrUnimplementedKey}) continue } var value proto.Message if lazyValue != nil { // create an instance of the target proto.Message type valueType := proto.MessageType(descriptor.ValueTypeName) if valueType == nil { errors = append(errors, kvs.KeyWithError{Key: key, TxnOperation: kvs.PreProcess, Error: kvs.ErrUnregisteredValueType}) continue } value = reflect.New(valueType.Elem()).Interface().(proto.Message) // try to deserialize the value err := lazyValue.GetValue(value) if err != nil { errors = append(errors, kvs.KeyWithError{Key: key, TxnOperation: kvs.PreProcess, Error: err}) continue } } if !s.validTxnValue(graphR, key, value, kvs.FromNB, preTxn.seqNum) { continue } preTxn.values = append(preTxn.values, kvForTxn{ key: key, value: value, origin: kvs.FromNB, }) } graphR.Release() // for resync refresh the graph + collect deletes if len(errors) == 0 && qTxn.nb.resyncType != kvs.NotResync { graphW := s.graph.Write(false) defer graphW.Release() defer graphW.Save() s.resyncCount++ if qTxn.nb.resyncType == kvs.DownstreamResync { // for downstream resync it is assumed that scheduler is in-sync with NB currentNodes := graphW.GetNodes(nil, graph.WithFlags(&OriginFlag{kvs.FromNB}), graph.WithoutFlags(&DerivedFlag{})) for _, node := range currentNodes { lastChange := getNodeLastChange(node) preTxn.values = append(preTxn.values, kvForTxn{ key: node.GetKey(), value: lastChange.value, origin: kvs.FromNB, isRevert: lastChange.revert, }) } } // build the set of keys currently in NB nbKeys := utils.NewMapBasedKeySet() for _, kv := range preTxn.values { nbKeys.Add(kv.key) } // unless 
this is only UpstreamResync, refresh the graph with the current // state of SB if qTxn.nb.resyncType != kvs.UpstreamResync { s.refreshGraph(graphW, nil, &resyncData{ first: s.resyncCount == 1, values: preTxn.values, verbose: qTxn.nb.verboseRefresh}) } // collect deletes for obsolete values currentNodes := graphW.GetNodes(nil, graph.WithFlags(&OriginFlag{kvs.FromNB}), graph.WithoutFlags(&DerivedFlag{})) for _, node := range currentNodes { if nbKey := nbKeys.Has(node.GetKey()); nbKey { continue } preTxn.values = append(preTxn.values, kvForTxn{ key: node.GetKey(), value: nil, // remove origin: kvs.FromNB, }) } // update (record) SB values sbNodes := graphW.GetNodes(nil, graph.WithFlags(&OriginFlag{kvs.FromSB}), graph.WithoutFlags(&DerivedFlag{})) for _, node := range sbNodes { if nbKey := nbKeys.Has(node.GetKey()); nbKey { continue } preTxn.values = append(preTxn.values, kvForTxn{ key: node.GetKey(), value: node.GetValue(), origin: kvs.FromSB, }) } } return errors } // preProcessRetryTxn filters out obsolete retry operations. func (s *Scheduler) preProcessRetryTxn(qTxn *queuedTxn, preTxn *preProcessedTxn)
// postProcessTransaction schedules retry for failed operations and propagates // errors to the subscribers and to the caller of a blocking commit. func (s *Scheduler) postProcessTransaction(txn *preProcessedTxn, executed kvs.RecordedTxnOps, failed map[string]bool, preErrors []kvs.KeyWithError) { // refresh base values with error or with a derived value that has an error if len(failed) > 0 { graphW := s.graph.Write(false) toRefresh := utils.NewMapBasedKeySet() for key := range failed { toRefresh.Add(key) } s.refreshGraph(graphW, toRefresh, nil) graphW.Save() // split failed values based on transactions that performed the last change retryTxns := make(map[uint64]*retryOps) for retryKey, retriable := range failed { if !retriable { continue } node := graphW.GetNode(retryKey) if node == nil { // delete returned error, but refresh showed that it is not in SB anymore anyway continue } lastChange := getNodeLastChange(node) seqNum := lastChange.txnSeqNum if lastChange.retryEnabled { if _, has := retryTxns[seqNum]; !has { period := lastChange.retryPeriod if seqNum == txn.seqNum && txn.args.txnType == kvs.RetryFailedOps && lastChange.retryExpBackoff { period = txn.args.retry.period * 2 } retryTxns[seqNum] = &retryOps{ txnSeqNum: seqNum, period: period, keys: utils.NewMapBasedKeySet(), } } retryTxns[seqNum].keys.Add(retryKey) } } // schedule a series of re-try transactions for failed values for _, retryTxn := range retryTxns { s.enqueueRetry(retryTxn) } graphW.Release() } // collect errors var txnErrors []kvs.KeyWithError txnErrors = append(txnErrors, preErrors...) 
for _, txnOp := range executed { if txnOp.PrevErr == nil && txnOp.NewErr == nil { continue } txnErrors = append(txnErrors, kvs.KeyWithError{ Key: txnOp.Key, TxnOperation: txnOp.Operation, Error: txnOp.NewErr, }) } // for blocking txn, send non-nil errors to the resultChan if txn.args.txnType == kvs.NBTransaction && txn.args.nb.isBlocking { var ( errors []kvs.KeyWithError txnErr error ) for _, kvWithError := range txnErrors { if kvWithError.Error != nil { errors = append(errors, kvWithError) } } if len(errors) > 0 { txnErr = kvs.NewTransactionError(nil, errors) } select { case txn.args.nb.resultChan <- txnResult{txnSeqNum: txn.seqNum, err: txnErr}: default: s.Log.WithField("txnSeq", txn.seqNum). Warn("Failed to deliver transaction result to the caller") } } // send errors to the subscribers for _, errSub := range s.errorSubs { for _, kvWithError := range txnErrors { if errSub.selector == nil || errSub.selector(kvWithError.Key) { select { case errSub.channel <- kvWithError: default: s.Log.WithField("txnSeq", txn.seqNum). Warn("Failed to deliver transaction error to a subscriber") } } } } } // validTxnValue checks validity of a kv-pair to be applied in a transaction. 
func (s *Scheduler) validTxnValue(graphR graph.ReadAccess, key string, value proto.Message, origin kvs.ValueOrigin, txnSeqNum uint64) bool { if key == "" { s.Log.WithFields(logging.Fields{ "txnSeqNum": txnSeqNum, }).Warn("Empty key for a value in the transaction") return false } if origin == kvs.FromSB { descriptor := s.registry.GetDescriptorForKey(key) if descriptor == nil { s.Log.WithFields(logging.Fields{ "txnSeqNum": txnSeqNum, "key": key, }).Debug("Ignoring unimplemented notification") return false } } node := graphR.GetNode(key) if node != nil { if isNodeDerived(node) { s.Log.WithFields(logging.Fields{ "txnSeqNum": txnSeqNum, "key": key, }).Warn("Transaction attempting to change a derived value") return false } if origin == kvs.FromSB && getNodeOrigin(node) == kvs.FromNB { s.Log.WithFields(logging.Fields{ "txnSeqNum": txnSeqNum, "key": key, }).Debug("Ignoring notification for a NB-managed value") return false } } return true }
{ graphR := s.graph.Read() defer graphR.Release() for _, key := range qTxn.retry.keys.Iterate() { node := graphR.GetNode(key) if node == nil { continue } lastChange := getNodeLastChange(node) if lastChange.txnSeqNum > qTxn.retry.txnSeqNum { // obsolete retry, the value has been changed since the failure continue } preTxn.values = append(preTxn.values, kvForTxn{ key: key, value: lastChange.value, origin: lastChange.origin, // FromNB isRevert: lastChange.revert, }) } }
identifier_body
txn_process.go
// Copyright (c) 2018 Cisco and/or its affiliates. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package kvscheduler import ( "reflect" "time" "github.com/gogo/protobuf/proto" "github.com/ligato/cn-infra/logging" kvs "github.com/ligato/vpp-agent/plugins/kvscheduler/api" "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/graph" "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/utils" ) // preProcessedTxn appends un-marshalled (or filtered retry) values to a queued // transaction and sets the sequence number. type preProcessedTxn struct { seqNum uint64 values []kvForTxn args *queuedTxn } // kvForTxn represents a new value for a given key to be applied in a transaction. type kvForTxn struct { key string value proto.Message metadata kvs.Metadata origin kvs.ValueOrigin isRevert bool } // consumeTransactions pulls the oldest queued transaction and starts the processing. func (s *Scheduler) consumeTransactions() { defer s.wg.Done() for { txn, canceled := s.dequeueTxn() if canceled { return } s.processTransaction(txn) } } // processTransaction processes transaction in 6 steps: // 1. Pre-processing: transaction parameters are initialized, retry operations // are filtered from the obsolete ones and for the resync the graph is refreshed // 2. Simulation (skipped for SB notification): simulating transaction without // actually executing any of the Add/Delete/Modify/Update operations in order // to obtain the "execution plan" // 3. 
Pre-recording: logging transaction arguments + plan before execution to // persist some information in case there is a crash during execution // 4. Execution: executing the transaction, collecting errors // 5. Recording: recording the finalized transaction (log + in-memory) // 6. Post-processing: scheduling retry for failed operations, propagating errors // to the subscribers and to the caller of blocking commit func (s *Scheduler) processTransaction(qTxn *queuedTxn) { var ( simulatedOps kvs.RecordedTxnOps executedOps kvs.RecordedTxnOps failed map[string]bool startTime time.Time stopTime time.Time ) s.txnLock.Lock() defer s.txnLock.Unlock() // 1. Pre-processing: startTime = time.Now() txn, preErrors := s.preProcessTransaction(qTxn) eligibleForExec := len(txn.values) > 0 && len(preErrors) == 0 // 2. Ordering: txn.values = s.orderValuesByOp(txn.values) // 3. Simulation: if eligibleForExec { simulatedOps, _ = s.executeTransaction(txn, true) } // 4. Pre-recording preTxnRecord := s.preRecordTransaction(txn, simulatedOps, preErrors) // 5. Execution: if eligibleForExec { executedOps, failed = s.executeTransaction(txn, false) } stopTime = time.Now() // 6. Recording: s.recordTransaction(preTxnRecord, executedOps, startTime, stopTime) // 7. Post-processing: s.postProcessTransaction(txn, executedOps, failed, preErrors) } // preProcessTransaction initializes transaction parameters, filters obsolete retry // operations and refreshes the graph for resync. 
func (s *Scheduler) preProcessTransaction(qTxn *queuedTxn) (txn *preProcessedTxn, errors []kvs.KeyWithError) { // allocate new transaction sequence number preTxn := &preProcessedTxn{seqNum: s.txnSeqNumber, args: qTxn} s.txnSeqNumber++ switch qTxn.txnType { case kvs.SBNotification: s.preProcessNotification(qTxn, preTxn) case kvs.NBTransaction: errors = s.preProcessNBTransaction(qTxn, preTxn) case kvs.RetryFailedOps: s.preProcessRetryTxn(qTxn, preTxn) } return preTxn, errors } // preProcessNotification filters out non-valid SB notification. func (s *Scheduler) preProcessNotification(qTxn *queuedTxn, preTxn *preProcessedTxn) { graphR := s.graph.Read() defer graphR.Release() if !s.validTxnValue(graphR, qTxn.sb.value.Key, qTxn.sb.value.Value, kvs.FromSB, preTxn.seqNum) { return } preTxn.values = append(preTxn.values, kvForTxn{ key: qTxn.sb.value.Key, value: qTxn.sb.value.Value, metadata: qTxn.sb.metadata, origin: kvs.FromSB, }) } // preProcessNBTransaction unmarshalls transaction values and for resync also refreshes the graph. 
func (s *Scheduler) preProcessNBTransaction(qTxn *queuedTxn, preTxn *preProcessedTxn) (errors []kvs.KeyWithError) { // unmarshall all values graphR := s.graph.Read() for key, lazyValue := range qTxn.nb.value { descriptor := s.registry.GetDescriptorForKey(key) if descriptor == nil { // unimplemented base value errors = append(errors, kvs.KeyWithError{Key: key, TxnOperation: kvs.PreProcess, Error: kvs.ErrUnimplementedKey}) continue } var value proto.Message if lazyValue != nil { // create an instance of the target proto.Message type valueType := proto.MessageType(descriptor.ValueTypeName) if valueType == nil { errors = append(errors, kvs.KeyWithError{Key: key, TxnOperation: kvs.PreProcess, Error: kvs.ErrUnregisteredValueType}) continue } value = reflect.New(valueType.Elem()).Interface().(proto.Message) // try to deserialize the value err := lazyValue.GetValue(value) if err != nil { errors = append(errors, kvs.KeyWithError{Key: key, TxnOperation: kvs.PreProcess, Error: err}) continue } } if !s.validTxnValue(graphR, key, value, kvs.FromNB, preTxn.seqNum) { continue } preTxn.values = append(preTxn.values, kvForTxn{ key: key, value: value, origin: kvs.FromNB, }) } graphR.Release() // for resync refresh the graph + collect deletes if len(errors) == 0 && qTxn.nb.resyncType != kvs.NotResync { graphW := s.graph.Write(false) defer graphW.Release() defer graphW.Save() s.resyncCount++ if qTxn.nb.resyncType == kvs.DownstreamResync { // for downstream resync it is assumed that scheduler is in-sync with NB currentNodes := graphW.GetNodes(nil, graph.WithFlags(&OriginFlag{kvs.FromNB}), graph.WithoutFlags(&DerivedFlag{})) for _, node := range currentNodes { lastChange := getNodeLastChange(node) preTxn.values = append(preTxn.values, kvForTxn{ key: node.GetKey(), value: lastChange.value, origin: kvs.FromNB, isRevert: lastChange.revert, }) } } // build the set of keys currently in NB nbKeys := utils.NewMapBasedKeySet() for _, kv := range preTxn.values { nbKeys.Add(kv.key) } // unless 
this is only UpstreamResync, refresh the graph with the current // state of SB if qTxn.nb.resyncType != kvs.UpstreamResync { s.refreshGraph(graphW, nil, &resyncData{ first: s.resyncCount == 1, values: preTxn.values, verbose: qTxn.nb.verboseRefresh}) } // collect deletes for obsolete values currentNodes := graphW.GetNodes(nil, graph.WithFlags(&OriginFlag{kvs.FromNB}), graph.WithoutFlags(&DerivedFlag{})) for _, node := range currentNodes { if nbKey := nbKeys.Has(node.GetKey()); nbKey { continue } preTxn.values = append(preTxn.values, kvForTxn{ key: node.GetKey(), value: nil, // remove origin: kvs.FromNB, }) } // update (record) SB values sbNodes := graphW.GetNodes(nil, graph.WithFlags(&OriginFlag{kvs.FromSB}), graph.WithoutFlags(&DerivedFlag{})) for _, node := range sbNodes { if nbKey := nbKeys.Has(node.GetKey()); nbKey { continue } preTxn.values = append(preTxn.values, kvForTxn{ key: node.GetKey(), value: node.GetValue(), origin: kvs.FromSB, }) } } return errors } // preProcessRetryTxn filters out obsolete retry operations. func (s *Scheduler) preProcessRetryTxn(qTxn *queuedTxn, preTxn *preProcessedTxn) { graphR := s.graph.Read() defer graphR.Release() for _, key := range qTxn.retry.keys.Iterate() { node := graphR.GetNode(key) if node == nil { continue } lastChange := getNodeLastChange(node) if lastChange.txnSeqNum > qTxn.retry.txnSeqNum { // obsolete retry, the value has been changed since the failure continue } preTxn.values = append(preTxn.values, kvForTxn{ key: key, value: lastChange.value, origin: lastChange.origin, // FromNB isRevert: lastChange.revert, }) } } // postProcessTransaction schedules retry for failed operations and propagates // errors to the subscribers and to the caller of a blocking commit. 
func (s *Scheduler) postProcessTransaction(txn *preProcessedTxn, executed kvs.RecordedTxnOps, failed map[string]bool, preErrors []kvs.KeyWithError) { // refresh base values with error or with a derived value that has an error if len(failed) > 0 { graphW := s.graph.Write(false) toRefresh := utils.NewMapBasedKeySet() for key := range failed { toRefresh.Add(key) } s.refreshGraph(graphW, toRefresh, nil) graphW.Save() // split failed values based on transactions that performed the last change retryTxns := make(map[uint64]*retryOps) for retryKey, retriable := range failed { if !retriable { continue } node := graphW.GetNode(retryKey) if node == nil { // delete returned error, but refresh showed that it is not in SB anymore anyway continue } lastChange := getNodeLastChange(node) seqNum := lastChange.txnSeqNum if lastChange.retryEnabled { if _, has := retryTxns[seqNum]; !has { period := lastChange.retryPeriod if seqNum == txn.seqNum && txn.args.txnType == kvs.RetryFailedOps && lastChange.retryExpBackoff { period = txn.args.retry.period * 2 } retryTxns[seqNum] = &retryOps{ txnSeqNum: seqNum, period: period, keys: utils.NewMapBasedKeySet(), } } retryTxns[seqNum].keys.Add(retryKey) } } // schedule a series of re-try transactions for failed values for _, retryTxn := range retryTxns { s.enqueueRetry(retryTxn) } graphW.Release() } // collect errors var txnErrors []kvs.KeyWithError txnErrors = append(txnErrors, preErrors...) 
for _, txnOp := range executed { if txnOp.PrevErr == nil && txnOp.NewErr == nil { continue } txnErrors = append(txnErrors, kvs.KeyWithError{ Key: txnOp.Key, TxnOperation: txnOp.Operation, Error: txnOp.NewErr, }) } // for blocking txn, send non-nil errors to the resultChan if txn.args.txnType == kvs.NBTransaction && txn.args.nb.isBlocking { var ( errors []kvs.KeyWithError txnErr error ) for _, kvWithError := range txnErrors { if kvWithError.Error != nil { errors = append(errors, kvWithError) } } if len(errors) > 0 { txnErr = kvs.NewTransactionError(nil, errors) } select { case txn.args.nb.resultChan <- txnResult{txnSeqNum: txn.seqNum, err: txnErr}: default: s.Log.WithField("txnSeq", txn.seqNum). Warn("Failed to deliver transaction result to the caller") } } // send errors to the subscribers for _, errSub := range s.errorSubs { for _, kvWithError := range txnErrors { if errSub.selector == nil || errSub.selector(kvWithError.Key) { select { case errSub.channel <- kvWithError: default: s.Log.WithField("txnSeq", txn.seqNum). Warn("Failed to deliver transaction error to a subscriber") } } } } } // validTxnValue checks validity of a kv-pair to be applied in a transaction. func (s *Scheduler)
(graphR graph.ReadAccess, key string, value proto.Message, origin kvs.ValueOrigin, txnSeqNum uint64) bool { if key == "" { s.Log.WithFields(logging.Fields{ "txnSeqNum": txnSeqNum, }).Warn("Empty key for a value in the transaction") return false } if origin == kvs.FromSB { descriptor := s.registry.GetDescriptorForKey(key) if descriptor == nil { s.Log.WithFields(logging.Fields{ "txnSeqNum": txnSeqNum, "key": key, }).Debug("Ignoring unimplemented notification") return false } } node := graphR.GetNode(key) if node != nil { if isNodeDerived(node) { s.Log.WithFields(logging.Fields{ "txnSeqNum": txnSeqNum, "key": key, }).Warn("Transaction attempting to change a derived value") return false } if origin == kvs.FromSB && getNodeOrigin(node) == kvs.FromNB { s.Log.WithFields(logging.Fields{ "txnSeqNum": txnSeqNum, "key": key, }).Debug("Ignoring notification for a NB-managed value") return false } } return true }
validTxnValue
identifier_name
txn_process.go
// Copyright (c) 2018 Cisco and/or its affiliates. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package kvscheduler import ( "reflect" "time" "github.com/gogo/protobuf/proto" "github.com/ligato/cn-infra/logging" kvs "github.com/ligato/vpp-agent/plugins/kvscheduler/api" "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/graph" "github.com/ligato/vpp-agent/plugins/kvscheduler/internal/utils" ) // preProcessedTxn appends un-marshalled (or filtered retry) values to a queued // transaction and sets the sequence number. type preProcessedTxn struct { seqNum uint64 values []kvForTxn args *queuedTxn } // kvForTxn represents a new value for a given key to be applied in a transaction. type kvForTxn struct { key string value proto.Message metadata kvs.Metadata origin kvs.ValueOrigin isRevert bool } // consumeTransactions pulls the oldest queued transaction and starts the processing. func (s *Scheduler) consumeTransactions() { defer s.wg.Done() for { txn, canceled := s.dequeueTxn() if canceled { return } s.processTransaction(txn) } } // processTransaction processes transaction in 6 steps: // 1. Pre-processing: transaction parameters are initialized, retry operations // are filtered from the obsolete ones and for the resync the graph is refreshed // 2. Simulation (skipped for SB notification): simulating transaction without // actually executing any of the Add/Delete/Modify/Update operations in order // to obtain the "execution plan" // 3. 
Pre-recording: logging transaction arguments + plan before execution to // persist some information in case there is a crash during execution // 4. Execution: executing the transaction, collecting errors // 5. Recording: recording the finalized transaction (log + in-memory) // 6. Post-processing: scheduling retry for failed operations, propagating errors // to the subscribers and to the caller of blocking commit func (s *Scheduler) processTransaction(qTxn *queuedTxn) { var ( simulatedOps kvs.RecordedTxnOps executedOps kvs.RecordedTxnOps failed map[string]bool startTime time.Time stopTime time.Time ) s.txnLock.Lock() defer s.txnLock.Unlock() // 1. Pre-processing: startTime = time.Now() txn, preErrors := s.preProcessTransaction(qTxn) eligibleForExec := len(txn.values) > 0 && len(preErrors) == 0 // 2. Ordering: txn.values = s.orderValuesByOp(txn.values) // 3. Simulation: if eligibleForExec { simulatedOps, _ = s.executeTransaction(txn, true) } // 4. Pre-recording preTxnRecord := s.preRecordTransaction(txn, simulatedOps, preErrors) // 5. Execution: if eligibleForExec { executedOps, failed = s.executeTransaction(txn, false) } stopTime = time.Now() // 6. Recording: s.recordTransaction(preTxnRecord, executedOps, startTime, stopTime) // 7. Post-processing: s.postProcessTransaction(txn, executedOps, failed, preErrors) } // preProcessTransaction initializes transaction parameters, filters obsolete retry // operations and refreshes the graph for resync. 
func (s *Scheduler) preProcessTransaction(qTxn *queuedTxn) (txn *preProcessedTxn, errors []kvs.KeyWithError) { // allocate new transaction sequence number preTxn := &preProcessedTxn{seqNum: s.txnSeqNumber, args: qTxn} s.txnSeqNumber++ switch qTxn.txnType { case kvs.SBNotification: s.preProcessNotification(qTxn, preTxn) case kvs.NBTransaction: errors = s.preProcessNBTransaction(qTxn, preTxn) case kvs.RetryFailedOps: s.preProcessRetryTxn(qTxn, preTxn) } return preTxn, errors } // preProcessNotification filters out non-valid SB notification. func (s *Scheduler) preProcessNotification(qTxn *queuedTxn, preTxn *preProcessedTxn) { graphR := s.graph.Read() defer graphR.Release() if !s.validTxnValue(graphR, qTxn.sb.value.Key, qTxn.sb.value.Value, kvs.FromSB, preTxn.seqNum) { return } preTxn.values = append(preTxn.values, kvForTxn{ key: qTxn.sb.value.Key, value: qTxn.sb.value.Value, metadata: qTxn.sb.metadata, origin: kvs.FromSB, }) } // preProcessNBTransaction unmarshalls transaction values and for resync also refreshes the graph. 
func (s *Scheduler) preProcessNBTransaction(qTxn *queuedTxn, preTxn *preProcessedTxn) (errors []kvs.KeyWithError) { // unmarshall all values graphR := s.graph.Read() for key, lazyValue := range qTxn.nb.value { descriptor := s.registry.GetDescriptorForKey(key) if descriptor == nil { // unimplemented base value errors = append(errors, kvs.KeyWithError{Key: key, TxnOperation: kvs.PreProcess, Error: kvs.ErrUnimplementedKey}) continue } var value proto.Message if lazyValue != nil { // create an instance of the target proto.Message type valueType := proto.MessageType(descriptor.ValueTypeName) if valueType == nil { errors = append(errors, kvs.KeyWithError{Key: key, TxnOperation: kvs.PreProcess, Error: kvs.ErrUnregisteredValueType}) continue } value = reflect.New(valueType.Elem()).Interface().(proto.Message) // try to deserialize the value err := lazyValue.GetValue(value) if err != nil { errors = append(errors, kvs.KeyWithError{Key: key, TxnOperation: kvs.PreProcess, Error: err}) continue } } if !s.validTxnValue(graphR, key, value, kvs.FromNB, preTxn.seqNum) { continue } preTxn.values = append(preTxn.values, kvForTxn{ key: key, value: value, origin: kvs.FromNB, }) } graphR.Release() // for resync refresh the graph + collect deletes if len(errors) == 0 && qTxn.nb.resyncType != kvs.NotResync { graphW := s.graph.Write(false) defer graphW.Release() defer graphW.Save() s.resyncCount++ if qTxn.nb.resyncType == kvs.DownstreamResync { // for downstream resync it is assumed that scheduler is in-sync with NB currentNodes := graphW.GetNodes(nil, graph.WithFlags(&OriginFlag{kvs.FromNB}), graph.WithoutFlags(&DerivedFlag{})) for _, node := range currentNodes { lastChange := getNodeLastChange(node) preTxn.values = append(preTxn.values, kvForTxn{ key: node.GetKey(), value: lastChange.value, origin: kvs.FromNB, isRevert: lastChange.revert, }) } } // build the set of keys currently in NB nbKeys := utils.NewMapBasedKeySet() for _, kv := range preTxn.values { nbKeys.Add(kv.key) } // unless 
this is only UpstreamResync, refresh the graph with the current // state of SB if qTxn.nb.resyncType != kvs.UpstreamResync { s.refreshGraph(graphW, nil, &resyncData{ first: s.resyncCount == 1, values: preTxn.values, verbose: qTxn.nb.verboseRefresh}) } // collect deletes for obsolete values currentNodes := graphW.GetNodes(nil, graph.WithFlags(&OriginFlag{kvs.FromNB}), graph.WithoutFlags(&DerivedFlag{})) for _, node := range currentNodes { if nbKey := nbKeys.Has(node.GetKey()); nbKey { continue } preTxn.values = append(preTxn.values, kvForTxn{ key: node.GetKey(), value: nil, // remove origin: kvs.FromNB, }) } // update (record) SB values sbNodes := graphW.GetNodes(nil, graph.WithFlags(&OriginFlag{kvs.FromSB}), graph.WithoutFlags(&DerivedFlag{})) for _, node := range sbNodes { if nbKey := nbKeys.Has(node.GetKey()); nbKey { continue } preTxn.values = append(preTxn.values, kvForTxn{ key: node.GetKey(), value: node.GetValue(), origin: kvs.FromSB, }) } } return errors } // preProcessRetryTxn filters out obsolete retry operations. func (s *Scheduler) preProcessRetryTxn(qTxn *queuedTxn, preTxn *preProcessedTxn) { graphR := s.graph.Read() defer graphR.Release() for _, key := range qTxn.retry.keys.Iterate() { node := graphR.GetNode(key) if node == nil { continue } lastChange := getNodeLastChange(node) if lastChange.txnSeqNum > qTxn.retry.txnSeqNum { // obsolete retry, the value has been changed since the failure continue } preTxn.values = append(preTxn.values, kvForTxn{ key: key, value: lastChange.value, origin: lastChange.origin, // FromNB isRevert: lastChange.revert, }) } } // postProcessTransaction schedules retry for failed operations and propagates // errors to the subscribers and to the caller of a blocking commit. 
func (s *Scheduler) postProcessTransaction(txn *preProcessedTxn, executed kvs.RecordedTxnOps, failed map[string]bool, preErrors []kvs.KeyWithError) { // refresh base values with error or with a derived value that has an error if len(failed) > 0 { graphW := s.graph.Write(false) toRefresh := utils.NewMapBasedKeySet() for key := range failed { toRefresh.Add(key) } s.refreshGraph(graphW, toRefresh, nil) graphW.Save() // split failed values based on transactions that performed the last change retryTxns := make(map[uint64]*retryOps) for retryKey, retriable := range failed { if !retriable { continue } node := graphW.GetNode(retryKey) if node == nil { // delete returned error, but refresh showed that it is not in SB anymore anyway continue } lastChange := getNodeLastChange(node) seqNum := lastChange.txnSeqNum if lastChange.retryEnabled { if _, has := retryTxns[seqNum]; !has { period := lastChange.retryPeriod if seqNum == txn.seqNum && txn.args.txnType == kvs.RetryFailedOps && lastChange.retryExpBackoff { period = txn.args.retry.period * 2 } retryTxns[seqNum] = &retryOps{ txnSeqNum: seqNum, period: period, keys: utils.NewMapBasedKeySet(), } } retryTxns[seqNum].keys.Add(retryKey) } } // schedule a series of re-try transactions for failed values for _, retryTxn := range retryTxns {
s.enqueueRetry(retryTxn) } graphW.Release() } // collect errors var txnErrors []kvs.KeyWithError txnErrors = append(txnErrors, preErrors...) for _, txnOp := range executed { if txnOp.PrevErr == nil && txnOp.NewErr == nil { continue } txnErrors = append(txnErrors, kvs.KeyWithError{ Key: txnOp.Key, TxnOperation: txnOp.Operation, Error: txnOp.NewErr, }) } // for blocking txn, send non-nil errors to the resultChan if txn.args.txnType == kvs.NBTransaction && txn.args.nb.isBlocking { var ( errors []kvs.KeyWithError txnErr error ) for _, kvWithError := range txnErrors { if kvWithError.Error != nil { errors = append(errors, kvWithError) } } if len(errors) > 0 { txnErr = kvs.NewTransactionError(nil, errors) } select { case txn.args.nb.resultChan <- txnResult{txnSeqNum: txn.seqNum, err: txnErr}: default: s.Log.WithField("txnSeq", txn.seqNum). Warn("Failed to deliver transaction result to the caller") } } // send errors to the subscribers for _, errSub := range s.errorSubs { for _, kvWithError := range txnErrors { if errSub.selector == nil || errSub.selector(kvWithError.Key) { select { case errSub.channel <- kvWithError: default: s.Log.WithField("txnSeq", txn.seqNum). Warn("Failed to deliver transaction error to a subscriber") } } } } } // validTxnValue checks validity of a kv-pair to be applied in a transaction. 
func (s *Scheduler) validTxnValue(graphR graph.ReadAccess, key string, value proto.Message, origin kvs.ValueOrigin, txnSeqNum uint64) bool { if key == "" { s.Log.WithFields(logging.Fields{ "txnSeqNum": txnSeqNum, }).Warn("Empty key for a value in the transaction") return false } if origin == kvs.FromSB { descriptor := s.registry.GetDescriptorForKey(key) if descriptor == nil { s.Log.WithFields(logging.Fields{ "txnSeqNum": txnSeqNum, "key": key, }).Debug("Ignoring unimplemented notification") return false } } node := graphR.GetNode(key) if node != nil { if isNodeDerived(node) { s.Log.WithFields(logging.Fields{ "txnSeqNum": txnSeqNum, "key": key, }).Warn("Transaction attempting to change a derived value") return false } if origin == kvs.FromSB && getNodeOrigin(node) == kvs.FromNB { s.Log.WithFields(logging.Fields{ "txnSeqNum": txnSeqNum, "key": key, }).Debug("Ignoring notification for a NB-managed value") return false } } return true }
random_line_split
model.py
#!/usr/bin/env python # coding: utf-8 ''' Event Clustering within News Articles accepted to AESPEN in LREC 2020. Faik Kerem Örs, Süveyda Yeniterzi, Reyyan Yeniterzi 2nd April 2020 - Version 1 ''' import numpy as np import pandas as pd import json import torch import time import datetime import random from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler from sklearn.model_selection import train_test_split from transformers import AlbertForSequenceClassification, AlbertTokenizer, AdamW, get_linear_schedule_with_warmup import io from ast import literal_eval import numpy as np %tensorflow_version 2.x import tensorflow as tf SEQ_LEN = 115 # Decided Based on Sentence Lengths def text_processor(data_prep, column_names, data_type="train"): # Remove NaN Rows if data_type == "train": data_prep.dropna(inplace=True) for col_name in column_names: # Strip Spaces data_prep[col_name] = data_prep[col_name].str.strip() def convert_data(data_df): # Encode sentence pairs using the tokenizer. encoded_data = [] for ind in data_df["label"].index: encoded_sents = tokenizer.encode_plus(text=data_df["sent1"][ind], text_pair=data_df["sent2"][ind], add_special_tokens=True, max_length=SEQ_LEN, pad_to_max_length=True, return_token_type_ids=True) encoded_sents["label"] = data_df["label"][ind] encoded_data.append(encoded_sents) return encoded_data def test_convert_data(data_df): # Again we encode the sentence pairs of the test data. # Since we don't have label, we stored index. encoded_data = [] for ind in data_df["sent1"].index: encoded_sents = tokenizer.encode_plus(text=data_df["sent1"][ind], text_pair=data_df["sent2"][ind], add_special_tokens=True, max_length=SEQ_LEN, pad_to_max_length=True, return_token_type_ids=True) encoded_sents["index"] = ind encoded_data.append(encoded_sents) return encoded_data def convert_tensors(encoded_df): # Convert data to tensors. 
encoded_tensors = {'input_ids': [], 'token_type_ids': [], 'attention_mask': [], 'label': []} for elt in encoded_df: for key in encoded_tensors.keys(): encoded_tensors[key].append(elt[key]) for key in encoded_tensors.keys(): encoded_tensors[key] = torch.tensor(encoded_tensors[key]) return encoded_tensors def test_convert_tensors(encoded_df): # Convert test data to tensors. # This time we don't have the labels but indices. encoded_tensors = {'input_ids': [], 'token_type_ids': [], 'attention_mask': [], 'index': []} for elt in encoded_df: for key in encoded_tensors.keys(): encoded_tensors[key].append(elt[key]) for key in encoded_tensors.keys(): encoded_tensors[key] = torch.tensor(encoded_tensors[key]) return encoded_tensors def get_dataloader(dict_tensor, batch_size=32, shuffle=False): # Generate the data loader for training and testing. dataset = TensorDataset(*dict_tensor.values()) if shuffle: # Train data is shuffled. sampler = RandomSampler(dataset) else: # Test data is not shuffled. sampler = SequentialSampler(dataset) dataloader = DataLoader(dataset, sampler=sampler, batch_size=batch_size) return dataloader def flat_accuracy(preds, labels): # Function to calculate the accuracy of our predictions vs labels # Based on https://medium.com/@aniruddha.choudhury94/part-2-bert-fine-tuning-tutorial-with-pytorch-for-text-classification-on-the-corpus-of-linguistic-18057ce330e1 pred_flat = np.argmax(preds, axis=1).flatten() labels_flat = labels.flatten() return np.sum(pred_flat == labels_flat) / len(labels_flat) def format_time(elapsed): ''
def get_classes(preds): # Function to calculate the accuracy of our predictions vs labels # Based on https://medium.com/@aniruddha.choudhury94/part-2-bert-fine-tuning-tutorial-with-pytorch-for-text-classification-on-the-corpus-of-linguistic-18057ce330e1 pred_flat = np.argmax(preds, axis=1).flatten() return pred_flat def get_probs(logits): # Converts logits to probabilities. obj = torch.nn.Sigmoid() return obj(logits) def train(model, train_dataloader): # Model Training # Based on https://medium.com/@aniruddha.choudhury94/part-2-bert-fine-tuning-tutorial-with-pytorch-for-text-classification-on-the-corpus-of-linguistic-18057ce330e1 param_optimizer = list(model.named_parameters()) no_decay = ['bias', 'gamma', 'beta'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.01}, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.0} ] # Note: AdamW is a class from the huggingface library (as opposed to pytorch) # I believe the 'W' stands for 'Weight Decay fix" optimizer = AdamW(model.parameters(), lr = 2e-5, # args.learning_rate - default is 5e-5, our notebook had 2e-5 eps = 1e-8 # args.adam_epsilon - default is 1e-8. ) # Number of training epochs (authors recommend between 2 and 4) epochs = 4 # Total number of training steps is number of batches * number of epochs. total_steps = len(train_dataloader) * epochs # Create the learning rate scheduler. scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps = 0, # Default value in run_glue.py num_training_steps = total_steps) # This training code is based on the `run_glue.py` script here: # https://github.com/huggingface/transformers/blob/5bfcd0485ece086ebcbed2d008813037968a9e58/examples/run_glue.py#L128 # Set the seed value all over the place to make this reproducible. 
seed_val = 113 random.seed(seed_val) np.random.seed(seed_val) torch.manual_seed(seed_val) torch.cuda.manual_seed_all(seed_val) # Store the average loss after each epoch so we can plot them. loss_values = [] # For each epoch... for epoch_i in range(0, epochs): # ======================================== # Training # ======================================== # Perform one full pass over the training set. print("") print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs)) print('Training...') # Measure how long the training epoch takes. t0 = time.time() # Reset the total loss for this epoch. total_loss = 0 # Put the model into training mode. Don't be mislead--the call to # `train` just changes the *mode*, it doesn't *perform* the training. # `dropout` and `batchnorm` layers behave differently during training # vs. test (Based on https://stackoverflow.com/questions/51433378/what-does-model-train-do-in-pytorch) model.train() # For each batch of training data... for step, batch in enumerate(train_dataloader): # Progress update every 40 batches. if step % 40 == 0 and not step == 0: # Calculate elapsed time in minutes. elapsed = format_time(time.time() - t0) # Report progress. print(' Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(train_dataloader), elapsed)) # Unpack this training batch from our dataloader. # # As we unpack the batch, we'll also copy each tensor to the GPU using the # `to` method. # # `batch` contains three pytorch tensors: # [0]: input ids # [1]: attention masks # [2]: labels b_input_ids = batch[0].to(device) b_token_type_ids = batch[1].to(device) b_input_mask = batch[2].to(device) b_labels = batch[3].to(device) # Always clear any previously calculated gradients before performing a # backward pass. PyTorch doesn't do this automatically because # accumulating the gradients is "convenient while training RNNs". 
# (Based on https://stackoverflow.com/questions/48001598/why-do-we-need-to-call-zero-grad-in-pytorch) model.zero_grad() # Perform a forward pass (evaluate the model on this training batch). # This will return the loss (rather than the model output) because we # have provided the `labels`. # The documentation for this `model` function is here: # https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification outputs = model(b_input_ids, token_type_ids=b_token_type_ids, attention_mask=b_input_mask, labels=b_labels) # The call to `model` always returns a tuple, so we need to pull the # loss value out of the tuple. loss = outputs[0] # Accumulate the training loss over all of the batches so that we can # calculate the average loss at the end. `loss` is a Tensor containing a # single value; the `.item()` function just returns the Python value # from the tensor. total_loss += loss.item() # Perform a backward pass to calculate the gradients. loss.backward() # Clip the norm of the gradients to 1.0. # This is to help prevent the "exploding gradients" problem. torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0) # Update parameters and take a step using the computed gradient. # The optimizer dictates the "update rule"--how the parameters are # modified based on their gradients, the learning rate, etc. optimizer.step() # Update the learning rate. scheduler.step() # Calculate the average loss over the training data. avg_train_loss = total_loss / len(train_dataloader) # Store the loss value for plotting the learning curve. 
loss_values.append(avg_train_loss) print("") print(" Average training loss: {0:.2f}".format(avg_train_loss)) print(" Training epcoh took: {:}".format(format_time(time.time() - t0))) print("") print("Training complete!") def test(model, test_dataloader): # Prediction on test set # Based on https://medium.com/@aniruddha.choudhury94/part-2-bert-fine-tuning-tutorial-with-pytorch-for-text-classification-on-the-corpus-of-linguistic-18057ce330e1 print('Predicting the labels...') eval_loss, eval_accuracy = 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 # Put model in evaluation mode model.eval() # Tracking variables predictions, true_labels, sent_logits, sent_probs = [], [], [], [] # Predict for batch in test_dataloader: # Add batch to GPU batch = tuple(t.to(device) for t in batch) # Unpack the inputs from our dataloader b_input_ids, b_token_type_ids, b_input_mask, b_labels = batch # Telling the model not to compute or store gradients, saving memory and # speeding up prediction with torch.no_grad(): # Forward pass, calculate logit predictions outputs = model(b_input_ids, token_type_ids=b_token_type_ids, attention_mask=b_input_mask) logits = outputs[0] sent_probs.append(get_probs(logits).detach().cpu().numpy()) if len(sent_probs) % 100 == 0: print(len(sent_probs)) # Move logits and labels to CPU logits = logits.detach().cpu().numpy() label_ids = b_labels.to('cpu').numpy() # Calculate the accuracy for this batch of test sentences. #tmp_eval_accuracy = flat_accuracy(logits, label_ids) # Accumulate the total accuracy. 
#eval_accuracy += tmp_eval_accuracy # Track the number of batches nb_eval_steps += 1 # Store predictions and true labels predictions.append(get_classes(logits)[0]) #true_labels.append(label_ids) sent_logits.append(logits.tolist()[0]) print(' DONE.') #print(" Accuracy: {0:.3f}".format(eval_accuracy/nb_eval_steps)) return predictions, sent_logits def sort_clusters(news_clusters): for news_index, clusters in news_clusters.items(): for lst in clusters: lst.sort() clusters.sort(key=lambda x: x[0]) def check_duplicates(news_clusters): # Check an element exists in other lists for news_index, clusters in news_clusters.items(): for lst in clusters: for elt in lst: for lst2 in clusters: if lst != lst2: if elt in lst2: print("Duplicate EXISTS:", news_index) def get_scores(post_val_data, reward, penalty): ''' Scoring Algorithm. ''' news_scores = {} for news_index in post_val_data["news_index"].unique(): # Create a dict to store the pairwise predictions. news_relationships = {} for _, sent_pair in post_val_data[post_val_data["news_index"] == news_index].iterrows(): # Get pair IDs and corresponding pairwise prediction. sent1_index = sent_pair["sent1_index"] sent2_index = sent_pair["sent2_index"] prediction = sent_pair["predictions"] # Store the predictions in the format: {sent_id1: {another_sent_id1: pairwise_prediction1, another_sent_id2: pairwise_prediction2}} if sent1_index in news_relationships: news_relationships[sent1_index][sent2_index] = prediction else: news_relationships[sent1_index] = {sent2_index: prediction} # Store the relationships symmetrically # Symmetric case would be: {sent_id: {another_sent_id: pairwise_prediction}, {another_sent_id: {sent_id: pairwise_prediction}}} if sent1_index < sent2_index: if sent2_index in news_relationships: news_relationships[sent2_index][sent1_index] = prediction else: news_relationships[sent2_index] = {sent1_index: prediction} # Create a dict to store the relationship scores. 
# Neighbor terminology used in this code means that the sentence pairs 'main_key' and 'neigh_key' appear to be in the same cluster. final_neighbors = {} for main_key, main_neighs in news_relationships.items(): # The first sentence, say 'main_key' final_neighbors[main_key] = {} for main_neigh in main_neighs.items(): # 'main_neigh_key': The second sentence that forms the pair together with the first sentence 'main_key' # 'main_pred': Model's prediction for the pair (main_key, main_neigh_key) main_neigh_key, main_pred = main_neigh # Set initial scores based the pairwise predictions. # 1 if they are predicted to be in the same cluster. # -1 Otherwise (penalize). if main_pred == 1: neighbor_score = 1 else: neighbor_score = -1 # Consider common relationships that main_key and main_neigh_key have. # Reward their pairwise score if they have common neighbors. # Penalize their pairwise if they have neighbors that are not common. if main_neigh_key in news_relationships: # Iterate over the neighbors of main_neigh_key (the second sentence) for helper_neighs in news_relationships[main_neigh_key].items(): # 'helper_neigh_key': The sentence that forms the pair together with the second sentence 'main_neigh_key' # 'main_pred': Model's prediction for the pair (main_neigh_key, helper_neigh_key) helper_neigh_key, helper_neigh_pred = helper_neighs # Iterate over the neighbors of main_key to see whether it also appears to be in the same cluster with helper_neigh_key for x_neigh_key, x_pred in main_neighs.items(): if x_neigh_key == helper_neigh_key: # If main_key (the first sentence) and main_neigh_key (the second sentence) have a common neighbor, reward their pairwise score. # If helper_neigh_key is the neighbor of only one of them (the first or second sentence), penalize the pairwise score of main_key and main_neigh_key. # Otherwise, do nothing since we might not know. 
if x_pred == 1 and helper_neigh_pred == 1: neighbor_score += reward elif x_pred == 1 and helper_neigh_pred == 0: neighbor_score -= penalty elif x_pred == 0 and helper_neigh_pred == 1: neighbor_score -= penalty break # Scores for one news. final_neighbors[main_key][main_neigh_key] = neighbor_score # Store the scores together with the corresponding news. news_scores[news_index] = final_neighbors return news_scores def get_clusters(news_scores): ''' Clustering Algorithm ''' # Example input ''' scores = {2: {4: 1, 27: 0, 36: 2, 37: 0, 40: -6, 43: -4}, 4: {2: 1, 27: 0, 36: -1, 37: -1, 40: -3, 43: -3}, 27: {2: 0, 4: 0, 36: 0, 37: -2, 40: -4, 43: -2}, 36: {2: 2, 4: -1, 27: 0, 37: 1, 40: -5, 43: -3}, 37: {2: 0, 4: -1, 27: -2, 36: 1, 40: -4, 43: -5}, 40: {2: -6, 4: -3, 27: -4, 36: -5, 37: -4, 43: 0}, 43: {2: -4, 4: -3, 27: -2, 36: -3, 37: -5, 40: 0}} ''' news_clusters = {} for news_index, scores in news_scores.items(): column_names = ["Sen_1", "Sen_2", "Score"] df = pd.DataFrame(columns = column_names) # Create a dataframe of pairwise sentence scores for sentence, scores in scores.items(): for key in scores: df = df.append(pd.DataFrame({"Sen_1":[sentence], "Sen_2":[key], "Score":[scores[key]]}) , ignore_index = True) # Sort the dataframe by descending order of score, and the ascending order of sentence 1 and 2 df.sort_values(by=['Score', 'Sen_1', 'Sen_2'], ascending=[0, 1, 1], inplace = True) # Create a sentence list with all currently assigned to group 0 sentences = pd.DataFrame(set(df['Sen_1'].tolist()), columns =['Sentences']) sentences['Group'] = 0 # Eliminate all sentence pairs with score <= 0 df = df[df['Score'] > 0] group_count = 0 if not df.empty: # Eliminate duplicate rows df['Sen_min'] = df.apply(lambda row: min(row.Sen_1, row.Sen_2), axis=1) df['Sen_max'] = df.apply(lambda row: max(row.Sen_1, row.Sen_2), axis=1) df.drop(['Sen_1', 'Sen_2'], axis=1, inplace=True) df.drop_duplicates(inplace = True) # Iterate over the dataframe and assign sentence pairs to groups 
based on the below conditions: # - If the current sentence pair have both Group = 0 (means they've not yet assigned to any group), then create a new group and assign both sentence to this new group # - Else if only one of the sentence has Group = 0 in the pair, then that sentence is assigned to the group of the other sentence # - Else sentences are already assigned to other groups, then no need to do anything for index, row in df.iterrows(): if sentences.loc[sentences['Sentences'] == row['Sen_min'], 'Group'].iloc[0] == 0 and sentences.loc[sentences['Sentences'] == row['Sen_max'], 'Group'].iloc[0] == 0: group_count = group_count + 1 sentences.loc[sentences['Sentences'] == row['Sen_min'], 'Group'] = group_count sentences.loc[sentences['Sentences'] == row['Sen_max'], 'Group'] = group_count elif sentences.loc[sentences['Sentences'] == row['Sen_min'], 'Group'].iloc[0] == 0: sentences.loc[sentences['Sentences'] == row['Sen_min'], 'Group'] = sentences.loc[sentences['Sentences'] == row['Sen_max'], 'Group'].iloc[0] elif sentences.loc[sentences['Sentences'] == row['Sen_max'], 'Group'].iloc[0] == 0: sentences.loc[sentences['Sentences'] == row['Sen_max'], 'Group'] = sentences.loc[sentences['Sentences'] == row['Sen_min'], 'Group'].iloc[0] else: pass # At the end if there are still sentences that have not been assigned to any group, then assign them to seperate groups individually for index, row in sentences.iterrows(): if row['Group'] == 0: group_count = group_count + 1 sentences.loc[sentences['Sentences'] == row['Sentences'], 'Group'] = group_count news_clusters[news_index] = [] for gr in sentences["Group"].unique(): news_clusters[news_index].append(sentences[sentences["Group"] == gr]["Sentences"].values.tolist()) return news_clusters if __name__ == "__main__": # Load data. 
train_data = pd.read_csv("train_data.csv") test_data = pd.read_csv("test_data.csv") # GPU device = torch.device("cuda" if torch.cuda.is_available() else "cpu") n_gpu = torch.cuda.device_count() # Remove NaN values etc. text_processor(train_data, train_data.columns.values.tolist()[3:-1]) text_processor(test_data, test_data.columns.values.tolist()[3:], data_type="test") # ALBERT xxlarge-v2 model_class = AlbertForSequenceClassification tokenizer_class = AlbertTokenizer pretrained_weights = 'albert-xxlarge-v2' tokenizer = tokenizer_class.from_pretrained(pretrained_weights, do_lower_case=True) model = model_class.from_pretrained(pretrained_weights, num_labels=2, output_attentions=False, output_hidden_states=False) # Encode sentence pairs in train and test sets using tokenizer. encoded_train = convert_data(train_data) encoded_test = test_convert_data(test_data) # Convert encoded data to tensors. train_tensors = convert_tensors(encoded_train) test_tensors = test_convert_tensors(encoded_test) # Create data loaders to feed the data batch by batch. train_dataloader = get_dataloader(train_tensors, batch_size=16, shuffle=True) test_dataloader = get_dataloader(test_tensors, batch_size=1) # Send model to the device. model.cuda() # Train model train(model, train_dataloader) # SAVE MODEL torch.save(model.state_dict(), "model.pt") # LOAD MODEL #model.load_state_dict(torch.load("model.pt")) #model.cuda() # Test model and get pairwise predictions and logits. pred_lst, logit_lst = test(model, test_dataloader) # Fine-tuned rewards and penalties. rewards = [0.8] penalties = [0.8] for reward in rewards: for penalty in penalties: # Use post_val_data to store the predictions and logits for each sentence pair. post_val_data = test_data.copy() post_val_data["predictions"] = pred_lst post_val_data["logits"] = logit_lst #post_val_data["probabilities"] = prob_lst # Get Scores news_scores = get_scores(post_val_data, reward, penalty) # Get the Clusters. 
news_clusters = get_clusters(news_scores) # There shouldn't be a duplicate, just for debugging... check_duplicates(news_clusters) # Sort Clusters for evaluation. sort_clusters(news_clusters) # Put cluster predictions also to post_val_data. for news_index, clusters in news_clusters.items(): for ind in post_val_data[post_val_data["news_index"] == news_index].index: post_val_data.loc[ind, "prediction_clusters"] = str(clusters) # Get test.json file that doesn't have gold labels. orj_test = pd.read_json("Path_to_Data/test.json", lines=True) # Put cluster predictions to the json file for evaluation. for news_index, clusters in news_clusters.items(): orj_test.loc[news_index, "prediction_clusters"] = str(clusters) # Name cluster predictions as event_clusters for evaluation. orj_test.rename(columns={"prediction_clusters": "event_clusters"}, inplace=True) orj_test.loc[:,'event_clusters'] = orj_test.loc[:,'event_clusters'].apply(lambda x: literal_eval(x)) # Save pairwise and cluster predictions in csv and json format. post_val_data.to_csv("pairwise_predictions.csv", index=None) orj_test.to_json("cluster_predictions.json", orient="records", lines=True) # Use json for final evaluation.
' Takes a time in seconds and returns a string hh:mm:ss Based on https://medium.com/@aniruddha.choudhury94/part-2-bert-fine-tuning-tutorial-with-pytorch-for-text-classification-on-the-corpus-of-linguistic-18057ce330e1 ''' # Round to the nearest second. elapsed_rounded = int(round((elapsed))) # Format as hh:mm:ss return str(datetime.timedelta(seconds=elapsed_rounded))
identifier_body
model.py
#!/usr/bin/env python # coding: utf-8 ''' Event Clustering within News Articles accepted to AESPEN in LREC 2020. Faik Kerem Örs, Süveyda Yeniterzi, Reyyan Yeniterzi 2nd April 2020 - Version 1 ''' import numpy as np import pandas as pd import json import torch import time import datetime import random from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler from sklearn.model_selection import train_test_split from transformers import AlbertForSequenceClassification, AlbertTokenizer, AdamW, get_linear_schedule_with_warmup import io from ast import literal_eval import numpy as np %tensorflow_version 2.x import tensorflow as tf SEQ_LEN = 115 # Decided Based on Sentence Lengths def text_processor(data_prep, column_names, data_type="train"): # Remove NaN Rows if data_type == "train": data_prep.dropna(inplace=True) for col_name in column_names: # Strip Spaces data_prep[col_name] = data_prep[col_name].str.strip() def convert_data(data_df): # Encode sentence pairs using the tokenizer. encoded_data = [] for ind in data_df["label"].index: encoded_sents = tokenizer.encode_plus(text=data_df["sent1"][ind], text_pair=data_df["sent2"][ind], add_special_tokens=True, max_length=SEQ_LEN, pad_to_max_length=True, return_token_type_ids=True) encoded_sents["label"] = data_df["label"][ind] encoded_data.append(encoded_sents) return encoded_data def test_convert_data(data_df): # Again we encode the sentence pairs of the test data. # Since we don't have label, we stored index. encoded_data = [] for ind in data_df["sent1"].index: encoded_sents = tokenizer.encode_plus(text=data_df["sent1"][ind], text_pair=data_df["sent2"][ind], add_special_tokens=True, max_length=SEQ_LEN, pad_to_max_length=True, return_token_type_ids=True) encoded_sents["index"] = ind encoded_data.append(encoded_sents) return encoded_data def convert_tensors(encoded_df): # Convert data to tensors. 
encoded_tensors = {'input_ids': [], 'token_type_ids': [], 'attention_mask': [], 'label': []} for elt in encoded_df: for key in encoded_tensors.keys(): encoded_tensors[key].append(elt[key]) for key in encoded_tensors.keys(): encoded_tensors[key] = torch.tensor(encoded_tensors[key]) return encoded_tensors def test_convert_tensors(encoded_df): # Convert test data to tensors. # This time we don't have the labels but indices. encoded_tensors = {'input_ids': [], 'token_type_ids': [], 'attention_mask': [], 'index': []} for elt in encoded_df: for key in encoded_tensors.keys(): encoded_tensors[key].append(elt[key]) for key in encoded_tensors.keys(): encoded_tensors[key] = torch.tensor(encoded_tensors[key]) return encoded_tensors def get_dataloader(dict_tensor, batch_size=32, shuffle=False): # Generate the data loader for training and testing. dataset = TensorDataset(*dict_tensor.values()) if shuffle: # Train data is shuffled. sampler = RandomSampler(dataset) else: # Test data is not shuffled. sampler = SequentialSampler(dataset) dataloader = DataLoader(dataset, sampler=sampler, batch_size=batch_size) return dataloader def flat_accuracy(preds, labels): # Function to calculate the accuracy of our predictions vs labels # Based on https://medium.com/@aniruddha.choudhury94/part-2-bert-fine-tuning-tutorial-with-pytorch-for-text-classification-on-the-corpus-of-linguistic-18057ce330e1 pred_flat = np.argmax(preds, axis=1).flatten() labels_flat = labels.flatten() return np.sum(pred_flat == labels_flat) / len(labels_flat) def format_time(elapsed): ''' Takes a time in seconds and returns a string hh:mm:ss Based on https://medium.com/@aniruddha.choudhury94/part-2-bert-fine-tuning-tutorial-with-pytorch-for-text-classification-on-the-corpus-of-linguistic-18057ce330e1 ''' # Round to the nearest second. 
elapsed_rounded = int(round((elapsed))) # Format as hh:mm:ss return str(datetime.timedelta(seconds=elapsed_rounded)) def get_classes(preds): # Function to calculate the accuracy of our predictions vs labels # Based on https://medium.com/@aniruddha.choudhury94/part-2-bert-fine-tuning-tutorial-with-pytorch-for-text-classification-on-the-corpus-of-linguistic-18057ce330e1 pred_flat = np.argmax(preds, axis=1).flatten() return pred_flat def get_probs(logits): # Converts logits to probabilities. obj = torch.nn.Sigmoid() return obj(logits) def train(model, train_dataloader): # Model Training # Based on https://medium.com/@aniruddha.choudhury94/part-2-bert-fine-tuning-tutorial-with-pytorch-for-text-classification-on-the-corpus-of-linguistic-18057ce330e1 param_optimizer = list(model.named_parameters()) no_decay = ['bias', 'gamma', 'beta'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.01}, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.0} ] # Note: AdamW is a class from the huggingface library (as opposed to pytorch) # I believe the 'W' stands for 'Weight Decay fix" optimizer = AdamW(model.parameters(), lr = 2e-5, # args.learning_rate - default is 5e-5, our notebook had 2e-5 eps = 1e-8 # args.adam_epsilon - default is 1e-8. ) # Number of training epochs (authors recommend between 2 and 4) epochs = 4 # Total number of training steps is number of batches * number of epochs. total_steps = len(train_dataloader) * epochs # Create the learning rate scheduler. 
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps = 0, # Default value in run_glue.py num_training_steps = total_steps) # This training code is based on the `run_glue.py` script here: # https://github.com/huggingface/transformers/blob/5bfcd0485ece086ebcbed2d008813037968a9e58/examples/run_glue.py#L128 # Set the seed value all over the place to make this reproducible. seed_val = 113 random.seed(seed_val) np.random.seed(seed_val) torch.manual_seed(seed_val) torch.cuda.manual_seed_all(seed_val) # Store the average loss after each epoch so we can plot them. loss_values = [] # For each epoch... for epoch_i in range(0, epochs): # ======================================== # Training # ======================================== # Perform one full pass over the training set. print("") print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs)) print('Training...') # Measure how long the training epoch takes. t0 = time.time() # Reset the total loss for this epoch. total_loss = 0 # Put the model into training mode. Don't be mislead--the call to # `train` just changes the *mode*, it doesn't *perform* the training. # `dropout` and `batchnorm` layers behave differently during training # vs. test (Based on https://stackoverflow.com/questions/51433378/what-does-model-train-do-in-pytorch) model.train() # For each batch of training data... for step, batch in enumerate(train_dataloader): # Progress update every 40 batches. if step % 40 == 0 and not step == 0: # Calculate elapsed time in minutes. elapsed = format_time(time.time() - t0) # Report progress. print(' Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(train_dataloader), elapsed)) # Unpack this training batch from our dataloader. # # As we unpack the batch, we'll also copy each tensor to the GPU using the # `to` method. 
# # `batch` contains three pytorch tensors: # [0]: input ids # [1]: attention masks # [2]: labels b_input_ids = batch[0].to(device) b_token_type_ids = batch[1].to(device) b_input_mask = batch[2].to(device) b_labels = batch[3].to(device) # Always clear any previously calculated gradients before performing a # backward pass. PyTorch doesn't do this automatically because # accumulating the gradients is "convenient while training RNNs". # (Based on https://stackoverflow.com/questions/48001598/why-do-we-need-to-call-zero-grad-in-pytorch) model.zero_grad() # Perform a forward pass (evaluate the model on this training batch). # This will return the loss (rather than the model output) because we # have provided the `labels`. # The documentation for this `model` function is here: # https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification outputs = model(b_input_ids, token_type_ids=b_token_type_ids, attention_mask=b_input_mask, labels=b_labels) # The call to `model` always returns a tuple, so we need to pull the # loss value out of the tuple. loss = outputs[0] # Accumulate the training loss over all of the batches so that we can # calculate the average loss at the end. `loss` is a Tensor containing a # single value; the `.item()` function just returns the Python value # from the tensor. total_loss += loss.item() # Perform a backward pass to calculate the gradients. loss.backward() # Clip the norm of the gradients to 1.0. # This is to help prevent the "exploding gradients" problem. torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0) # Update parameters and take a step using the computed gradient. # The optimizer dictates the "update rule"--how the parameters are # modified based on their gradients, the learning rate, etc. optimizer.step() # Update the learning rate. scheduler.step() # Calculate the average loss over the training data. 
avg_train_loss = total_loss / len(train_dataloader) # Store the loss value for plotting the learning curve. loss_values.append(avg_train_loss) print("") print(" Average training loss: {0:.2f}".format(avg_train_loss)) print(" Training epcoh took: {:}".format(format_time(time.time() - t0))) print("") print("Training complete!") def test(model, test_dataloader): # Prediction on test set # Based on https://medium.com/@aniruddha.choudhury94/part-2-bert-fine-tuning-tutorial-with-pytorch-for-text-classification-on-the-corpus-of-linguistic-18057ce330e1 print('Predicting the labels...') eval_loss, eval_accuracy = 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 # Put model in evaluation mode model.eval() # Tracking variables predictions, true_labels, sent_logits, sent_probs = [], [], [], [] # Predict for batch in test_dataloader: # Add batch to GPU batch = tuple(t.to(device) for t in batch) # Unpack the inputs from our dataloader b_input_ids, b_token_type_ids, b_input_mask, b_labels = batch # Telling the model not to compute or store gradients, saving memory and # speeding up prediction with torch.no_grad(): # Forward pass, calculate logit predictions outputs = model(b_input_ids, token_type_ids=b_token_type_ids, attention_mask=b_input_mask) logits = outputs[0] sent_probs.append(get_probs(logits).detach().cpu().numpy()) if len(sent_probs) % 100 == 0: print(len(sent_probs)) # Move logits and labels to CPU logits = logits.detach().cpu().numpy() label_ids = b_labels.to('cpu').numpy() # Calculate the accuracy for this batch of test sentences. #tmp_eval_accuracy = flat_accuracy(logits, label_ids) # Accumulate the total accuracy. #eval_accuracy += tmp_eval_accuracy # Track the number of batches nb_eval_steps += 1 # Store predictions and true labels predictions.append(get_classes(logits)[0]) #true_labels.append(label_ids) sent_logits.append(logits.tolist()[0]) print(' DONE.') #print(" Accuracy: {0:.3f}".format(eval_accuracy/nb_eval_steps)) return predictions, sent_logits def so
ews_clusters): for news_index, clusters in news_clusters.items(): for lst in clusters: lst.sort() clusters.sort(key=lambda x: x[0]) def check_duplicates(news_clusters): # Check an element exists in other lists for news_index, clusters in news_clusters.items(): for lst in clusters: for elt in lst: for lst2 in clusters: if lst != lst2: if elt in lst2: print("Duplicate EXISTS:", news_index) def get_scores(post_val_data, reward, penalty): ''' Scoring Algorithm. ''' news_scores = {} for news_index in post_val_data["news_index"].unique(): # Create a dict to store the pairwise predictions. news_relationships = {} for _, sent_pair in post_val_data[post_val_data["news_index"] == news_index].iterrows(): # Get pair IDs and corresponding pairwise prediction. sent1_index = sent_pair["sent1_index"] sent2_index = sent_pair["sent2_index"] prediction = sent_pair["predictions"] # Store the predictions in the format: {sent_id1: {another_sent_id1: pairwise_prediction1, another_sent_id2: pairwise_prediction2}} if sent1_index in news_relationships: news_relationships[sent1_index][sent2_index] = prediction else: news_relationships[sent1_index] = {sent2_index: prediction} # Store the relationships symmetrically # Symmetric case would be: {sent_id: {another_sent_id: pairwise_prediction}, {another_sent_id: {sent_id: pairwise_prediction}}} if sent1_index < sent2_index: if sent2_index in news_relationships: news_relationships[sent2_index][sent1_index] = prediction else: news_relationships[sent2_index] = {sent1_index: prediction} # Create a dict to store the relationship scores. # Neighbor terminology used in this code means that the sentence pairs 'main_key' and 'neigh_key' appear to be in the same cluster. 
final_neighbors = {} for main_key, main_neighs in news_relationships.items(): # The first sentence, say 'main_key' final_neighbors[main_key] = {} for main_neigh in main_neighs.items(): # 'main_neigh_key': The second sentence that forms the pair together with the first sentence 'main_key' # 'main_pred': Model's prediction for the pair (main_key, main_neigh_key) main_neigh_key, main_pred = main_neigh # Set initial scores based the pairwise predictions. # 1 if they are predicted to be in the same cluster. # -1 Otherwise (penalize). if main_pred == 1: neighbor_score = 1 else: neighbor_score = -1 # Consider common relationships that main_key and main_neigh_key have. # Reward their pairwise score if they have common neighbors. # Penalize their pairwise if they have neighbors that are not common. if main_neigh_key in news_relationships: # Iterate over the neighbors of main_neigh_key (the second sentence) for helper_neighs in news_relationships[main_neigh_key].items(): # 'helper_neigh_key': The sentence that forms the pair together with the second sentence 'main_neigh_key' # 'main_pred': Model's prediction for the pair (main_neigh_key, helper_neigh_key) helper_neigh_key, helper_neigh_pred = helper_neighs # Iterate over the neighbors of main_key to see whether it also appears to be in the same cluster with helper_neigh_key for x_neigh_key, x_pred in main_neighs.items(): if x_neigh_key == helper_neigh_key: # If main_key (the first sentence) and main_neigh_key (the second sentence) have a common neighbor, reward their pairwise score. # If helper_neigh_key is the neighbor of only one of them (the first or second sentence), penalize the pairwise score of main_key and main_neigh_key. # Otherwise, do nothing since we might not know. if x_pred == 1 and helper_neigh_pred == 1: neighbor_score += reward elif x_pred == 1 and helper_neigh_pred == 0: neighbor_score -= penalty elif x_pred == 0 and helper_neigh_pred == 1: neighbor_score -= penalty break # Scores for one news. 
final_neighbors[main_key][main_neigh_key] = neighbor_score # Store the scores together with the corresponding news. news_scores[news_index] = final_neighbors return news_scores def get_clusters(news_scores): ''' Clustering Algorithm ''' # Example input ''' scores = {2: {4: 1, 27: 0, 36: 2, 37: 0, 40: -6, 43: -4}, 4: {2: 1, 27: 0, 36: -1, 37: -1, 40: -3, 43: -3}, 27: {2: 0, 4: 0, 36: 0, 37: -2, 40: -4, 43: -2}, 36: {2: 2, 4: -1, 27: 0, 37: 1, 40: -5, 43: -3}, 37: {2: 0, 4: -1, 27: -2, 36: 1, 40: -4, 43: -5}, 40: {2: -6, 4: -3, 27: -4, 36: -5, 37: -4, 43: 0}, 43: {2: -4, 4: -3, 27: -2, 36: -3, 37: -5, 40: 0}} ''' news_clusters = {} for news_index, scores in news_scores.items(): column_names = ["Sen_1", "Sen_2", "Score"] df = pd.DataFrame(columns = column_names) # Create a dataframe of pairwise sentence scores for sentence, scores in scores.items(): for key in scores: df = df.append(pd.DataFrame({"Sen_1":[sentence], "Sen_2":[key], "Score":[scores[key]]}) , ignore_index = True) # Sort the dataframe by descending order of score, and the ascending order of sentence 1 and 2 df.sort_values(by=['Score', 'Sen_1', 'Sen_2'], ascending=[0, 1, 1], inplace = True) # Create a sentence list with all currently assigned to group 0 sentences = pd.DataFrame(set(df['Sen_1'].tolist()), columns =['Sentences']) sentences['Group'] = 0 # Eliminate all sentence pairs with score <= 0 df = df[df['Score'] > 0] group_count = 0 if not df.empty: # Eliminate duplicate rows df['Sen_min'] = df.apply(lambda row: min(row.Sen_1, row.Sen_2), axis=1) df['Sen_max'] = df.apply(lambda row: max(row.Sen_1, row.Sen_2), axis=1) df.drop(['Sen_1', 'Sen_2'], axis=1, inplace=True) df.drop_duplicates(inplace = True) # Iterate over the dataframe and assign sentence pairs to groups based on the below conditions: # - If the current sentence pair have both Group = 0 (means they've not yet assigned to any group), then create a new group and assign both sentence to this new group # - Else if only one of the sentence has 
Group = 0 in the pair, then that sentence is assigned to the group of the other sentence # - Else sentences are already assigned to other groups, then no need to do anything for index, row in df.iterrows(): if sentences.loc[sentences['Sentences'] == row['Sen_min'], 'Group'].iloc[0] == 0 and sentences.loc[sentences['Sentences'] == row['Sen_max'], 'Group'].iloc[0] == 0: group_count = group_count + 1 sentences.loc[sentences['Sentences'] == row['Sen_min'], 'Group'] = group_count sentences.loc[sentences['Sentences'] == row['Sen_max'], 'Group'] = group_count elif sentences.loc[sentences['Sentences'] == row['Sen_min'], 'Group'].iloc[0] == 0: sentences.loc[sentences['Sentences'] == row['Sen_min'], 'Group'] = sentences.loc[sentences['Sentences'] == row['Sen_max'], 'Group'].iloc[0] elif sentences.loc[sentences['Sentences'] == row['Sen_max'], 'Group'].iloc[0] == 0: sentences.loc[sentences['Sentences'] == row['Sen_max'], 'Group'] = sentences.loc[sentences['Sentences'] == row['Sen_min'], 'Group'].iloc[0] else: pass # At the end if there are still sentences that have not been assigned to any group, then assign them to seperate groups individually for index, row in sentences.iterrows(): if row['Group'] == 0: group_count = group_count + 1 sentences.loc[sentences['Sentences'] == row['Sentences'], 'Group'] = group_count news_clusters[news_index] = [] for gr in sentences["Group"].unique(): news_clusters[news_index].append(sentences[sentences["Group"] == gr]["Sentences"].values.tolist()) return news_clusters if __name__ == "__main__": # Load data. train_data = pd.read_csv("train_data.csv") test_data = pd.read_csv("test_data.csv") # GPU device = torch.device("cuda" if torch.cuda.is_available() else "cpu") n_gpu = torch.cuda.device_count() # Remove NaN values etc. 
text_processor(train_data, train_data.columns.values.tolist()[3:-1]) text_processor(test_data, test_data.columns.values.tolist()[3:], data_type="test") # ALBERT xxlarge-v2 model_class = AlbertForSequenceClassification tokenizer_class = AlbertTokenizer pretrained_weights = 'albert-xxlarge-v2' tokenizer = tokenizer_class.from_pretrained(pretrained_weights, do_lower_case=True) model = model_class.from_pretrained(pretrained_weights, num_labels=2, output_attentions=False, output_hidden_states=False) # Encode sentence pairs in train and test sets using tokenizer. encoded_train = convert_data(train_data) encoded_test = test_convert_data(test_data) # Convert encoded data to tensors. train_tensors = convert_tensors(encoded_train) test_tensors = test_convert_tensors(encoded_test) # Create data loaders to feed the data batch by batch. train_dataloader = get_dataloader(train_tensors, batch_size=16, shuffle=True) test_dataloader = get_dataloader(test_tensors, batch_size=1) # Send model to the device. model.cuda() # Train model train(model, train_dataloader) # SAVE MODEL torch.save(model.state_dict(), "model.pt") # LOAD MODEL #model.load_state_dict(torch.load("model.pt")) #model.cuda() # Test model and get pairwise predictions and logits. pred_lst, logit_lst = test(model, test_dataloader) # Fine-tuned rewards and penalties. rewards = [0.8] penalties = [0.8] for reward in rewards: for penalty in penalties: # Use post_val_data to store the predictions and logits for each sentence pair. post_val_data = test_data.copy() post_val_data["predictions"] = pred_lst post_val_data["logits"] = logit_lst #post_val_data["probabilities"] = prob_lst # Get Scores news_scores = get_scores(post_val_data, reward, penalty) # Get the Clusters. news_clusters = get_clusters(news_scores) # There shouldn't be a duplicate, just for debugging... check_duplicates(news_clusters) # Sort Clusters for evaluation. sort_clusters(news_clusters) # Put cluster predictions also to post_val_data. 
for news_index, clusters in news_clusters.items(): for ind in post_val_data[post_val_data["news_index"] == news_index].index: post_val_data.loc[ind, "prediction_clusters"] = str(clusters) # Get test.json file that doesn't have gold labels. orj_test = pd.read_json("Path_to_Data/test.json", lines=True) # Put cluster predictions to the json file for evaluation. for news_index, clusters in news_clusters.items(): orj_test.loc[news_index, "prediction_clusters"] = str(clusters) # Name cluster predictions as event_clusters for evaluation. orj_test.rename(columns={"prediction_clusters": "event_clusters"}, inplace=True) orj_test.loc[:,'event_clusters'] = orj_test.loc[:,'event_clusters'].apply(lambda x: literal_eval(x)) # Save pairwise and cluster predictions in csv and json format. post_val_data.to_csv("pairwise_predictions.csv", index=None) orj_test.to_json("cluster_predictions.json", orient="records", lines=True) # Use json for final evaluation.
rt_clusters(n
identifier_name
model.py
#!/usr/bin/env python # coding: utf-8 ''' Event Clustering within News Articles accepted to AESPEN in LREC 2020. Faik Kerem Örs, Süveyda Yeniterzi, Reyyan Yeniterzi 2nd April 2020 - Version 1 ''' import numpy as np import pandas as pd import json import torch import time import datetime import random from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler from sklearn.model_selection import train_test_split from transformers import AlbertForSequenceClassification, AlbertTokenizer, AdamW, get_linear_schedule_with_warmup import io from ast import literal_eval import numpy as np %tensorflow_version 2.x import tensorflow as tf SEQ_LEN = 115 # Decided Based on Sentence Lengths def text_processor(data_prep, column_names, data_type="train"): # Remove NaN Rows if data_type == "train": data_prep.dropna(inplace=True) for col_name in column_names: # Strip Spaces data_prep[col_name] = data_prep[col_name].str.strip() def convert_data(data_df): # Encode sentence pairs using the tokenizer. encoded_data = [] for ind in data_df["label"].index: encoded_sents = tokenizer.encode_plus(text=data_df["sent1"][ind], text_pair=data_df["sent2"][ind], add_special_tokens=True, max_length=SEQ_LEN, pad_to_max_length=True, return_token_type_ids=True) encoded_sents["label"] = data_df["label"][ind] encoded_data.append(encoded_sents) return encoded_data def test_convert_data(data_df): # Again we encode the sentence pairs of the test data. # Since we don't have label, we stored index. encoded_data = [] for ind in data_df["sent1"].index: encoded_sents = tokenizer.encode_plus(text=data_df["sent1"][ind], text_pair=data_df["sent2"][ind], add_special_tokens=True, max_length=SEQ_LEN, pad_to_max_length=True, return_token_type_ids=True) encoded_sents["index"] = ind encoded_data.append(encoded_sents) return encoded_data def convert_tensors(encoded_df): # Convert data to tensors. 
encoded_tensors = {'input_ids': [], 'token_type_ids': [], 'attention_mask': [], 'label': []} for elt in encoded_df: for key in encoded_tensors.keys(): encoded_tensors[key].append(elt[key]) for key in encoded_tensors.keys(): encoded_tensors[key] = torch.tensor(encoded_tensors[key]) return encoded_tensors def test_convert_tensors(encoded_df): # Convert test data to tensors. # This time we don't have the labels but indices. encoded_tensors = {'input_ids': [], 'token_type_ids': [], 'attention_mask': [], 'index': []} for elt in encoded_df: for key in encoded_tensors.keys(): encoded_tensors[key].append(elt[key]) for key in encoded_tensors.keys(): encoded_tensors[key] = torch.tensor(encoded_tensors[key]) return encoded_tensors def get_dataloader(dict_tensor, batch_size=32, shuffle=False): # Generate the data loader for training and testing. dataset = TensorDataset(*dict_tensor.values()) if shuffle: # Train data is shuffled. sampler = RandomSampler(dataset) else: # Test data is not shuffled. sampler = SequentialSampler(dataset) dataloader = DataLoader(dataset, sampler=sampler, batch_size=batch_size) return dataloader def flat_accuracy(preds, labels): # Function to calculate the accuracy of our predictions vs labels # Based on https://medium.com/@aniruddha.choudhury94/part-2-bert-fine-tuning-tutorial-with-pytorch-for-text-classification-on-the-corpus-of-linguistic-18057ce330e1 pred_flat = np.argmax(preds, axis=1).flatten() labels_flat = labels.flatten() return np.sum(pred_flat == labels_flat) / len(labels_flat) def format_time(elapsed): ''' Takes a time in seconds and returns a string hh:mm:ss Based on https://medium.com/@aniruddha.choudhury94/part-2-bert-fine-tuning-tutorial-with-pytorch-for-text-classification-on-the-corpus-of-linguistic-18057ce330e1 ''' # Round to the nearest second. 
elapsed_rounded = int(round((elapsed))) # Format as hh:mm:ss return str(datetime.timedelta(seconds=elapsed_rounded)) def get_classes(preds): # Function to calculate the accuracy of our predictions vs labels # Based on https://medium.com/@aniruddha.choudhury94/part-2-bert-fine-tuning-tutorial-with-pytorch-for-text-classification-on-the-corpus-of-linguistic-18057ce330e1 pred_flat = np.argmax(preds, axis=1).flatten() return pred_flat def get_probs(logits): # Converts logits to probabilities. obj = torch.nn.Sigmoid() return obj(logits) def train(model, train_dataloader): # Model Training # Based on https://medium.com/@aniruddha.choudhury94/part-2-bert-fine-tuning-tutorial-with-pytorch-for-text-classification-on-the-corpus-of-linguistic-18057ce330e1 param_optimizer = list(model.named_parameters()) no_decay = ['bias', 'gamma', 'beta'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.01}, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.0} ] # Note: AdamW is a class from the huggingface library (as opposed to pytorch) # I believe the 'W' stands for 'Weight Decay fix" optimizer = AdamW(model.parameters(), lr = 2e-5, # args.learning_rate - default is 5e-5, our notebook had 2e-5 eps = 1e-8 # args.adam_epsilon - default is 1e-8. ) # Number of training epochs (authors recommend between 2 and 4) epochs = 4 # Total number of training steps is number of batches * number of epochs. total_steps = len(train_dataloader) * epochs # Create the learning rate scheduler. 
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps = 0, # Default value in run_glue.py num_training_steps = total_steps) # This training code is based on the `run_glue.py` script here: # https://github.com/huggingface/transformers/blob/5bfcd0485ece086ebcbed2d008813037968a9e58/examples/run_glue.py#L128 # Set the seed value all over the place to make this reproducible. seed_val = 113 random.seed(seed_val) np.random.seed(seed_val) torch.manual_seed(seed_val) torch.cuda.manual_seed_all(seed_val) # Store the average loss after each epoch so we can plot them. loss_values = [] # For each epoch... for epoch_i in range(0, epochs): # ======================================== # Training # ======================================== # Perform one full pass over the training set. print("") print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs)) print('Training...') # Measure how long the training epoch takes. t0 = time.time() # Reset the total loss for this epoch. total_loss = 0 # Put the model into training mode. Don't be mislead--the call to # `train` just changes the *mode*, it doesn't *perform* the training. # `dropout` and `batchnorm` layers behave differently during training # vs. test (Based on https://stackoverflow.com/questions/51433378/what-does-model-train-do-in-pytorch) model.train() # For each batch of training data... for step, batch in enumerate(train_dataloader): # Progress update every 40 batches. if step % 40 == 0 and not step == 0: # Calculate elapsed time in minutes. elapsed = format_time(time.time() - t0) # Report progress. print(' Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(train_dataloader), elapsed)) # Unpack this training batch from our dataloader. # # As we unpack the batch, we'll also copy each tensor to the GPU using the # `to` method. 
# # `batch` contains three pytorch tensors: # [0]: input ids # [1]: attention masks # [2]: labels b_input_ids = batch[0].to(device) b_token_type_ids = batch[1].to(device) b_input_mask = batch[2].to(device) b_labels = batch[3].to(device) # Always clear any previously calculated gradients before performing a # backward pass. PyTorch doesn't do this automatically because # accumulating the gradients is "convenient while training RNNs". # (Based on https://stackoverflow.com/questions/48001598/why-do-we-need-to-call-zero-grad-in-pytorch) model.zero_grad() # Perform a forward pass (evaluate the model on this training batch). # This will return the loss (rather than the model output) because we # have provided the `labels`. # The documentation for this `model` function is here: # https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification outputs = model(b_input_ids, token_type_ids=b_token_type_ids, attention_mask=b_input_mask, labels=b_labels) # The call to `model` always returns a tuple, so we need to pull the # loss value out of the tuple. loss = outputs[0] # Accumulate the training loss over all of the batches so that we can # calculate the average loss at the end. `loss` is a Tensor containing a # single value; the `.item()` function just returns the Python value # from the tensor. total_loss += loss.item() # Perform a backward pass to calculate the gradients. loss.backward() # Clip the norm of the gradients to 1.0. # This is to help prevent the "exploding gradients" problem. torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0) # Update parameters and take a step using the computed gradient. # The optimizer dictates the "update rule"--how the parameters are # modified based on their gradients, the learning rate, etc. optimizer.step() # Update the learning rate. scheduler.step() # Calculate the average loss over the training data. 
avg_train_loss = total_loss / len(train_dataloader) # Store the loss value for plotting the learning curve. loss_values.append(avg_train_loss) print("") print(" Average training loss: {0:.2f}".format(avg_train_loss)) print(" Training epcoh took: {:}".format(format_time(time.time() - t0))) print("") print("Training complete!") def test(model, test_dataloader): # Prediction on test set # Based on https://medium.com/@aniruddha.choudhury94/part-2-bert-fine-tuning-tutorial-with-pytorch-for-text-classification-on-the-corpus-of-linguistic-18057ce330e1 print('Predicting the labels...') eval_loss, eval_accuracy = 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 # Put model in evaluation mode model.eval() # Tracking variables predictions, true_labels, sent_logits, sent_probs = [], [], [], [] # Predict for batch in test_dataloader: # Add batch to GPU batch = tuple(t.to(device) for t in batch) # Unpack the inputs from our dataloader b_input_ids, b_token_type_ids, b_input_mask, b_labels = batch # Telling the model not to compute or store gradients, saving memory and # speeding up prediction with torch.no_grad(): # Forward pass, calculate logit predictions outputs = model(b_input_ids, token_type_ids=b_token_type_ids, attention_mask=b_input_mask) logits = outputs[0] sent_probs.append(get_probs(logits).detach().cpu().numpy()) if len(sent_probs) % 100 == 0: print(len(sent_probs)) # Move logits and labels to CPU logits = logits.detach().cpu().numpy() label_ids = b_labels.to('cpu').numpy() # Calculate the accuracy for this batch of test sentences. #tmp_eval_accuracy = flat_accuracy(logits, label_ids) # Accumulate the total accuracy. 
#eval_accuracy += tmp_eval_accuracy # Track the number of batches nb_eval_steps += 1 # Store predictions and true labels predictions.append(get_classes(logits)[0]) #true_labels.append(label_ids) sent_logits.append(logits.tolist()[0]) print(' DONE.') #print(" Accuracy: {0:.3f}".format(eval_accuracy/nb_eval_steps)) return predictions, sent_logits def sort_clusters(news_clusters): for news_index, clusters in news_clusters.items(): for lst in clusters: lst.sort() clusters.sort(key=lambda x: x[0]) def check_duplicates(news_clusters): # Check an element exists in other lists for news_index, clusters in news_clusters.items(): for lst in clusters: for elt in lst: for lst2 in clusters: if lst != lst2: if elt in lst2: print("Duplicate EXISTS:", news_index) def get_scores(post_val_data, reward, penalty): ''' Scoring Algorithm. ''' news_scores = {} for news_index in post_val_data["news_index"].unique(): # Create a dict to store the pairwise predictions. news_relationships = {} for _, sent_pair in post_val_data[post_val_data["news_index"] == news_index].iterrows(): # Get pair IDs and corresponding pairwise prediction. sent1_index = sent_pair["sent1_index"] sent2_index = sent_pair["sent2_index"] prediction = sent_pair["predictions"] # Store the predictions in the format: {sent_id1: {another_sent_id1: pairwise_prediction1, another_sent_id2: pairwise_prediction2}} if sent1_index in news_relationships: news_relationships[sent1_index][sent2_index] = prediction else: news_relationships[sent1_index] = {sent2_index: prediction} # Store the relationships symmetrically # Symmetric case would be: {sent_id: {another_sent_id: pairwise_prediction}, {another_sent_id: {sent_id: pairwise_prediction}}} if sent1_index < sent2_index: if sent2_index in news_relationships: news_relationships[sent2_index][sent1_index] = prediction else: news_relationships[sent2_index] = {sent1_index: prediction} # Create a dict to store the relationship scores. 
# Neighbor terminology used in this code means that the sentence pairs 'main_key' and 'neigh_key' appear to be in the same cluster. final_neighbors = {} for main_key, main_neighs in news_relationships.items(): # The first sentence, say 'main_key' final_neighbors[main_key] = {} for main_neigh in main_neighs.items(): # 'main_neigh_key': The second sentence that forms the pair together with the first sentence 'main_key' # 'main_pred': Model's prediction for the pair (main_key, main_neigh_key) main_neigh_key, main_pred = main_neigh # Set initial scores based the pairwise predictions. # 1 if they are predicted to be in the same cluster. # -1 Otherwise (penalize). if main_pred == 1: neighbor_score = 1 else: neighbor_score = -1 # Consider common relationships that main_key and main_neigh_key have. # Reward their pairwise score if they have common neighbors. # Penalize their pairwise if they have neighbors that are not common. if main_neigh_key in news_relationships: # Iterate over the neighbors of main_neigh_key (the second sentence) for helper_neighs in news_relationships[main_neigh_key].items(): # 'helper_neigh_key': The sentence that forms the pair together with the second sentence 'main_neigh_key' # 'main_pred': Model's prediction for the pair (main_neigh_key, helper_neigh_key) helper_neigh_key, helper_neigh_pred = helper_neighs # Iterate over the neighbors of main_key to see whether it also appears to be in the same cluster with helper_neigh_key for x_neigh_key, x_pred in main_neighs.items(): if x_neigh_key == helper_neigh_key: # If main_key (the first sentence) and main_neigh_key (the second sentence) have a common neighbor, reward their pairwise score. # If helper_neigh_key is the neighbor of only one of them (the first or second sentence), penalize the pairwise score of main_key and main_neigh_key. # Otherwise, do nothing since we might not know. 
if x_pred == 1 and helper_neigh_pred == 1: neighbor_score += reward elif x_pred == 1 and helper_neigh_pred == 0: neighbor_score -= penalty elif x_pred == 0 and helper_neigh_pred == 1: neighbor_score -= penalty break # Scores for one news. final_neighbors[main_key][main_neigh_key] = neighbor_score # Store the scores together with the corresponding news. news_scores[news_index] = final_neighbors return news_scores def get_clusters(news_scores): ''' Clustering Algorithm ''' # Example input ''' scores = {2: {4: 1, 27: 0, 36: 2, 37: 0, 40: -6, 43: -4}, 4: {2: 1, 27: 0, 36: -1, 37: -1, 40: -3, 43: -3}, 27: {2: 0, 4: 0, 36: 0, 37: -2, 40: -4, 43: -2}, 36: {2: 2, 4: -1, 27: 0, 37: 1, 40: -5, 43: -3}, 37: {2: 0, 4: -1, 27: -2, 36: 1, 40: -4, 43: -5}, 40: {2: -6, 4: -3, 27: -4, 36: -5, 37: -4, 43: 0}, 43: {2: -4, 4: -3, 27: -2, 36: -3, 37: -5, 40: 0}} ''' news_clusters = {} for news_index, scores in news_scores.items(): column_names = ["Sen_1", "Sen_2", "Score"] df = pd.DataFrame(columns = column_names) # Create a dataframe of pairwise sentence scores for sentence, scores in scores.items(): for key in scores: df = df.append(pd.DataFrame({"Sen_1":[sentence], "Sen_2":[key], "Score":[scores[key]]}) , ignore_index = True) # Sort the dataframe by descending order of score, and the ascending order of sentence 1 and 2 df.sort_values(by=['Score', 'Sen_1', 'Sen_2'], ascending=[0, 1, 1], inplace = True) # Create a sentence list with all currently assigned to group 0 sentences = pd.DataFrame(set(df['Sen_1'].tolist()), columns =['Sentences']) sentences['Group'] = 0 # Eliminate all sentence pairs with score <= 0 df = df[df['Score'] > 0] group_count = 0 if not df.empty: # Eliminate duplicate rows df['Sen_min'] = df.apply(lambda row: min(row.Sen_1, row.Sen_2), axis=1) df['Sen_max'] = df.apply(lambda row: max(row.Sen_1, row.Sen_2), axis=1) df.drop(['Sen_1', 'Sen_2'], axis=1, inplace=True) df.drop_duplicates(inplace = True) # Iterate over the dataframe and assign sentence pairs to groups 
based on the below conditions: # - If the current sentence pair have both Group = 0 (means they've not yet assigned to any group), then create a new group and assign both sentence to this new group # - Else if only one of the sentence has Group = 0 in the pair, then that sentence is assigned to the group of the other sentence # - Else sentences are already assigned to other groups, then no need to do anything for index, row in df.iterrows(): if sentences.loc[sentences['Sentences'] == row['Sen_min'], 'Group'].iloc[0] == 0 and sentences.loc[sentences['Sentences'] == row['Sen_max'], 'Group'].iloc[0] == 0: group_count = group_count + 1 sentences.loc[sentences['Sentences'] == row['Sen_min'], 'Group'] = group_count sentences.loc[sentences['Sentences'] == row['Sen_max'], 'Group'] = group_count elif sentences.loc[sentences['Sentences'] == row['Sen_min'], 'Group'].iloc[0] == 0:
sentences.loc[sentences['Sentences'] == row['Sen_max'], 'Group'] = sentences.loc[sentences['Sentences'] == row['Sen_min'], 'Group'].iloc[0] else: pass # At the end if there are still sentences that have not been assigned to any group, then assign them to seperate groups individually for index, row in sentences.iterrows(): if row['Group'] == 0: group_count = group_count + 1 sentences.loc[sentences['Sentences'] == row['Sentences'], 'Group'] = group_count news_clusters[news_index] = [] for gr in sentences["Group"].unique(): news_clusters[news_index].append(sentences[sentences["Group"] == gr]["Sentences"].values.tolist()) return news_clusters if __name__ == "__main__": # Load data. train_data = pd.read_csv("train_data.csv") test_data = pd.read_csv("test_data.csv") # GPU device = torch.device("cuda" if torch.cuda.is_available() else "cpu") n_gpu = torch.cuda.device_count() # Remove NaN values etc. text_processor(train_data, train_data.columns.values.tolist()[3:-1]) text_processor(test_data, test_data.columns.values.tolist()[3:], data_type="test") # ALBERT xxlarge-v2 model_class = AlbertForSequenceClassification tokenizer_class = AlbertTokenizer pretrained_weights = 'albert-xxlarge-v2' tokenizer = tokenizer_class.from_pretrained(pretrained_weights, do_lower_case=True) model = model_class.from_pretrained(pretrained_weights, num_labels=2, output_attentions=False, output_hidden_states=False) # Encode sentence pairs in train and test sets using tokenizer. encoded_train = convert_data(train_data) encoded_test = test_convert_data(test_data) # Convert encoded data to tensors. train_tensors = convert_tensors(encoded_train) test_tensors = test_convert_tensors(encoded_test) # Create data loaders to feed the data batch by batch. train_dataloader = get_dataloader(train_tensors, batch_size=16, shuffle=True) test_dataloader = get_dataloader(test_tensors, batch_size=1) # Send model to the device. 
model.cuda() # Train model train(model, train_dataloader) # SAVE MODEL torch.save(model.state_dict(), "model.pt") # LOAD MODEL #model.load_state_dict(torch.load("model.pt")) #model.cuda() # Test model and get pairwise predictions and logits. pred_lst, logit_lst = test(model, test_dataloader) # Fine-tuned rewards and penalties. rewards = [0.8] penalties = [0.8] for reward in rewards: for penalty in penalties: # Use post_val_data to store the predictions and logits for each sentence pair. post_val_data = test_data.copy() post_val_data["predictions"] = pred_lst post_val_data["logits"] = logit_lst #post_val_data["probabilities"] = prob_lst # Get Scores news_scores = get_scores(post_val_data, reward, penalty) # Get the Clusters. news_clusters = get_clusters(news_scores) # There shouldn't be a duplicate, just for debugging... check_duplicates(news_clusters) # Sort Clusters for evaluation. sort_clusters(news_clusters) # Put cluster predictions also to post_val_data. for news_index, clusters in news_clusters.items(): for ind in post_val_data[post_val_data["news_index"] == news_index].index: post_val_data.loc[ind, "prediction_clusters"] = str(clusters) # Get test.json file that doesn't have gold labels. orj_test = pd.read_json("Path_to_Data/test.json", lines=True) # Put cluster predictions to the json file for evaluation. for news_index, clusters in news_clusters.items(): orj_test.loc[news_index, "prediction_clusters"] = str(clusters) # Name cluster predictions as event_clusters for evaluation. orj_test.rename(columns={"prediction_clusters": "event_clusters"}, inplace=True) orj_test.loc[:,'event_clusters'] = orj_test.loc[:,'event_clusters'].apply(lambda x: literal_eval(x)) # Save pairwise and cluster predictions in csv and json format. post_val_data.to_csv("pairwise_predictions.csv", index=None) orj_test.to_json("cluster_predictions.json", orient="records", lines=True) # Use json for final evaluation.
sentences.loc[sentences['Sentences'] == row['Sen_min'], 'Group'] = sentences.loc[sentences['Sentences'] == row['Sen_max'], 'Group'].iloc[0] elif sentences.loc[sentences['Sentences'] == row['Sen_max'], 'Group'].iloc[0] == 0:
random_line_split
model.py
#!/usr/bin/env python # coding: utf-8 ''' Event Clustering within News Articles accepted to AESPEN in LREC 2020. Faik Kerem Örs, Süveyda Yeniterzi, Reyyan Yeniterzi 2nd April 2020 - Version 1 ''' import numpy as np import pandas as pd import json import torch import time import datetime import random from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler from sklearn.model_selection import train_test_split from transformers import AlbertForSequenceClassification, AlbertTokenizer, AdamW, get_linear_schedule_with_warmup import io from ast import literal_eval import numpy as np %tensorflow_version 2.x import tensorflow as tf SEQ_LEN = 115 # Decided Based on Sentence Lengths def text_processor(data_prep, column_names, data_type="train"): # Remove NaN Rows if data_type == "train": data_prep.dropna(inplace=True) for col_name in column_names: # Strip Spaces data_prep[col_name] = data_prep[col_name].str.strip() def convert_data(data_df): # Encode sentence pairs using the tokenizer. encoded_data = [] for ind in data_df["label"].index: encoded_sents = tokenizer.encode_plus(text=data_df["sent1"][ind], text_pair=data_df["sent2"][ind], add_special_tokens=True, max_length=SEQ_LEN, pad_to_max_length=True, return_token_type_ids=True) encoded_sents["label"] = data_df["label"][ind] encoded_data.append(encoded_sents) return encoded_data def test_convert_data(data_df): # Again we encode the sentence pairs of the test data. # Since we don't have label, we stored index. encoded_data = [] for ind in data_df["sent1"].index: encoded_sents = tokenizer.encode_plus(text=data_df["sent1"][ind], text_pair=data_df["sent2"][ind], add_special_tokens=True, max_length=SEQ_LEN, pad_to_max_length=True, return_token_type_ids=True) encoded_sents["index"] = ind encoded_data.append(encoded_sents) return encoded_data def convert_tensors(encoded_df): # Convert data to tensors. 
encoded_tensors = {'input_ids': [], 'token_type_ids': [], 'attention_mask': [], 'label': []} for elt in encoded_df: for key in encoded_tensors.keys(): encoded_tensors[key].append(elt[key]) for key in encoded_tensors.keys(): encoded_tensors[key] = torch.tensor(encoded_tensors[key]) return encoded_tensors def test_convert_tensors(encoded_df): # Convert test data to tensors. # This time we don't have the labels but indices. encoded_tensors = {'input_ids': [], 'token_type_ids': [], 'attention_mask': [], 'index': []} for elt in encoded_df: for key in encoded_tensors.keys(): encoded_tensors[key].append(elt[key]) for key in encoded_tensors.keys(): encoded_tensors[key] = torch.tensor(encoded_tensors[key]) return encoded_tensors def get_dataloader(dict_tensor, batch_size=32, shuffle=False): # Generate the data loader for training and testing. dataset = TensorDataset(*dict_tensor.values()) if shuffle: # Train data is shuffled. sampler = RandomSampler(dataset) else: # Test data is not shuffled. sampler = SequentialSampler(dataset) dataloader = DataLoader(dataset, sampler=sampler, batch_size=batch_size) return dataloader def flat_accuracy(preds, labels): # Function to calculate the accuracy of our predictions vs labels # Based on https://medium.com/@aniruddha.choudhury94/part-2-bert-fine-tuning-tutorial-with-pytorch-for-text-classification-on-the-corpus-of-linguistic-18057ce330e1 pred_flat = np.argmax(preds, axis=1).flatten() labels_flat = labels.flatten() return np.sum(pred_flat == labels_flat) / len(labels_flat) def format_time(elapsed): ''' Takes a time in seconds and returns a string hh:mm:ss Based on https://medium.com/@aniruddha.choudhury94/part-2-bert-fine-tuning-tutorial-with-pytorch-for-text-classification-on-the-corpus-of-linguistic-18057ce330e1 ''' # Round to the nearest second. 
elapsed_rounded = int(round((elapsed))) # Format as hh:mm:ss return str(datetime.timedelta(seconds=elapsed_rounded)) def get_classes(preds): # Function to calculate the accuracy of our predictions vs labels # Based on https://medium.com/@aniruddha.choudhury94/part-2-bert-fine-tuning-tutorial-with-pytorch-for-text-classification-on-the-corpus-of-linguistic-18057ce330e1 pred_flat = np.argmax(preds, axis=1).flatten() return pred_flat def get_probs(logits): # Converts logits to probabilities. obj = torch.nn.Sigmoid() return obj(logits) def train(model, train_dataloader): # Model Training # Based on https://medium.com/@aniruddha.choudhury94/part-2-bert-fine-tuning-tutorial-with-pytorch-for-text-classification-on-the-corpus-of-linguistic-18057ce330e1 param_optimizer = list(model.named_parameters()) no_decay = ['bias', 'gamma', 'beta'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.01}, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.0} ] # Note: AdamW is a class from the huggingface library (as opposed to pytorch) # I believe the 'W' stands for 'Weight Decay fix" optimizer = AdamW(model.parameters(), lr = 2e-5, # args.learning_rate - default is 5e-5, our notebook had 2e-5 eps = 1e-8 # args.adam_epsilon - default is 1e-8. ) # Number of training epochs (authors recommend between 2 and 4) epochs = 4 # Total number of training steps is number of batches * number of epochs. total_steps = len(train_dataloader) * epochs # Create the learning rate scheduler. 
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps = 0, # Default value in run_glue.py num_training_steps = total_steps) # This training code is based on the `run_glue.py` script here: # https://github.com/huggingface/transformers/blob/5bfcd0485ece086ebcbed2d008813037968a9e58/examples/run_glue.py#L128 # Set the seed value all over the place to make this reproducible. seed_val = 113 random.seed(seed_val) np.random.seed(seed_val) torch.manual_seed(seed_val) torch.cuda.manual_seed_all(seed_val) # Store the average loss after each epoch so we can plot them. loss_values = [] # For each epoch... for epoch_i in range(0, epochs): # ======================================== # Training # ======================================== # Perform one full pass over the training set. print("") print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs)) print('Training...') # Measure how long the training epoch takes. t0 = time.time() # Reset the total loss for this epoch. total_loss = 0 # Put the model into training mode. Don't be mislead--the call to # `train` just changes the *mode*, it doesn't *perform* the training. # `dropout` and `batchnorm` layers behave differently during training # vs. test (Based on https://stackoverflow.com/questions/51433378/what-does-model-train-do-in-pytorch) model.train() # For each batch of training data... for step, batch in enumerate(train_dataloader): # Progress update every 40 batches. if step % 40 == 0 and not step == 0: # Calculate elapsed time in minutes. el
# Unpack this training batch from our dataloader. # # As we unpack the batch, we'll also copy each tensor to the GPU using the # `to` method. # # `batch` contains three pytorch tensors: # [0]: input ids # [1]: attention masks # [2]: labels b_input_ids = batch[0].to(device) b_token_type_ids = batch[1].to(device) b_input_mask = batch[2].to(device) b_labels = batch[3].to(device) # Always clear any previously calculated gradients before performing a # backward pass. PyTorch doesn't do this automatically because # accumulating the gradients is "convenient while training RNNs". # (Based on https://stackoverflow.com/questions/48001598/why-do-we-need-to-call-zero-grad-in-pytorch) model.zero_grad() # Perform a forward pass (evaluate the model on this training batch). # This will return the loss (rather than the model output) because we # have provided the `labels`. # The documentation for this `model` function is here: # https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification outputs = model(b_input_ids, token_type_ids=b_token_type_ids, attention_mask=b_input_mask, labels=b_labels) # The call to `model` always returns a tuple, so we need to pull the # loss value out of the tuple. loss = outputs[0] # Accumulate the training loss over all of the batches so that we can # calculate the average loss at the end. `loss` is a Tensor containing a # single value; the `.item()` function just returns the Python value # from the tensor. total_loss += loss.item() # Perform a backward pass to calculate the gradients. loss.backward() # Clip the norm of the gradients to 1.0. # This is to help prevent the "exploding gradients" problem. torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0) # Update parameters and take a step using the computed gradient. # The optimizer dictates the "update rule"--how the parameters are # modified based on their gradients, the learning rate, etc. optimizer.step() # Update the learning rate. 
scheduler.step() # Calculate the average loss over the training data. avg_train_loss = total_loss / len(train_dataloader) # Store the loss value for plotting the learning curve. loss_values.append(avg_train_loss) print("") print(" Average training loss: {0:.2f}".format(avg_train_loss)) print(" Training epcoh took: {:}".format(format_time(time.time() - t0))) print("") print("Training complete!") def test(model, test_dataloader): # Prediction on test set # Based on https://medium.com/@aniruddha.choudhury94/part-2-bert-fine-tuning-tutorial-with-pytorch-for-text-classification-on-the-corpus-of-linguistic-18057ce330e1 print('Predicting the labels...') eval_loss, eval_accuracy = 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 # Put model in evaluation mode model.eval() # Tracking variables predictions, true_labels, sent_logits, sent_probs = [], [], [], [] # Predict for batch in test_dataloader: # Add batch to GPU batch = tuple(t.to(device) for t in batch) # Unpack the inputs from our dataloader b_input_ids, b_token_type_ids, b_input_mask, b_labels = batch # Telling the model not to compute or store gradients, saving memory and # speeding up prediction with torch.no_grad(): # Forward pass, calculate logit predictions outputs = model(b_input_ids, token_type_ids=b_token_type_ids, attention_mask=b_input_mask) logits = outputs[0] sent_probs.append(get_probs(logits).detach().cpu().numpy()) if len(sent_probs) % 100 == 0: print(len(sent_probs)) # Move logits and labels to CPU logits = logits.detach().cpu().numpy() label_ids = b_labels.to('cpu').numpy() # Calculate the accuracy for this batch of test sentences. #tmp_eval_accuracy = flat_accuracy(logits, label_ids) # Accumulate the total accuracy. 
#eval_accuracy += tmp_eval_accuracy # Track the number of batches nb_eval_steps += 1 # Store predictions and true labels predictions.append(get_classes(logits)[0]) #true_labels.append(label_ids) sent_logits.append(logits.tolist()[0]) print(' DONE.') #print(" Accuracy: {0:.3f}".format(eval_accuracy/nb_eval_steps)) return predictions, sent_logits def sort_clusters(news_clusters): for news_index, clusters in news_clusters.items(): for lst in clusters: lst.sort() clusters.sort(key=lambda x: x[0]) def check_duplicates(news_clusters): # Check an element exists in other lists for news_index, clusters in news_clusters.items(): for lst in clusters: for elt in lst: for lst2 in clusters: if lst != lst2: if elt in lst2: print("Duplicate EXISTS:", news_index) def get_scores(post_val_data, reward, penalty): ''' Scoring Algorithm. ''' news_scores = {} for news_index in post_val_data["news_index"].unique(): # Create a dict to store the pairwise predictions. news_relationships = {} for _, sent_pair in post_val_data[post_val_data["news_index"] == news_index].iterrows(): # Get pair IDs and corresponding pairwise prediction. sent1_index = sent_pair["sent1_index"] sent2_index = sent_pair["sent2_index"] prediction = sent_pair["predictions"] # Store the predictions in the format: {sent_id1: {another_sent_id1: pairwise_prediction1, another_sent_id2: pairwise_prediction2}} if sent1_index in news_relationships: news_relationships[sent1_index][sent2_index] = prediction else: news_relationships[sent1_index] = {sent2_index: prediction} # Store the relationships symmetrically # Symmetric case would be: {sent_id: {another_sent_id: pairwise_prediction}, {another_sent_id: {sent_id: pairwise_prediction}}} if sent1_index < sent2_index: if sent2_index in news_relationships: news_relationships[sent2_index][sent1_index] = prediction else: news_relationships[sent2_index] = {sent1_index: prediction} # Create a dict to store the relationship scores. 
# Neighbor terminology used in this code means that the sentence pairs 'main_key' and 'neigh_key' appear to be in the same cluster. final_neighbors = {} for main_key, main_neighs in news_relationships.items(): # The first sentence, say 'main_key' final_neighbors[main_key] = {} for main_neigh in main_neighs.items(): # 'main_neigh_key': The second sentence that forms the pair together with the first sentence 'main_key' # 'main_pred': Model's prediction for the pair (main_key, main_neigh_key) main_neigh_key, main_pred = main_neigh # Set initial scores based the pairwise predictions. # 1 if they are predicted to be in the same cluster. # -1 Otherwise (penalize). if main_pred == 1: neighbor_score = 1 else: neighbor_score = -1 # Consider common relationships that main_key and main_neigh_key have. # Reward their pairwise score if they have common neighbors. # Penalize their pairwise if they have neighbors that are not common. if main_neigh_key in news_relationships: # Iterate over the neighbors of main_neigh_key (the second sentence) for helper_neighs in news_relationships[main_neigh_key].items(): # 'helper_neigh_key': The sentence that forms the pair together with the second sentence 'main_neigh_key' # 'main_pred': Model's prediction for the pair (main_neigh_key, helper_neigh_key) helper_neigh_key, helper_neigh_pred = helper_neighs # Iterate over the neighbors of main_key to see whether it also appears to be in the same cluster with helper_neigh_key for x_neigh_key, x_pred in main_neighs.items(): if x_neigh_key == helper_neigh_key: # If main_key (the first sentence) and main_neigh_key (the second sentence) have a common neighbor, reward their pairwise score. # If helper_neigh_key is the neighbor of only one of them (the first or second sentence), penalize the pairwise score of main_key and main_neigh_key. # Otherwise, do nothing since we might not know. 
if x_pred == 1 and helper_neigh_pred == 1: neighbor_score += reward elif x_pred == 1 and helper_neigh_pred == 0: neighbor_score -= penalty elif x_pred == 0 and helper_neigh_pred == 1: neighbor_score -= penalty break # Scores for one news. final_neighbors[main_key][main_neigh_key] = neighbor_score # Store the scores together with the corresponding news. news_scores[news_index] = final_neighbors return news_scores def get_clusters(news_scores): ''' Clustering Algorithm ''' # Example input ''' scores = {2: {4: 1, 27: 0, 36: 2, 37: 0, 40: -6, 43: -4}, 4: {2: 1, 27: 0, 36: -1, 37: -1, 40: -3, 43: -3}, 27: {2: 0, 4: 0, 36: 0, 37: -2, 40: -4, 43: -2}, 36: {2: 2, 4: -1, 27: 0, 37: 1, 40: -5, 43: -3}, 37: {2: 0, 4: -1, 27: -2, 36: 1, 40: -4, 43: -5}, 40: {2: -6, 4: -3, 27: -4, 36: -5, 37: -4, 43: 0}, 43: {2: -4, 4: -3, 27: -2, 36: -3, 37: -5, 40: 0}} ''' news_clusters = {} for news_index, scores in news_scores.items(): column_names = ["Sen_1", "Sen_2", "Score"] df = pd.DataFrame(columns = column_names) # Create a dataframe of pairwise sentence scores for sentence, scores in scores.items(): for key in scores: df = df.append(pd.DataFrame({"Sen_1":[sentence], "Sen_2":[key], "Score":[scores[key]]}) , ignore_index = True) # Sort the dataframe by descending order of score, and the ascending order of sentence 1 and 2 df.sort_values(by=['Score', 'Sen_1', 'Sen_2'], ascending=[0, 1, 1], inplace = True) # Create a sentence list with all currently assigned to group 0 sentences = pd.DataFrame(set(df['Sen_1'].tolist()), columns =['Sentences']) sentences['Group'] = 0 # Eliminate all sentence pairs with score <= 0 df = df[df['Score'] > 0] group_count = 0 if not df.empty: # Eliminate duplicate rows df['Sen_min'] = df.apply(lambda row: min(row.Sen_1, row.Sen_2), axis=1) df['Sen_max'] = df.apply(lambda row: max(row.Sen_1, row.Sen_2), axis=1) df.drop(['Sen_1', 'Sen_2'], axis=1, inplace=True) df.drop_duplicates(inplace = True) # Iterate over the dataframe and assign sentence pairs to groups 
based on the below conditions: # - If the current sentence pair have both Group = 0 (means they've not yet assigned to any group), then create a new group and assign both sentence to this new group # - Else if only one of the sentence has Group = 0 in the pair, then that sentence is assigned to the group of the other sentence # - Else sentences are already assigned to other groups, then no need to do anything for index, row in df.iterrows(): if sentences.loc[sentences['Sentences'] == row['Sen_min'], 'Group'].iloc[0] == 0 and sentences.loc[sentences['Sentences'] == row['Sen_max'], 'Group'].iloc[0] == 0: group_count = group_count + 1 sentences.loc[sentences['Sentences'] == row['Sen_min'], 'Group'] = group_count sentences.loc[sentences['Sentences'] == row['Sen_max'], 'Group'] = group_count elif sentences.loc[sentences['Sentences'] == row['Sen_min'], 'Group'].iloc[0] == 0: sentences.loc[sentences['Sentences'] == row['Sen_min'], 'Group'] = sentences.loc[sentences['Sentences'] == row['Sen_max'], 'Group'].iloc[0] elif sentences.loc[sentences['Sentences'] == row['Sen_max'], 'Group'].iloc[0] == 0: sentences.loc[sentences['Sentences'] == row['Sen_max'], 'Group'] = sentences.loc[sentences['Sentences'] == row['Sen_min'], 'Group'].iloc[0] else: pass # At the end if there are still sentences that have not been assigned to any group, then assign them to seperate groups individually for index, row in sentences.iterrows(): if row['Group'] == 0: group_count = group_count + 1 sentences.loc[sentences['Sentences'] == row['Sentences'], 'Group'] = group_count news_clusters[news_index] = [] for gr in sentences["Group"].unique(): news_clusters[news_index].append(sentences[sentences["Group"] == gr]["Sentences"].values.tolist()) return news_clusters if __name__ == "__main__": # Load data. 
train_data = pd.read_csv("train_data.csv") test_data = pd.read_csv("test_data.csv") # GPU device = torch.device("cuda" if torch.cuda.is_available() else "cpu") n_gpu = torch.cuda.device_count() # Remove NaN values etc. text_processor(train_data, train_data.columns.values.tolist()[3:-1]) text_processor(test_data, test_data.columns.values.tolist()[3:], data_type="test") # ALBERT xxlarge-v2 model_class = AlbertForSequenceClassification tokenizer_class = AlbertTokenizer pretrained_weights = 'albert-xxlarge-v2' tokenizer = tokenizer_class.from_pretrained(pretrained_weights, do_lower_case=True) model = model_class.from_pretrained(pretrained_weights, num_labels=2, output_attentions=False, output_hidden_states=False) # Encode sentence pairs in train and test sets using tokenizer. encoded_train = convert_data(train_data) encoded_test = test_convert_data(test_data) # Convert encoded data to tensors. train_tensors = convert_tensors(encoded_train) test_tensors = test_convert_tensors(encoded_test) # Create data loaders to feed the data batch by batch. train_dataloader = get_dataloader(train_tensors, batch_size=16, shuffle=True) test_dataloader = get_dataloader(test_tensors, batch_size=1) # Send model to the device. model.cuda() # Train model train(model, train_dataloader) # SAVE MODEL torch.save(model.state_dict(), "model.pt") # LOAD MODEL #model.load_state_dict(torch.load("model.pt")) #model.cuda() # Test model and get pairwise predictions and logits. pred_lst, logit_lst = test(model, test_dataloader) # Fine-tuned rewards and penalties. rewards = [0.8] penalties = [0.8] for reward in rewards: for penalty in penalties: # Use post_val_data to store the predictions and logits for each sentence pair. post_val_data = test_data.copy() post_val_data["predictions"] = pred_lst post_val_data["logits"] = logit_lst #post_val_data["probabilities"] = prob_lst # Get Scores news_scores = get_scores(post_val_data, reward, penalty) # Get the Clusters. 
news_clusters = get_clusters(news_scores) # There shouldn't be a duplicate, just for debugging... check_duplicates(news_clusters) # Sort Clusters for evaluation. sort_clusters(news_clusters) # Put cluster predictions also to post_val_data. for news_index, clusters in news_clusters.items(): for ind in post_val_data[post_val_data["news_index"] == news_index].index: post_val_data.loc[ind, "prediction_clusters"] = str(clusters) # Get test.json file that doesn't have gold labels. orj_test = pd.read_json("Path_to_Data/test.json", lines=True) # Put cluster predictions to the json file for evaluation. for news_index, clusters in news_clusters.items(): orj_test.loc[news_index, "prediction_clusters"] = str(clusters) # Name cluster predictions as event_clusters for evaluation. orj_test.rename(columns={"prediction_clusters": "event_clusters"}, inplace=True) orj_test.loc[:,'event_clusters'] = orj_test.loc[:,'event_clusters'].apply(lambda x: literal_eval(x)) # Save pairwise and cluster predictions in csv and json format. post_val_data.to_csv("pairwise_predictions.csv", index=None) orj_test.to_json("cluster_predictions.json", orient="records", lines=True) # Use json for final evaluation.
apsed = format_time(time.time() - t0) # Report progress. print(' Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(train_dataloader), elapsed))
conditional_block
toggle.rs
use bars_duration_ticks; use conrod_core::{self as conrod, widget}; use env; use ruler; use time_calc::{self as time, Ticks}; use track; pub use env::{Point, PointTrait, Toggle as ToggleValue, Trait as EnvelopeTrait}; /// The envelope type compatible with the `Toggle` automation track. pub type Envelope = env::bounded::Envelope<ToggleValue>; /// For viewing and manipulating series of boolean valued points over time. #[derive(WidgetCommon)] pub struct Toggle<'a> { #[conrod(common_builder)] common: widget::CommonBuilder, envelope: &'a Envelope, bars: &'a [time::TimeSig], ppqn: time::Ppqn, /// The position of the playhead in ticks along with the change in its position in ticks. pub maybe_playhead: Option<(Ticks, Ticks)>, style: Style, } /// Unique state for the Toggle automation. pub struct
{ ids: Ids, } widget_ids! { struct Ids { circles[], rectangles[], phantom_line, } } #[derive(Copy, Clone, Debug, Default, PartialEq, WidgetStyle)] pub struct Style { #[conrod(default = "theme.shape_color")] pub color: Option<conrod::Color>, #[conrod(default = "4.0")] pub point_radius: Option<conrod::Scalar>, } /// The various kinds of events returned by an automation track. #[derive(Copy, Clone, Debug)] pub enum Event { /// Upon playhead movement, represents new boolean value at playhead. Interpolate(bool), /// Indicatees that the toggle value has changed since the last update. SwitchTo(bool), /// Some event which would mutate the envelope has occurred. Mutate(super::Mutate<ToggleValue>), } impl<'a> Toggle<'a> { /// Construct a new default Automation. pub fn new(bars: &'a [time::TimeSig], ppqn: time::Ppqn, envelope: &'a Envelope) -> Self { Toggle { bars: bars, ppqn: ppqn, maybe_playhead: None, envelope: envelope, common: widget::CommonBuilder::default(), style: Style::default(), } } builder_methods! 
{ pub point_radius { style.point_radius = Some(conrod::Scalar) } } } impl<'a> track::Widget for Toggle<'a> { fn playhead(mut self, playhead: (Ticks, Ticks)) -> Self { self.maybe_playhead = Some(playhead); self } } impl<'a> conrod::Colorable for Toggle<'a> { builder_method!(color { style.color = Some(conrod::Color) }); } impl<'a> conrod::Widget for Toggle<'a> { type State = State; type Style = Style; type Event = Vec<Event>; fn init_state(&self, id_gen: widget::id::Generator) -> Self::State { State { ids: Ids::new(id_gen), } } fn style(&self) -> Self::Style { self.style.clone() } fn default_y_dimension(&self, ui: &conrod::Ui) -> conrod::position::Dimension { ui.theme .widget_style::<Style>() .and_then(|default| default.common.maybe_y_dimension) .unwrap_or(conrod::position::Dimension::Absolute( super::super::DEFAULT_HEIGHT, )) } fn update(self, args: widget::UpdateArgs<Self>) -> Self::Event { use super::Elem; use conrod_core::utils::{clamp, map_range}; use conrod_core::{Colorable, Positionable}; let widget::UpdateArgs { id, rect, state, style, ui, .. } = args; let Toggle { envelope, bars, ppqn, maybe_playhead, .. } = self; let num_points = envelope.points().count(); let num_rectangles = { let mut points = envelope.points(); points .next() .map(|first| { let mut prev_toggle = first.value; let mut count = 0; for point in points { if prev_toggle == ToggleValue(true) { count += 1; } prev_toggle = point.value; } count }) .unwrap_or(0) }; // Ensure we have a circle index for each point. if state.ids.circles.len() < num_points { let id_gen = &mut ui.widget_id_generator(); state.update(|state| state.ids.circles.resize(num_points, id_gen)); } // Ensure we have a rectangle index for each point. 
if state.ids.rectangles.len() < num_rectangles { let id_gen = &mut ui.widget_id_generator(); state.update(|state| state.ids.rectangles.resize(num_rectangles, id_gen)); } let (w, h) = rect.w_h(); let half_h = h / 2.0; let color = style.color(ui.theme()); let point_radius = style.point_radius(ui.theme()); let total_ticks = bars_duration_ticks(bars.iter().cloned(), ppqn); // Get the time in ticks from some position over the Bang automation. let ticks_from_x = |x: conrod::Scalar| { Ticks(map_range( x, rect.left(), rect.right(), 0, total_ticks.ticks(), )) }; // `false` if `y` is closer to the bottom, `true` if y is closer to the top. let value_from_y = |y: conrod::Scalar| { let perc = map_range(y, rect.bottom(), rect.top(), 0.0, 1.0); if perc < 0.5 { ToggleValue(false) } else { ToggleValue(true) } }; // Same as `ticks_from_x` but clamps the ticks to the total_ticks range. let clamped_ticks_from_x = |x: conrod::Scalar| clamp(ticks_from_x(x), Ticks(0), total_ticks); // All that remains is to instantiate the graphics widgets. // // Check whether or not we need to do so by checking whether or not we're visible. if conrod::graph::algo::cropped_area_of_widget(ui.widget_graph(), id).is_none() { return Vec::new(); } // Determine the element range over which the playhead has traversed since the last update. let playhead_delta_range = match maybe_playhead { Some((playhead, delta)) if delta > Ticks(0) => { let start = playhead - delta; let end = playhead; super::maybe_surrounding_elems(total_ticks, envelope, start, end) } _ => None, }; // A function for instantiating a Circle widget for a point. let point_widget = |i: usize, x_offset: conrod::Scalar, value: ToggleValue, point_id: widget::Id, ui: &mut conrod::UiCell, events: &mut Vec<Event>| { for widget_event in ui.widget_input(point_id).events() { use conrod_core::{event, input}; match widget_event { // Check to see if the toggle point is being dragged. 
event::Widget::Drag(drag) if drag.button == input::MouseButton::Left => { let point_rect = ui.rect_of(point_id).unwrap(); let drag_to_abs_xy = conrod::utils::vec2_add(drag.to, point_rect.xy()); let drag_point = super::DragPoint { idx: i, ticks: clamped_ticks_from_x(drag_to_abs_xy[0]), value: value_from_y(drag_to_abs_xy[1]), }; events.push(Event::Mutate(drag_point.into())); } // Check to see if the toggle point is being removed. event::Widget::Click(click) if click.button == input::MouseButton::Right => { let remove_point = super::RemovePoint { idx: i }; events.push(Event::Mutate(remove_point.into())); } _ => (), } } let y_offset = if value == ToggleValue(false) { -half_h } else { half_h }; let point_elem = Elem::Point(i); let color = super::color_elem_by_playhead(point_elem, playhead_delta_range, color); let color = match ui.widget_input(point_id).mouse() { Some(mouse) => match mouse.buttons.left().is_down() { true => color.clicked(), false => color.highlighted(), }, None => color, }; widget::Circle::fill(point_radius) .x_y_relative_to(id, x_offset, y_offset) .graphics_for(id) .parent(id) .color(color) .set(point_id, ui); }; let mut events = Vec::new(); // Instantiate the widgets in a big loop. let mut iter = envelope.points().zip(state.ids.circles.iter()).enumerate(); if let Some((i, (&first, &first_id))) = iter.next() { // The first point widget. let first_offset = ruler::x_offset_from_ticks(first.ticks, total_ticks, w); point_widget(i, first_offset, first.value, first_id, ui, &mut events); let mut prev_offset = first_offset; let mut prev_toggle = first.value; let mut rectangle_ids = state.ids.rectangles.iter(); let mut prev_point_id = first_id; for (i, (&point, &point_id)) in iter { // All following point widgets. let point_x_offset = ruler::x_offset_from_ticks(point.ticks, total_ticks, w); point_widget(i, point_x_offset, point.value, point_id, ui, &mut events); // The rectangle widget. 
if prev_toggle == ToggleValue(true) { let &rectangle_id = rectangle_ids.next().expect("Not enough rectangle ids"); let right = point_x_offset; let left = prev_offset; let width = right - left; let elem = Elem::BetweenPoints(i - 1, i); let color = super::color_elem_by_playhead(elem, playhead_delta_range, color); let color = match ui .widget_input(prev_point_id) .mouse() .or_else(|| ui.widget_input(point_id).mouse()) { Some(mouse) => match mouse.buttons.left().is_down() { true => color.clicked(), false => color.highlighted(), }, None => color, }; let x_offset = left + width / 2.0; widget::Rectangle::fill([width, h]) .depth(2.0) // Place behind lines and circles. .x_relative_to(id, x_offset) .graphics_for(id) .color(color.alpha(0.5)) .parent(id) .set(rectangle_id, ui); } prev_offset = point_x_offset; prev_toggle = point.value; prev_point_id = point_id; } } // // A Line widget to accent the current interaction with the widget. // if let Some(mouse) = ui.widget_input(idx).mouse() { // let (x, ticks, value) = match new_interaction { // Highlighted(Elem::Point(p_idx)) | Clicked(Elem::Point(p_idx), _, _) => { // let p = envelope.env.points[p_idx]; // let x = x_from_ticks(p.ticks); // (x, p.ticks, p.value) // }, // Highlighted(_) | Clicked(_, _, _) => { // let x = mouse.xy[0]; // let ticks = ticks_from_x(x); // let value = value_from_y(mouse.xy[1]); // (x, ticks, value) // }, // _ => return, // }; // let color = match new_interaction { // // If whatever we're interacting with is highlighted, we should be too. // Highlighted(Elem::Point(_)) => color.highlighted(), // Highlighted(_) => color.highlighted().alpha(0.5), // // Only draw the clicked point if it is still between the clicked area. 
// Clicked(Elem::BetweenPoints(a, b), _, _) => // match (envelope.points().nth(a), envelope.points().nth(b)) { // (Some(p_a), Some(p_b)) if p_a.ticks <= ticks && ticks <= p_b.ticks => // color.clicked().alpha(0.7), // _ => return, // }, // // Only draw the clicked point if it is still before the first point. // Clicked(Elem::BeforeFirstPoint, _, _) => // match envelope.points().nth(0) { // Some(p) if ticks <= p.ticks => color.clicked().alpha(0.7), // _ => return, // }, // // Only draw the clicked point if it is still after the last point. // Clicked(Elem::AfterLastPoint, _, _) => // match envelope.points().last() { // Some(p) if p.ticks <= ticks => color.clicked().alpha(0.7), // _ => return, // }, // Clicked(Elem::EmptyRect, _, _) => color.clicked().alpha(0.7), // Clicked(Elem::Point(_), _, _) => color.clicked(), // _ => return, // }; // let (y_bottom, y_top) = match value { // ToggleValue(true) => (y + h / 4.0, rect.top()), // ToggleValue(false) => (rect.bottom(), y - h / 4.0), // }; // let start = [x, y_bottom]; // let end = [x, y_top]; // const THICKNESS: Scalar = 2.0; // let line_idx = state.phantom_line_idx.get(&mut ui); // Line::abs(start, end) // .depth(1.0) // Place beind circles but in front of rectangles. // .graphics_for(idx) // .parent(idx) // .color(color) // .thickness(THICKNESS) // .set(line_idx, &mut ui); // }; events } }
State
identifier_name
toggle.rs
use bars_duration_ticks; use conrod_core::{self as conrod, widget}; use env; use ruler; use time_calc::{self as time, Ticks}; use track; pub use env::{Point, PointTrait, Toggle as ToggleValue, Trait as EnvelopeTrait}; /// The envelope type compatible with the `Toggle` automation track. pub type Envelope = env::bounded::Envelope<ToggleValue>; /// For viewing and manipulating series of boolean valued points over time. #[derive(WidgetCommon)] pub struct Toggle<'a> { #[conrod(common_builder)] common: widget::CommonBuilder, envelope: &'a Envelope, bars: &'a [time::TimeSig], ppqn: time::Ppqn, /// The position of the playhead in ticks along with the change in its position in ticks. pub maybe_playhead: Option<(Ticks, Ticks)>, style: Style, } /// Unique state for the Toggle automation. pub struct State { ids: Ids, } widget_ids! { struct Ids { circles[], rectangles[], phantom_line, } } #[derive(Copy, Clone, Debug, Default, PartialEq, WidgetStyle)] pub struct Style { #[conrod(default = "theme.shape_color")] pub color: Option<conrod::Color>, #[conrod(default = "4.0")] pub point_radius: Option<conrod::Scalar>, } /// The various kinds of events returned by an automation track. #[derive(Copy, Clone, Debug)] pub enum Event { /// Upon playhead movement, represents new boolean value at playhead. Interpolate(bool), /// Indicatees that the toggle value has changed since the last update. SwitchTo(bool), /// Some event which would mutate the envelope has occurred. Mutate(super::Mutate<ToggleValue>), } impl<'a> Toggle<'a> { /// Construct a new default Automation. pub fn new(bars: &'a [time::TimeSig], ppqn: time::Ppqn, envelope: &'a Envelope) -> Self
builder_methods! { pub point_radius { style.point_radius = Some(conrod::Scalar) } } } impl<'a> track::Widget for Toggle<'a> { fn playhead(mut self, playhead: (Ticks, Ticks)) -> Self { self.maybe_playhead = Some(playhead); self } } impl<'a> conrod::Colorable for Toggle<'a> { builder_method!(color { style.color = Some(conrod::Color) }); } impl<'a> conrod::Widget for Toggle<'a> { type State = State; type Style = Style; type Event = Vec<Event>; fn init_state(&self, id_gen: widget::id::Generator) -> Self::State { State { ids: Ids::new(id_gen), } } fn style(&self) -> Self::Style { self.style.clone() } fn default_y_dimension(&self, ui: &conrod::Ui) -> conrod::position::Dimension { ui.theme .widget_style::<Style>() .and_then(|default| default.common.maybe_y_dimension) .unwrap_or(conrod::position::Dimension::Absolute( super::super::DEFAULT_HEIGHT, )) } fn update(self, args: widget::UpdateArgs<Self>) -> Self::Event { use super::Elem; use conrod_core::utils::{clamp, map_range}; use conrod_core::{Colorable, Positionable}; let widget::UpdateArgs { id, rect, state, style, ui, .. } = args; let Toggle { envelope, bars, ppqn, maybe_playhead, .. } = self; let num_points = envelope.points().count(); let num_rectangles = { let mut points = envelope.points(); points .next() .map(|first| { let mut prev_toggle = first.value; let mut count = 0; for point in points { if prev_toggle == ToggleValue(true) { count += 1; } prev_toggle = point.value; } count }) .unwrap_or(0) }; // Ensure we have a circle index for each point. if state.ids.circles.len() < num_points { let id_gen = &mut ui.widget_id_generator(); state.update(|state| state.ids.circles.resize(num_points, id_gen)); } // Ensure we have a rectangle index for each point. 
if state.ids.rectangles.len() < num_rectangles { let id_gen = &mut ui.widget_id_generator(); state.update(|state| state.ids.rectangles.resize(num_rectangles, id_gen)); } let (w, h) = rect.w_h(); let half_h = h / 2.0; let color = style.color(ui.theme()); let point_radius = style.point_radius(ui.theme()); let total_ticks = bars_duration_ticks(bars.iter().cloned(), ppqn); // Get the time in ticks from some position over the Bang automation. let ticks_from_x = |x: conrod::Scalar| { Ticks(map_range( x, rect.left(), rect.right(), 0, total_ticks.ticks(), )) }; // `false` if `y` is closer to the bottom, `true` if y is closer to the top. let value_from_y = |y: conrod::Scalar| { let perc = map_range(y, rect.bottom(), rect.top(), 0.0, 1.0); if perc < 0.5 { ToggleValue(false) } else { ToggleValue(true) } }; // Same as `ticks_from_x` but clamps the ticks to the total_ticks range. let clamped_ticks_from_x = |x: conrod::Scalar| clamp(ticks_from_x(x), Ticks(0), total_ticks); // All that remains is to instantiate the graphics widgets. // // Check whether or not we need to do so by checking whether or not we're visible. if conrod::graph::algo::cropped_area_of_widget(ui.widget_graph(), id).is_none() { return Vec::new(); } // Determine the element range over which the playhead has traversed since the last update. let playhead_delta_range = match maybe_playhead { Some((playhead, delta)) if delta > Ticks(0) => { let start = playhead - delta; let end = playhead; super::maybe_surrounding_elems(total_ticks, envelope, start, end) } _ => None, }; // A function for instantiating a Circle widget for a point. let point_widget = |i: usize, x_offset: conrod::Scalar, value: ToggleValue, point_id: widget::Id, ui: &mut conrod::UiCell, events: &mut Vec<Event>| { for widget_event in ui.widget_input(point_id).events() { use conrod_core::{event, input}; match widget_event { // Check to see if the toggle point is being dragged. 
event::Widget::Drag(drag) if drag.button == input::MouseButton::Left => { let point_rect = ui.rect_of(point_id).unwrap(); let drag_to_abs_xy = conrod::utils::vec2_add(drag.to, point_rect.xy()); let drag_point = super::DragPoint { idx: i, ticks: clamped_ticks_from_x(drag_to_abs_xy[0]), value: value_from_y(drag_to_abs_xy[1]), }; events.push(Event::Mutate(drag_point.into())); } // Check to see if the toggle point is being removed. event::Widget::Click(click) if click.button == input::MouseButton::Right => { let remove_point = super::RemovePoint { idx: i }; events.push(Event::Mutate(remove_point.into())); } _ => (), } } let y_offset = if value == ToggleValue(false) { -half_h } else { half_h }; let point_elem = Elem::Point(i); let color = super::color_elem_by_playhead(point_elem, playhead_delta_range, color); let color = match ui.widget_input(point_id).mouse() { Some(mouse) => match mouse.buttons.left().is_down() { true => color.clicked(), false => color.highlighted(), }, None => color, }; widget::Circle::fill(point_radius) .x_y_relative_to(id, x_offset, y_offset) .graphics_for(id) .parent(id) .color(color) .set(point_id, ui); }; let mut events = Vec::new(); // Instantiate the widgets in a big loop. let mut iter = envelope.points().zip(state.ids.circles.iter()).enumerate(); if let Some((i, (&first, &first_id))) = iter.next() { // The first point widget. let first_offset = ruler::x_offset_from_ticks(first.ticks, total_ticks, w); point_widget(i, first_offset, first.value, first_id, ui, &mut events); let mut prev_offset = first_offset; let mut prev_toggle = first.value; let mut rectangle_ids = state.ids.rectangles.iter(); let mut prev_point_id = first_id; for (i, (&point, &point_id)) in iter { // All following point widgets. let point_x_offset = ruler::x_offset_from_ticks(point.ticks, total_ticks, w); point_widget(i, point_x_offset, point.value, point_id, ui, &mut events); // The rectangle widget. 
if prev_toggle == ToggleValue(true) { let &rectangle_id = rectangle_ids.next().expect("Not enough rectangle ids"); let right = point_x_offset; let left = prev_offset; let width = right - left; let elem = Elem::BetweenPoints(i - 1, i); let color = super::color_elem_by_playhead(elem, playhead_delta_range, color); let color = match ui .widget_input(prev_point_id) .mouse() .or_else(|| ui.widget_input(point_id).mouse()) { Some(mouse) => match mouse.buttons.left().is_down() { true => color.clicked(), false => color.highlighted(), }, None => color, }; let x_offset = left + width / 2.0; widget::Rectangle::fill([width, h]) .depth(2.0) // Place behind lines and circles. .x_relative_to(id, x_offset) .graphics_for(id) .color(color.alpha(0.5)) .parent(id) .set(rectangle_id, ui); } prev_offset = point_x_offset; prev_toggle = point.value; prev_point_id = point_id; } } // // A Line widget to accent the current interaction with the widget. // if let Some(mouse) = ui.widget_input(idx).mouse() { // let (x, ticks, value) = match new_interaction { // Highlighted(Elem::Point(p_idx)) | Clicked(Elem::Point(p_idx), _, _) => { // let p = envelope.env.points[p_idx]; // let x = x_from_ticks(p.ticks); // (x, p.ticks, p.value) // }, // Highlighted(_) | Clicked(_, _, _) => { // let x = mouse.xy[0]; // let ticks = ticks_from_x(x); // let value = value_from_y(mouse.xy[1]); // (x, ticks, value) // }, // _ => return, // }; // let color = match new_interaction { // // If whatever we're interacting with is highlighted, we should be too. // Highlighted(Elem::Point(_)) => color.highlighted(), // Highlighted(_) => color.highlighted().alpha(0.5), // // Only draw the clicked point if it is still between the clicked area. 
// Clicked(Elem::BetweenPoints(a, b), _, _) => // match (envelope.points().nth(a), envelope.points().nth(b)) { // (Some(p_a), Some(p_b)) if p_a.ticks <= ticks && ticks <= p_b.ticks => // color.clicked().alpha(0.7), // _ => return, // }, // // Only draw the clicked point if it is still before the first point. // Clicked(Elem::BeforeFirstPoint, _, _) => // match envelope.points().nth(0) { // Some(p) if ticks <= p.ticks => color.clicked().alpha(0.7), // _ => return, // }, // // Only draw the clicked point if it is still after the last point. // Clicked(Elem::AfterLastPoint, _, _) => // match envelope.points().last() { // Some(p) if p.ticks <= ticks => color.clicked().alpha(0.7), // _ => return, // }, // Clicked(Elem::EmptyRect, _, _) => color.clicked().alpha(0.7), // Clicked(Elem::Point(_), _, _) => color.clicked(), // _ => return, // }; // let (y_bottom, y_top) = match value { // ToggleValue(true) => (y + h / 4.0, rect.top()), // ToggleValue(false) => (rect.bottom(), y - h / 4.0), // }; // let start = [x, y_bottom]; // let end = [x, y_top]; // const THICKNESS: Scalar = 2.0; // let line_idx = state.phantom_line_idx.get(&mut ui); // Line::abs(start, end) // .depth(1.0) // Place beind circles but in front of rectangles. // .graphics_for(idx) // .parent(idx) // .color(color) // .thickness(THICKNESS) // .set(line_idx, &mut ui); // }; events } }
{ Toggle { bars: bars, ppqn: ppqn, maybe_playhead: None, envelope: envelope, common: widget::CommonBuilder::default(), style: Style::default(), } }
identifier_body
toggle.rs
use bars_duration_ticks; use conrod_core::{self as conrod, widget}; use env; use ruler; use time_calc::{self as time, Ticks}; use track; pub use env::{Point, PointTrait, Toggle as ToggleValue, Trait as EnvelopeTrait}; /// The envelope type compatible with the `Toggle` automation track. pub type Envelope = env::bounded::Envelope<ToggleValue>; /// For viewing and manipulating series of boolean valued points over time. #[derive(WidgetCommon)] pub struct Toggle<'a> { #[conrod(common_builder)] common: widget::CommonBuilder, envelope: &'a Envelope, bars: &'a [time::TimeSig], ppqn: time::Ppqn, /// The position of the playhead in ticks along with the change in its position in ticks. pub maybe_playhead: Option<(Ticks, Ticks)>, style: Style, } /// Unique state for the Toggle automation. pub struct State { ids: Ids, } widget_ids! { struct Ids { circles[], rectangles[], phantom_line, } } #[derive(Copy, Clone, Debug, Default, PartialEq, WidgetStyle)] pub struct Style { #[conrod(default = "theme.shape_color")] pub color: Option<conrod::Color>, #[conrod(default = "4.0")] pub point_radius: Option<conrod::Scalar>, } /// The various kinds of events returned by an automation track. #[derive(Copy, Clone, Debug)] pub enum Event { /// Upon playhead movement, represents new boolean value at playhead. Interpolate(bool), /// Indicatees that the toggle value has changed since the last update. SwitchTo(bool), /// Some event which would mutate the envelope has occurred. Mutate(super::Mutate<ToggleValue>), } impl<'a> Toggle<'a> { /// Construct a new default Automation. pub fn new(bars: &'a [time::TimeSig], ppqn: time::Ppqn, envelope: &'a Envelope) -> Self { Toggle { bars: bars, ppqn: ppqn, maybe_playhead: None, envelope: envelope, common: widget::CommonBuilder::default(), style: Style::default(), } } builder_methods! 
{ pub point_radius { style.point_radius = Some(conrod::Scalar) } } } impl<'a> track::Widget for Toggle<'a> { fn playhead(mut self, playhead: (Ticks, Ticks)) -> Self { self.maybe_playhead = Some(playhead); self } } impl<'a> conrod::Colorable for Toggle<'a> { builder_method!(color { style.color = Some(conrod::Color) }); } impl<'a> conrod::Widget for Toggle<'a> { type State = State; type Style = Style; type Event = Vec<Event>; fn init_state(&self, id_gen: widget::id::Generator) -> Self::State { State { ids: Ids::new(id_gen), } } fn style(&self) -> Self::Style { self.style.clone() } fn default_y_dimension(&self, ui: &conrod::Ui) -> conrod::position::Dimension { ui.theme .widget_style::<Style>() .and_then(|default| default.common.maybe_y_dimension) .unwrap_or(conrod::position::Dimension::Absolute( super::super::DEFAULT_HEIGHT, )) } fn update(self, args: widget::UpdateArgs<Self>) -> Self::Event { use super::Elem; use conrod_core::utils::{clamp, map_range}; use conrod_core::{Colorable, Positionable}; let widget::UpdateArgs { id, rect, state, style, ui, .. } = args; let Toggle { envelope, bars, ppqn, maybe_playhead, .. } = self; let num_points = envelope.points().count(); let num_rectangles = { let mut points = envelope.points(); points .next() .map(|first| { let mut prev_toggle = first.value; let mut count = 0; for point in points { if prev_toggle == ToggleValue(true) { count += 1; } prev_toggle = point.value; } count }) .unwrap_or(0) }; // Ensure we have a circle index for each point. if state.ids.circles.len() < num_points { let id_gen = &mut ui.widget_id_generator(); state.update(|state| state.ids.circles.resize(num_points, id_gen)); } // Ensure we have a rectangle index for each point. 
if state.ids.rectangles.len() < num_rectangles { let id_gen = &mut ui.widget_id_generator(); state.update(|state| state.ids.rectangles.resize(num_rectangles, id_gen)); } let (w, h) = rect.w_h(); let half_h = h / 2.0; let color = style.color(ui.theme()); let point_radius = style.point_radius(ui.theme()); let total_ticks = bars_duration_ticks(bars.iter().cloned(), ppqn); // Get the time in ticks from some position over the Bang automation. let ticks_from_x = |x: conrod::Scalar| { Ticks(map_range( x, rect.left(), rect.right(), 0, total_ticks.ticks(), )) }; // `false` if `y` is closer to the bottom, `true` if y is closer to the top. let value_from_y = |y: conrod::Scalar| { let perc = map_range(y, rect.bottom(), rect.top(), 0.0, 1.0); if perc < 0.5 { ToggleValue(false) } else { ToggleValue(true) } }; // Same as `ticks_from_x` but clamps the ticks to the total_ticks range. let clamped_ticks_from_x = |x: conrod::Scalar| clamp(ticks_from_x(x), Ticks(0), total_ticks); // All that remains is to instantiate the graphics widgets. // // Check whether or not we need to do so by checking whether or not we're visible. if conrod::graph::algo::cropped_area_of_widget(ui.widget_graph(), id).is_none() { return Vec::new(); } // Determine the element range over which the playhead has traversed since the last update. let playhead_delta_range = match maybe_playhead { Some((playhead, delta)) if delta > Ticks(0) => { let start = playhead - delta; let end = playhead; super::maybe_surrounding_elems(total_ticks, envelope, start, end) } _ => None, }; // A function for instantiating a Circle widget for a point. let point_widget = |i: usize, x_offset: conrod::Scalar, value: ToggleValue, point_id: widget::Id, ui: &mut conrod::UiCell, events: &mut Vec<Event>| { for widget_event in ui.widget_input(point_id).events() { use conrod_core::{event, input}; match widget_event { // Check to see if the toggle point is being dragged. 
event::Widget::Drag(drag) if drag.button == input::MouseButton::Left => { let point_rect = ui.rect_of(point_id).unwrap(); let drag_to_abs_xy = conrod::utils::vec2_add(drag.to, point_rect.xy()); let drag_point = super::DragPoint { idx: i, ticks: clamped_ticks_from_x(drag_to_abs_xy[0]), value: value_from_y(drag_to_abs_xy[1]), }; events.push(Event::Mutate(drag_point.into())); } // Check to see if the toggle point is being removed. event::Widget::Click(click) if click.button == input::MouseButton::Right => { let remove_point = super::RemovePoint { idx: i }; events.push(Event::Mutate(remove_point.into())); } _ => (), } } let y_offset = if value == ToggleValue(false) { -half_h } else { half_h }; let point_elem = Elem::Point(i); let color = super::color_elem_by_playhead(point_elem, playhead_delta_range, color); let color = match ui.widget_input(point_id).mouse() { Some(mouse) => match mouse.buttons.left().is_down() { true => color.clicked(), false => color.highlighted(), }, None => color, }; widget::Circle::fill(point_radius) .x_y_relative_to(id, x_offset, y_offset) .graphics_for(id) .parent(id) .color(color) .set(point_id, ui); }; let mut events = Vec::new(); // Instantiate the widgets in a big loop. let mut iter = envelope.points().zip(state.ids.circles.iter()).enumerate(); if let Some((i, (&first, &first_id))) = iter.next() { // The first point widget. let first_offset = ruler::x_offset_from_ticks(first.ticks, total_ticks, w); point_widget(i, first_offset, first.value, first_id, ui, &mut events); let mut prev_offset = first_offset; let mut prev_toggle = first.value; let mut rectangle_ids = state.ids.rectangles.iter(); let mut prev_point_id = first_id; for (i, (&point, &point_id)) in iter { // All following point widgets. let point_x_offset = ruler::x_offset_from_ticks(point.ticks, total_ticks, w); point_widget(i, point_x_offset, point.value, point_id, ui, &mut events); // The rectangle widget. 
if prev_toggle == ToggleValue(true) { let &rectangle_id = rectangle_ids.next().expect("Not enough rectangle ids"); let right = point_x_offset; let left = prev_offset; let width = right - left; let elem = Elem::BetweenPoints(i - 1, i); let color = super::color_elem_by_playhead(elem, playhead_delta_range, color); let color = match ui .widget_input(prev_point_id) .mouse() .or_else(|| ui.widget_input(point_id).mouse()) { Some(mouse) => match mouse.buttons.left().is_down() { true => color.clicked(), false => color.highlighted(), }, None => color, }; let x_offset = left + width / 2.0; widget::Rectangle::fill([width, h]) .depth(2.0) // Place behind lines and circles. .x_relative_to(id, x_offset) .graphics_for(id) .color(color.alpha(0.5)) .parent(id) .set(rectangle_id, ui); } prev_offset = point_x_offset; prev_toggle = point.value; prev_point_id = point_id; } } // // A Line widget to accent the current interaction with the widget. // if let Some(mouse) = ui.widget_input(idx).mouse() { // let (x, ticks, value) = match new_interaction { // Highlighted(Elem::Point(p_idx)) | Clicked(Elem::Point(p_idx), _, _) => { // let p = envelope.env.points[p_idx]; // let x = x_from_ticks(p.ticks); // (x, p.ticks, p.value) // }, // Highlighted(_) | Clicked(_, _, _) => { // let x = mouse.xy[0]; // let ticks = ticks_from_x(x); // let value = value_from_y(mouse.xy[1]); // (x, ticks, value) // }, // _ => return, // }; // let color = match new_interaction { // // If whatever we're interacting with is highlighted, we should be too. // Highlighted(Elem::Point(_)) => color.highlighted(), // Highlighted(_) => color.highlighted().alpha(0.5), // // Only draw the clicked point if it is still between the clicked area. // Clicked(Elem::BetweenPoints(a, b), _, _) => // match (envelope.points().nth(a), envelope.points().nth(b)) { // (Some(p_a), Some(p_b)) if p_a.ticks <= ticks && ticks <= p_b.ticks => // color.clicked().alpha(0.7), // _ => return, // },
// _ => return, // }, // // Only draw the clicked point if it is still after the last point. // Clicked(Elem::AfterLastPoint, _, _) => // match envelope.points().last() { // Some(p) if p.ticks <= ticks => color.clicked().alpha(0.7), // _ => return, // }, // Clicked(Elem::EmptyRect, _, _) => color.clicked().alpha(0.7), // Clicked(Elem::Point(_), _, _) => color.clicked(), // _ => return, // }; // let (y_bottom, y_top) = match value { // ToggleValue(true) => (y + h / 4.0, rect.top()), // ToggleValue(false) => (rect.bottom(), y - h / 4.0), // }; // let start = [x, y_bottom]; // let end = [x, y_top]; // const THICKNESS: Scalar = 2.0; // let line_idx = state.phantom_line_idx.get(&mut ui); // Line::abs(start, end) // .depth(1.0) // Place beind circles but in front of rectangles. // .graphics_for(idx) // .parent(idx) // .color(color) // .thickness(THICKNESS) // .set(line_idx, &mut ui); // }; events } }
// // Only draw the clicked point if it is still before the first point. // Clicked(Elem::BeforeFirstPoint, _, _) => // match envelope.points().nth(0) { // Some(p) if ticks <= p.ticks => color.clicked().alpha(0.7),
random_line_split
ArabicOCR.py
# To Run Arabic OCR # python ArabicOCR.py <featureMethod> <classifier> # featureMethod: StatisticalFeatures - NewGeometricFeatures # classifier: SVM # TRAINING_DATASET = './Letters-Dataset-Generator/LettersDataset' TRAINING_DATASET = './Dataset/scanned' TESTING_DATASET = './Dataset/Testing' from importlib import import_module import argparse from tqdm import tqdm import timeit import glob import cv2 import numpy as np import os from Preprocessing.Lines import LineSegmentation from Preprocessing.Words import WordSegmentation from Preprocessing.Characters import CharacterSegmentation from Classification.TextLabeling import get_labels, getCharFromLabel # from Preprocessing.PreprocessingTrain import get_dataset import h5py from multiprocessing import Process # hdf5_dir = "PreprocessingOutput/1000-2000/" # def get_dataset(chars_file,labels_file,count=-1): # cfile= h5py.File(hdf5_dir +chars_file, "r+") # imgs=[] # i=0 # for img in cfile.keys(): # if count!= -1 and i>=count: # break # i+=1 # words=[] # word_k= len(cfile[img].keys()) # for word in range(word_k): # word_1=[] # for char in cfile[img][str(word)].keys(): # word_1+=[np.array(cfile[img][str(word)][char])] # words+=[word_1] # imgs+=[words] # lfile= h5py.File(hdf5_dir +labels_file, "r+") # labels=[] # i=0 # for img in lfile.keys(): # if count!= -1 and i>=count: # break # i+=1 # label_img=[] # for word in lfile[img].keys(): # label_1=[] # for label in lfile[img][word].keys(): # label_1+=[np.array(lfile[img][word][label])] # label_img+=[label_1] # labels+=[label_img] # return imgs,labels import re def atoi(text): return int(text) if text.isdigit() else text def natural_keys(text): return [ atoi(c) for c in re.split(r'(\d+)', text) ] # checking if string contains list element def
(string, str_list): res = [ele for ele in str_list if(ele in string)] return bool(res) def readImages(folder, trainTest = 0): images = [] folders = [f for f in glob.glob(folder + "**/*", recursive=True)] y_vals = [] filesNames = [] for folder in folders: img_files = [img_file for img_file in glob.glob(folder + "**/*.png", recursive=True)] for i in range(len(img_files)): filesNames += [img_files[i]] img = cv2.imread(img_files[i]) images.append(img) if trainTest == 0: label = int(img_files[i][img_files[i].index("label_") + len("label_"):img_files[i].index("_size")]) y_vals.append(label) return images, y_vals, filesNames def readImagesInFolder(folder): images = [] folders = [f for f in glob.glob(folder + "**/*", recursive=True)] y_vals = [] filesNames = [] image_count = 0 for img in folders: # only read first image if image_count == 4: return images, y_vals, filesNames filesNames += [img[img.index("scanned/") + len("scanned/"):]] image_count += 1 return images, y_vals, filesNames def imagePreprocessing(img): # Segment paragraph into lines lines = LineSegmentation(img, saveResults=False) # Segment lines into words words = [] for i in range(len(lines)): words.extend(WordSegmentation(lines[i], lineNumber = i, saveResults=False)) characters = [] # Segment words into characters for word in words: characters.append(CharacterSegmentation(np.array(word, dtype=np.uint8))) # print("lines= ",len(lines)," words= ",len(words)," chars= ",len(characters)) return characters # [[[, , , characters], , , words] , , , lines] def loop(i): pictureFeatures = [] pictureLabels = [] # actualCharacters = [] image = cv2.imread(i) textFileName = i[:-4].replace('scanned', 'text') textWords = open(textFileName + '.txt', encoding='utf-8').read().replace('\n', ' ').split(' ') textWords = [item for item in textWords if item != ''] segmented = imagePreprocessing(image) # Get characters of image # print("segmented", textFileName) # [[[, , , characters], , , words] , , , lines] double_char = "لا" 
segmentedWords = len(segmented) # TotalImages += 1 # print("Text: ", len(textWords), "Segmented: ", len(segmented)) if len(textWords) != segmentedWords: # skippedImages += 1 # print(i[i.rfind('\\')+1:-4],"is skipped") return # faultyWordSegmented = False # if len(textWords) < segmentedWords: # print("FAULTY IMAGE") # faultyWordSegmented = True for wordIndex in range(len(segmented)): word = segmented[wordIndex] correspondingTextWord = textWords[0] # get count of occurances of "lam-alf" in word occurances_count = correspondingTextWord.count(double_char) text_length = len(correspondingTextWord) # treat every "lam-alf" as one character text_length -= occurances_count # print("NOW Processing: ", correspondingTextWord, "text_length = ", text_length, " occurances_count = ", occurances_count, ' len(word) = ', len(word) ) if len(word) != text_length: # segmented characters != word characters # if faultyWordSegmented and wordIndex + 1 < len(segmented) and occurances_count > 0 and text_length == len(word) + len(segmented[wordIndex+1]): # # There is لا that is causing a problem # correspondingTextWord = correspondingTextWord[:len(word)+1] # print("correspondingTextWord = ", correspondingTextWord) # textWords[0] = textWords[0][len(word)+1:] # processedWords += 1 # else: # ignoredWords += 1 del textWords[0] continue # else: # processedWords += 1 # del textWords[0] pictureLabels.extend(get_labels(correspondingTextWord)) # actualCharacters += [correspondingTextWord] for char in word: # processedCharacters += 1 # print('Currently processing image '+filesNames[0]+' line #', segmented.index(line), ' word #', line.index(word),' char #', word.index(char)) currentCharFeature = features.getFeatures(char, showResults=False, black_background=True) pictureFeatures.append(currentCharFeature) # cv2.resize(char, (100,60)) del textWords[0] # f = open('textFiles/'+i[i.rfind('\\')+1:-4]+'-words.txt','wb+') # for myWord in actualCharacters: # f.write(myWord.encode('utf8')+'\n'.encode('utf8')) # 
f.close() f = open('textFiles/'+i[i.rfind('\\')+1:-4]+'.txt','w+') for k in range(len(pictureFeatures)): f.write(str(pictureLabels[k])+' ') for current_feature in pictureFeatures[k]: f.write("%s " % current_feature) f.write('\n') f.close() # Read arguments in order parser = argparse.ArgumentParser("Train Module") parser.add_argument("features") parser.add_argument("classifier") args = parser.parse_args() # Parse the arguments written by the user in the commandline # Import Modules ################# # Import Features Type featuresModule = import_module('FeatureExtraction.' + args.features) # Dynamically load the features module featuresClass = getattr(featuresModule, args.features) features = featuresClass() if __name__ == "__main__": # Import classifier Type classifierModule = import_module('Classification.' + args.classifier) # Dynamically load the classifier module classifierClass = getattr(classifierModule, args.classifier) classifier = classifierClass(features.featuresNumber) ########################### mode = int(input("1.Segment\n2.Train\n3.Test existing Model\n")) if mode == 1: # set start time start_time = timeit.default_timer() #trainingImages, classifier.y_vals, __ = readImages(TRAINING_DATASET, trainTest = 0) # if TRAINING_DATASET == './Dataset/scanned': # os.listdir("Dataset/scanned/") print("Reading dataset to segment") trainingImages = [] imagesNames = [] # for i in tqdm(sorted(glob.glob(TRAINING_DATASET + "*/*.png"), key=natural_keys)): # trainingImages += [cv2.imread(i)] # imagesNames += [i[:-4]] print('-----------------------------') print("Preprocessing and feature Extraction Phase") processedCharacters = 0 ignoredWords = 0 processedWords = 0 segmented = None skippedImages = 0 TotalImages = 0 processes = [] start_time = timeit.default_timer() dataset = sorted(glob.glob(TRAINING_DATASET + "*/*.png"), key=natural_keys)[4400:4600] # 3000 DONE for i in list(dataset): p = Process(target=loop, args=(i,)) p.start() processes += [p] for p in processes: 
p.join() print("running time = ", timeit.default_timer() - start_time) print('Finished All#########################################') elif mode==2: featuresList=list(glob.glob("textFiles" + "/*.txt"))[:1000] for filepath in tqdm(featuresList): with open(filepath) as fp: for line in fp: charData=line.replace('\n','').split(' ') del charData[-1] label=int(charData[0]) charData= [float(i) for i in charData[1:len(charData)]] # print(len(charData),label,charData) classifier.x_vals.append(charData) classifier.y_vals.append(label) print("Done reading segmented files") print('-----------------------------') print('Training Phase') print('-----------------------------') classifier.train() print('Testing Phase') print('-----------------------------') classifier.test() # Calculate and print total runtime print('Runtime: ', (timeit.default_timer() - start_time)/60) # Save Model print('Model Saved as ' +'Models/'+ args.classifier+'-'+args.features+ '-50' + '.sav') classifier.saveModel('Models/'+args.classifier+'-'+args.features+ '-50') else: # modelFileName = input("Model filename:") print('Loading Model') print('-----------------------------') classifier.loadModel('Models/'+args.classifier+'-'+args.features + '-50') print('Load Dataset Phase') print('-----------------------------') # trainingImages, __ , filesNames = readImages(TESTING_DATASET, trainTest = 1) # print(filesNames) print('Processing') print('-----------------------------') # create output directory directory = "./output/text/" if not os.path.exists(directory): os.makedirs(directory) runtime_file = open("./output/running_time.txt",'w+') # read test images and print corresponding text for i in tqdm(sorted(glob.glob(TESTING_DATASET + "*/*.png"))): textFileName = os.path.basename(i)[:-4]+'.txt'#.replace('scanned','text') f = open(directory + textFileName,'wb+') image = cv2.imread(i) start_time = timeit.default_timer() # start timer segmented = imagePreprocessing(image) # Get characters of image # 
print(len(segmented)) # [[[, , , characters], , , words] , , , lines] for word in segmented: for char in word: currentCharFeature = features.getFeatures(char, False) classificationResult = classifier.getResult([currentCharFeature]) # char = 'أ' char = getCharFromLabel(classificationResult) f.write(char.encode('utf8')) f.write(' '.encode('utf8')) # f.write('\n') runtime_file.write(str(timeit.default_timer() - start_time) + '\n') # write running time to file f.close() # filesNames.pop(0) runtime_file.close()
contains
identifier_name
ArabicOCR.py
# To Run Arabic OCR # python ArabicOCR.py <featureMethod> <classifier> # featureMethod: StatisticalFeatures - NewGeometricFeatures # classifier: SVM # TRAINING_DATASET = './Letters-Dataset-Generator/LettersDataset' TRAINING_DATASET = './Dataset/scanned' TESTING_DATASET = './Dataset/Testing' from importlib import import_module import argparse from tqdm import tqdm import timeit import glob import cv2 import numpy as np import os from Preprocessing.Lines import LineSegmentation from Preprocessing.Words import WordSegmentation from Preprocessing.Characters import CharacterSegmentation from Classification.TextLabeling import get_labels, getCharFromLabel # from Preprocessing.PreprocessingTrain import get_dataset import h5py from multiprocessing import Process # hdf5_dir = "PreprocessingOutput/1000-2000/" # def get_dataset(chars_file,labels_file,count=-1): # cfile= h5py.File(hdf5_dir +chars_file, "r+") # imgs=[] # i=0 # for img in cfile.keys(): # if count!= -1 and i>=count: # break # i+=1 # words=[] # word_k= len(cfile[img].keys()) # for word in range(word_k): # word_1=[] # for char in cfile[img][str(word)].keys(): # word_1+=[np.array(cfile[img][str(word)][char])] # words+=[word_1] # imgs+=[words] # lfile= h5py.File(hdf5_dir +labels_file, "r+") # labels=[] # i=0 # for img in lfile.keys(): # if count!= -1 and i>=count: # break # i+=1 # label_img=[] # for word in lfile[img].keys(): # label_1=[] # for label in lfile[img][word].keys(): # label_1+=[np.array(lfile[img][word][label])] # label_img+=[label_1] # labels+=[label_img] # return imgs,labels import re def atoi(text): return int(text) if text.isdigit() else text def natural_keys(text): return [ atoi(c) for c in re.split(r'(\d+)', text) ] # checking if string contains list element def contains(string, str_list): res = [ele for ele in str_list if(ele in string)] return bool(res) def readImages(folder, trainTest = 0): images = [] folders = [f for f in glob.glob(folder + "**/*", recursive=True)] y_vals = [] filesNames = [] 
for folder in folders: img_files = [img_file for img_file in glob.glob(folder + "**/*.png", recursive=True)] for i in range(len(img_files)): filesNames += [img_files[i]] img = cv2.imread(img_files[i]) images.append(img) if trainTest == 0: label = int(img_files[i][img_files[i].index("label_") + len("label_"):img_files[i].index("_size")]) y_vals.append(label) return images, y_vals, filesNames def readImagesInFolder(folder): images = [] folders = [f for f in glob.glob(folder + "**/*", recursive=True)] y_vals = [] filesNames = [] image_count = 0 for img in folders: # only read first image if image_count == 4: return images, y_vals, filesNames filesNames += [img[img.index("scanned/") + len("scanned/"):]] image_count += 1 return images, y_vals, filesNames def imagePreprocessing(img): # Segment paragraph into lines lines = LineSegmentation(img, saveResults=False) # Segment lines into words words = [] for i in range(len(lines)): words.extend(WordSegmentation(lines[i], lineNumber = i, saveResults=False)) characters = [] # Segment words into characters for word in words: characters.append(CharacterSegmentation(np.array(word, dtype=np.uint8))) # print("lines= ",len(lines)," words= ",len(words)," chars= ",len(characters)) return characters # [[[, , , characters], , , words] , , , lines] def loop(i): pictureFeatures = [] pictureLabels = [] # actualCharacters = [] image = cv2.imread(i) textFileName = i[:-4].replace('scanned', 'text') textWords = open(textFileName + '.txt', encoding='utf-8').read().replace('\n', ' ').split(' ') textWords = [item for item in textWords if item != ''] segmented = imagePreprocessing(image) # Get characters of image # print("segmented", textFileName) # [[[, , , characters], , , words] , , , lines] double_char = "لا" segmentedWords = len(segmented) # TotalImages += 1 # print("Text: ", len(textWords), "Segmented: ", len(segmented)) if len(textWords) != segmentedWords: # skippedImages += 1 # print(i[i.rfind('\\')+1:-4],"is skipped") return # 
faultyWordSegmented = False # if len(textWords) < segmentedWords: # print("FAULTY IMAGE") # faultyWordSegmented = True for wordIndex in range(len(segmented)): word = segmented[wordIndex] correspondingTextWord = textWords[0] # get count of occurances of "lam-alf" in word occurances_count = correspondingTextWord.count(double_char) text_length = len(correspondingTextWord) # treat every "lam-alf" as one character text_length -= occurances_count # print("NOW Processing: ", correspondingTextWord, "text_length = ", text_length, " occurances_count = ", occurances_count, ' len(word) = ', len(word) ) if len(word) != text_length: # segmented characters != word characters # if faultyWordSegmented and wordIndex + 1 < len(segmented) and occurances_count > 0 and text_length == len(word) + len(segmented[wordIndex+1]): # # There is لا that is causing a problem # correspondingTextWord = correspondingTextWord[:len(word)+1] # print("correspondingTextWord = ", correspondingTextWord) # textWords[0] = textWords[0][len(word)+1:] # processedWords += 1 # else: # ignoredWords += 1 del textWords[0] continue # else: # processedWords += 1 # del textWords[0] pictureLabels.extend(get_labels(correspondingTextWord)) # actualCharacters += [correspondingTextWord] for char in word: # processedCharacters += 1 # print('Currently processing image '+filesNames[0]+' line #', segmented.index(line), ' word #', line.index(word),' char #', word.index(char)) currentCharFeature = features.getFeatures(char, showResults=False, black_background=True) pictureFeatures.append(currentCharFeature) # cv2.resize(char, (100,60)) del textWords[0] # f = open('textFiles/'+i[i.rfind('\\')+1:-4]+'-words.txt','wb+') # for myWord in actualCharacters: # f.write(myWord.encode('utf8')+'\n'.encode('utf8')) # f.close() f = open('textFiles/'+i[i.rfind('\\')+1:-4]+'.txt','w+') for k in range(len(pictureFeatures)): f.write(str(pictureLabels[k])+' ') for current_feature in pictureFeatures[k]: f.write("%s " % current_feature) f.write('\n') 
f.close() # Read arguments in order parser = argparse.ArgumentParser("Train Module") parser.add_argument("features") parser.add_argument("classifier") args = parser.parse_args() # Parse the arguments written by the user in the commandline # Import Modules ################# # Import Features Type featuresModule = import_module('FeatureExtraction.' + args.features) # Dynamically load the features module featuresClass = getattr(featuresModule, args.features) features = featuresClass() if __name__ == "__main__": # Import classifier Type classifierModule = import_module('Classification.' + args.classifier) # Dynamically load the classifier module classifierClass = getattr(classifierModule, args.classifier) classifier = classifierClass(features.featuresNumber) ########################### mode = int(input("1.Segment\n2.Train\n3.Test existing Model\n")) if mode == 1: # set start time start_time = timeit.default_timer() #trainingImages, classifier.y_vals, __ = readImages(TRAINING_DATASET, trainTest = 0) # if TRAINING_DATASET == './Dataset/scanned': # os.listdir("Dataset/scanned/") print("Reading dataset to segment") trainingImages = [] imagesNames = [] # for i in tqdm(sorted(glob.glob(TRAINING_DATASET + "*/*.png"), key=natural_keys)): # trainingImages += [cv2.imread(i)] # imagesNames += [i[:-4]] print('-----------------------------') print("Preprocessing and feature Extraction Phase")
skippedImages = 0 TotalImages = 0 processes = [] start_time = timeit.default_timer() dataset = sorted(glob.glob(TRAINING_DATASET + "*/*.png"), key=natural_keys)[4400:4600] # 3000 DONE for i in list(dataset): p = Process(target=loop, args=(i,)) p.start() processes += [p] for p in processes: p.join() print("running time = ", timeit.default_timer() - start_time) print('Finished All#########################################') elif mode==2: featuresList=list(glob.glob("textFiles" + "/*.txt"))[:1000] for filepath in tqdm(featuresList): with open(filepath) as fp: for line in fp: charData=line.replace('\n','').split(' ') del charData[-1] label=int(charData[0]) charData= [float(i) for i in charData[1:len(charData)]] # print(len(charData),label,charData) classifier.x_vals.append(charData) classifier.y_vals.append(label) print("Done reading segmented files") print('-----------------------------') print('Training Phase') print('-----------------------------') classifier.train() print('Testing Phase') print('-----------------------------') classifier.test() # Calculate and print total runtime print('Runtime: ', (timeit.default_timer() - start_time)/60) # Save Model print('Model Saved as ' +'Models/'+ args.classifier+'-'+args.features+ '-50' + '.sav') classifier.saveModel('Models/'+args.classifier+'-'+args.features+ '-50') else: # modelFileName = input("Model filename:") print('Loading Model') print('-----------------------------') classifier.loadModel('Models/'+args.classifier+'-'+args.features + '-50') print('Load Dataset Phase') print('-----------------------------') # trainingImages, __ , filesNames = readImages(TESTING_DATASET, trainTest = 1) # print(filesNames) print('Processing') print('-----------------------------') # create output directory directory = "./output/text/" if not os.path.exists(directory): os.makedirs(directory) runtime_file = open("./output/running_time.txt",'w+') # read test images and print corresponding text for i in 
tqdm(sorted(glob.glob(TESTING_DATASET + "*/*.png"))): textFileName = os.path.basename(i)[:-4]+'.txt'#.replace('scanned','text') f = open(directory + textFileName,'wb+') image = cv2.imread(i) start_time = timeit.default_timer() # start timer segmented = imagePreprocessing(image) # Get characters of image # print(len(segmented)) # [[[, , , characters], , , words] , , , lines] for word in segmented: for char in word: currentCharFeature = features.getFeatures(char, False) classificationResult = classifier.getResult([currentCharFeature]) # char = 'أ' char = getCharFromLabel(classificationResult) f.write(char.encode('utf8')) f.write(' '.encode('utf8')) # f.write('\n') runtime_file.write(str(timeit.default_timer() - start_time) + '\n') # write running time to file f.close() # filesNames.pop(0) runtime_file.close()
processedCharacters = 0 ignoredWords = 0 processedWords = 0 segmented = None
random_line_split
ArabicOCR.py
# To Run Arabic OCR # python ArabicOCR.py <featureMethod> <classifier> # featureMethod: StatisticalFeatures - NewGeometricFeatures # classifier: SVM # TRAINING_DATASET = './Letters-Dataset-Generator/LettersDataset' TRAINING_DATASET = './Dataset/scanned' TESTING_DATASET = './Dataset/Testing' from importlib import import_module import argparse from tqdm import tqdm import timeit import glob import cv2 import numpy as np import os from Preprocessing.Lines import LineSegmentation from Preprocessing.Words import WordSegmentation from Preprocessing.Characters import CharacterSegmentation from Classification.TextLabeling import get_labels, getCharFromLabel # from Preprocessing.PreprocessingTrain import get_dataset import h5py from multiprocessing import Process # hdf5_dir = "PreprocessingOutput/1000-2000/" # def get_dataset(chars_file,labels_file,count=-1): # cfile= h5py.File(hdf5_dir +chars_file, "r+") # imgs=[] # i=0 # for img in cfile.keys(): # if count!= -1 and i>=count: # break # i+=1 # words=[] # word_k= len(cfile[img].keys()) # for word in range(word_k): # word_1=[] # for char in cfile[img][str(word)].keys(): # word_1+=[np.array(cfile[img][str(word)][char])] # words+=[word_1] # imgs+=[words] # lfile= h5py.File(hdf5_dir +labels_file, "r+") # labels=[] # i=0 # for img in lfile.keys(): # if count!= -1 and i>=count: # break # i+=1 # label_img=[] # for word in lfile[img].keys(): # label_1=[] # for label in lfile[img][word].keys(): # label_1+=[np.array(lfile[img][word][label])] # label_img+=[label_1] # labels+=[label_img] # return imgs,labels import re def atoi(text): return int(text) if text.isdigit() else text def natural_keys(text): return [ atoi(c) for c in re.split(r'(\d+)', text) ] # checking if string contains list element def contains(string, str_list): res = [ele for ele in str_list if(ele in string)] return bool(res) def readImages(folder, trainTest = 0): images = [] folders = [f for f in glob.glob(folder + "**/*", recursive=True)] y_vals = [] filesNames = [] 
for folder in folders: img_files = [img_file for img_file in glob.glob(folder + "**/*.png", recursive=True)] for i in range(len(img_files)): filesNames += [img_files[i]] img = cv2.imread(img_files[i]) images.append(img) if trainTest == 0: label = int(img_files[i][img_files[i].index("label_") + len("label_"):img_files[i].index("_size")]) y_vals.append(label) return images, y_vals, filesNames def readImagesInFolder(folder): images = [] folders = [f for f in glob.glob(folder + "**/*", recursive=True)] y_vals = [] filesNames = [] image_count = 0 for img in folders: # only read first image if image_count == 4: return images, y_vals, filesNames filesNames += [img[img.index("scanned/") + len("scanned/"):]] image_count += 1 return images, y_vals, filesNames def imagePreprocessing(img): # Segment paragraph into lines lines = LineSegmentation(img, saveResults=False) # Segment lines into words words = [] for i in range(len(lines)): words.extend(WordSegmentation(lines[i], lineNumber = i, saveResults=False)) characters = [] # Segment words into characters for word in words: characters.append(CharacterSegmentation(np.array(word, dtype=np.uint8))) # print("lines= ",len(lines)," words= ",len(words)," chars= ",len(characters)) return characters # [[[, , , characters], , , words] , , , lines] def loop(i): pictureFeatures = [] pictureLabels = [] # actualCharacters = [] image = cv2.imread(i) textFileName = i[:-4].replace('scanned', 'text') textWords = open(textFileName + '.txt', encoding='utf-8').read().replace('\n', ' ').split(' ') textWords = [item for item in textWords if item != ''] segmented = imagePreprocessing(image) # Get characters of image # print("segmented", textFileName) # [[[, , , characters], , , words] , , , lines] double_char = "لا" segmentedWords = len(segmented) # TotalImages += 1 # print("Text: ", len(textWords), "Segmented: ", len(segmented)) if len(textWords) != segmentedWords: # skippedImages += 1 # print(i[i.rfind('\\')+1:-4],"is skipped") return # 
faultyWordSegmented = False # if len(textWords) < segmentedWords: # print("FAULTY IMAGE") # faultyWordSegmented = True for wordIndex in range(len(segmented)): word = segmented[wordIndex] correspondingTextWord = textWords[0] # get count of occurances of "lam-alf" in word occurances_count = correspondingTextWord.count(double_char) text_length = len(correspondingTextWord) # treat every "lam-alf" as one character text_length -= occurances_count # print("NOW Processing: ", correspondingTextWord, "text_length = ", text_length, " occurances_count = ", occurances_count, ' len(word) = ', len(word) ) if len(word) != text_length: # segmented characters != word characters # if faultyWordSegmented and wordIndex + 1 < len(segmented) and occurances_count > 0 and text_length == len(word) + len(segmented[wordIndex+1]): # # There is لا that is causing a problem # correspondingTextWord = correspondingTextWord[:len(word)+1] # print("correspondingTextWord = ", correspondingTextWord) # textWords[0] = textWords[0][len(word)+1:] # processedWords += 1 # else: # ignoredWords += 1 del textWords[0] continue # else: # processedWords += 1 # del textWords[0] pictureLabels.extend(get_labels(correspondingTextWord)) # actualCharacters += [correspondingTextWord] for char in word: # processedCharacters += 1 # print('Currently processing image '+filesNames[0]+' line #', segmented.index(line), ' word #', line.index(word),' char #', word.index(char)) currentCharFeature = features.getFeatures(char, showResults=False, black_background=True) pictureFeatures.append(currentCharFeature) # cv2.resize(char, (100,60)) del textWords[0] # f = open('textFiles/'+i[i.rfind('\\')+1:-4]+'-words.txt','wb+') # for myWord in actualCharacters: # f.write(myWord.encode('utf8')+'\n'.encode('utf8')) # f.close() f = open('textFiles/'+i[i.rfind('\\')+1:-4]+'.txt','w+') for k in range(len(pictureFeatures)): f.write(str(pictureLabels[k])+' ') for current_feature in pictureFeatures[k]: f.write("%s " % current_feature) f.write('\n') 
f.close() # Read arguments in order parser = argparse.ArgumentParser("Train Module") parser.add_argument("features") parser.add_argument("classifier") args = parser.parse_args() # Parse the arguments written by the user in the commandline # Import Modules ################# # Import Features Type featuresModule = import_module('FeatureExtraction.' + args.features) # Dynamically load the features module featuresClass = getattr(featuresModule, args.features) features = featuresClass() if __name__ == "__main__": # Import classifier Type classifierModule = import_module('Classification.' + args.classifier) # Dynamically load the classifier module classifierClass = getattr(classifierModule, args.classifier) classifier = classifierClass(features.featuresNumber) ########################### mode = int(input("1.Segment\n2.Train\n3.Test existing Model\n")) if mode == 1: # set start time start_time = timeit.default_timer() #trainingImages, classifier.y_vals, __ = readImages(TRAINING_DATASET, trainTest = 0) # if TRAINING_DATASET == './Dataset/scanned': # os.listdir("Dataset/scanned/") print("Reading dataset to segment") trainingImages = [] imagesNames = [] # for i in tqdm(sorted(glob.glob(TRAINING_DATASET + "*/*.png"), key=natural_keys)): # trainingImages += [cv2.imread(i)] # imagesNames += [i[:-4]] print('-----------------------------') print("Preprocessing and feature Extraction Phase") processedCharacters = 0 ignoredWords = 0 processedWords = 0 segmented = None skippedImages = 0 TotalImages = 0 processes = [] start_time = timeit.default_timer() dataset = sorted(glob.glob(TRAINING_DATASET + "*/*.png"), key=natural_keys)[4400:4600] # 3000 DONE for i in list(dataset): p = Process(target=loop, args=(i,)) p.start() processes += [p] for p in processes: p.jo
print("running time = ", timeit.default_timer() - start_time) print('Finished All#########################################') elif mode==2: featuresList=list(glob.glob("textFiles" + "/*.txt"))[:1000] for filepath in tqdm(featuresList): with open(filepath) as fp: for line in fp: charData=line.replace('\n','').split(' ') del charData[-1] label=int(charData[0]) charData= [float(i) for i in charData[1:len(charData)]] # print(len(charData),label,charData) classifier.x_vals.append(charData) classifier.y_vals.append(label) print("Done reading segmented files") print('-----------------------------') print('Training Phase') print('-----------------------------') classifier.train() print('Testing Phase') print('-----------------------------') classifier.test() # Calculate and print total runtime print('Runtime: ', (timeit.default_timer() - start_time)/60) # Save Model print('Model Saved as ' +'Models/'+ args.classifier+'-'+args.features+ '-50' + '.sav') classifier.saveModel('Models/'+args.classifier+'-'+args.features+ '-50') else: # modelFileName = input("Model filename:") print('Loading Model') print('-----------------------------') classifier.loadModel('Models/'+args.classifier+'-'+args.features + '-50') print('Load Dataset Phase') print('-----------------------------') # trainingImages, __ , filesNames = readImages(TESTING_DATASET, trainTest = 1) # print(filesNames) print('Processing') print('-----------------------------') # create output directory directory = "./output/text/" if not os.path.exists(directory): os.makedirs(directory) runtime_file = open("./output/running_time.txt",'w+') # read test images and print corresponding text for i in tqdm(sorted(glob.glob(TESTING_DATASET + "*/*.png"))): textFileName = os.path.basename(i)[:-4]+'.txt'#.replace('scanned','text') f = open(directory + textFileName,'wb+') image = cv2.imread(i) start_time = timeit.default_timer() # start timer segmented = imagePreprocessing(image) # Get characters of image # print(len(segmented)) # [[[, 
, , characters], , , words] , , , lines] for word in segmented: for char in word: currentCharFeature = features.getFeatures(char, False) classificationResult = classifier.getResult([currentCharFeature]) # char = 'أ' char = getCharFromLabel(classificationResult) f.write(char.encode('utf8')) f.write(' '.encode('utf8')) # f.write('\n') runtime_file.write(str(timeit.default_timer() - start_time) + '\n') # write running time to file f.close() # filesNames.pop(0) runtime_file.close()
in()
conditional_block
ArabicOCR.py
# To Run Arabic OCR # python ArabicOCR.py <featureMethod> <classifier> # featureMethod: StatisticalFeatures - NewGeometricFeatures # classifier: SVM # TRAINING_DATASET = './Letters-Dataset-Generator/LettersDataset' TRAINING_DATASET = './Dataset/scanned' TESTING_DATASET = './Dataset/Testing' from importlib import import_module import argparse from tqdm import tqdm import timeit import glob import cv2 import numpy as np import os from Preprocessing.Lines import LineSegmentation from Preprocessing.Words import WordSegmentation from Preprocessing.Characters import CharacterSegmentation from Classification.TextLabeling import get_labels, getCharFromLabel # from Preprocessing.PreprocessingTrain import get_dataset import h5py from multiprocessing import Process # hdf5_dir = "PreprocessingOutput/1000-2000/" # def get_dataset(chars_file,labels_file,count=-1): # cfile= h5py.File(hdf5_dir +chars_file, "r+") # imgs=[] # i=0 # for img in cfile.keys(): # if count!= -1 and i>=count: # break # i+=1 # words=[] # word_k= len(cfile[img].keys()) # for word in range(word_k): # word_1=[] # for char in cfile[img][str(word)].keys(): # word_1+=[np.array(cfile[img][str(word)][char])] # words+=[word_1] # imgs+=[words] # lfile= h5py.File(hdf5_dir +labels_file, "r+") # labels=[] # i=0 # for img in lfile.keys(): # if count!= -1 and i>=count: # break # i+=1 # label_img=[] # for word in lfile[img].keys(): # label_1=[] # for label in lfile[img][word].keys(): # label_1+=[np.array(lfile[img][word][label])] # label_img+=[label_1] # labels+=[label_img] # return imgs,labels import re def atoi(text): return int(text) if text.isdigit() else text def natural_keys(text): return [ atoi(c) for c in re.split(r'(\d+)', text) ] # checking if string contains list element def contains(string, str_list): res = [ele for ele in str_list if(ele in string)] return bool(res) def readImages(folder, trainTest = 0): images = [] folders = [f for f in glob.glob(folder + "**/*", recursive=True)] y_vals = [] filesNames = [] 
for folder in folders: img_files = [img_file for img_file in glob.glob(folder + "**/*.png", recursive=True)] for i in range(len(img_files)): filesNames += [img_files[i]] img = cv2.imread(img_files[i]) images.append(img) if trainTest == 0: label = int(img_files[i][img_files[i].index("label_") + len("label_"):img_files[i].index("_size")]) y_vals.append(label) return images, y_vals, filesNames def readImagesInFolder(folder): images = [] folders = [f for f in glob.glob(folder + "**/*", recursive=True)] y_vals = [] filesNames = [] image_count = 0 for img in folders: # only read first image if image_count == 4: return images, y_vals, filesNames filesNames += [img[img.index("scanned/") + len("scanned/"):]] image_count += 1 return images, y_vals, filesNames def imagePreprocessing(img): # Segment paragraph into lines
def loop(i): pictureFeatures = [] pictureLabels = [] # actualCharacters = [] image = cv2.imread(i) textFileName = i[:-4].replace('scanned', 'text') textWords = open(textFileName + '.txt', encoding='utf-8').read().replace('\n', ' ').split(' ') textWords = [item for item in textWords if item != ''] segmented = imagePreprocessing(image) # Get characters of image # print("segmented", textFileName) # [[[, , , characters], , , words] , , , lines] double_char = "لا" segmentedWords = len(segmented) # TotalImages += 1 # print("Text: ", len(textWords), "Segmented: ", len(segmented)) if len(textWords) != segmentedWords: # skippedImages += 1 # print(i[i.rfind('\\')+1:-4],"is skipped") return # faultyWordSegmented = False # if len(textWords) < segmentedWords: # print("FAULTY IMAGE") # faultyWordSegmented = True for wordIndex in range(len(segmented)): word = segmented[wordIndex] correspondingTextWord = textWords[0] # get count of occurances of "lam-alf" in word occurances_count = correspondingTextWord.count(double_char) text_length = len(correspondingTextWord) # treat every "lam-alf" as one character text_length -= occurances_count # print("NOW Processing: ", correspondingTextWord, "text_length = ", text_length, " occurances_count = ", occurances_count, ' len(word) = ', len(word) ) if len(word) != text_length: # segmented characters != word characters # if faultyWordSegmented and wordIndex + 1 < len(segmented) and occurances_count > 0 and text_length == len(word) + len(segmented[wordIndex+1]): # # There is لا that is causing a problem # correspondingTextWord = correspondingTextWord[:len(word)+1] # print("correspondingTextWord = ", correspondingTextWord) # textWords[0] = textWords[0][len(word)+1:] # processedWords += 1 # else: # ignoredWords += 1 del textWords[0] continue # else: # processedWords += 1 # del textWords[0] pictureLabels.extend(get_labels(correspondingTextWord)) # actualCharacters += [correspondingTextWord] for char in word: # processedCharacters += 1 # 
print('Currently processing image '+filesNames[0]+' line #', segmented.index(line), ' word #', line.index(word),' char #', word.index(char)) currentCharFeature = features.getFeatures(char, showResults=False, black_background=True) pictureFeatures.append(currentCharFeature) # cv2.resize(char, (100,60)) del textWords[0] # f = open('textFiles/'+i[i.rfind('\\')+1:-4]+'-words.txt','wb+') # for myWord in actualCharacters: # f.write(myWord.encode('utf8')+'\n'.encode('utf8')) # f.close() f = open('textFiles/'+i[i.rfind('\\')+1:-4]+'.txt','w+') for k in range(len(pictureFeatures)): f.write(str(pictureLabels[k])+' ') for current_feature in pictureFeatures[k]: f.write("%s " % current_feature) f.write('\n') f.close() # Read arguments in order parser = argparse.ArgumentParser("Train Module") parser.add_argument("features") parser.add_argument("classifier") args = parser.parse_args() # Parse the arguments written by the user in the commandline # Import Modules ################# # Import Features Type featuresModule = import_module('FeatureExtraction.' + args.features) # Dynamically load the features module featuresClass = getattr(featuresModule, args.features) features = featuresClass() if __name__ == "__main__": # Import classifier Type classifierModule = import_module('Classification.' 
+ args.classifier) # Dynamically load the classifier module classifierClass = getattr(classifierModule, args.classifier) classifier = classifierClass(features.featuresNumber) ########################### mode = int(input("1.Segment\n2.Train\n3.Test existing Model\n")) if mode == 1: # set start time start_time = timeit.default_timer() #trainingImages, classifier.y_vals, __ = readImages(TRAINING_DATASET, trainTest = 0) # if TRAINING_DATASET == './Dataset/scanned': # os.listdir("Dataset/scanned/") print("Reading dataset to segment") trainingImages = [] imagesNames = [] # for i in tqdm(sorted(glob.glob(TRAINING_DATASET + "*/*.png"), key=natural_keys)): # trainingImages += [cv2.imread(i)] # imagesNames += [i[:-4]] print('-----------------------------') print("Preprocessing and feature Extraction Phase") processedCharacters = 0 ignoredWords = 0 processedWords = 0 segmented = None skippedImages = 0 TotalImages = 0 processes = [] start_time = timeit.default_timer() dataset = sorted(glob.glob(TRAINING_DATASET + "*/*.png"), key=natural_keys)[4400:4600] # 3000 DONE for i in list(dataset): p = Process(target=loop, args=(i,)) p.start() processes += [p] for p in processes: p.join() print("running time = ", timeit.default_timer() - start_time) print('Finished All#########################################') elif mode==2: featuresList=list(glob.glob("textFiles" + "/*.txt"))[:1000] for filepath in tqdm(featuresList): with open(filepath) as fp: for line in fp: charData=line.replace('\n','').split(' ') del charData[-1] label=int(charData[0]) charData= [float(i) for i in charData[1:len(charData)]] # print(len(charData),label,charData) classifier.x_vals.append(charData) classifier.y_vals.append(label) print("Done reading segmented files") print('-----------------------------') print('Training Phase') print('-----------------------------') classifier.train() print('Testing Phase') print('-----------------------------') classifier.test() # Calculate and print total runtime print('Runtime: 
', (timeit.default_timer() - start_time)/60) # Save Model print('Model Saved as ' +'Models/'+ args.classifier+'-'+args.features+ '-50' + '.sav') classifier.saveModel('Models/'+args.classifier+'-'+args.features+ '-50') else: # modelFileName = input("Model filename:") print('Loading Model') print('-----------------------------') classifier.loadModel('Models/'+args.classifier+'-'+args.features + '-50') print('Load Dataset Phase') print('-----------------------------') # trainingImages, __ , filesNames = readImages(TESTING_DATASET, trainTest = 1) # print(filesNames) print('Processing') print('-----------------------------') # create output directory directory = "./output/text/" if not os.path.exists(directory): os.makedirs(directory) runtime_file = open("./output/running_time.txt",'w+') # read test images and print corresponding text for i in tqdm(sorted(glob.glob(TESTING_DATASET + "*/*.png"))): textFileName = os.path.basename(i)[:-4]+'.txt'#.replace('scanned','text') f = open(directory + textFileName,'wb+') image = cv2.imread(i) start_time = timeit.default_timer() # start timer segmented = imagePreprocessing(image) # Get characters of image # print(len(segmented)) # [[[, , , characters], , , words] , , , lines] for word in segmented: for char in word: currentCharFeature = features.getFeatures(char, False) classificationResult = classifier.getResult([currentCharFeature]) # char = 'أ' char = getCharFromLabel(classificationResult) f.write(char.encode('utf8')) f.write(' '.encode('utf8')) # f.write('\n') runtime_file.write(str(timeit.default_timer() - start_time) + '\n') # write running time to file f.close() # filesNames.pop(0) runtime_file.close()
lines = LineSegmentation(img, saveResults=False) # Segment lines into words words = [] for i in range(len(lines)): words.extend(WordSegmentation(lines[i], lineNumber = i, saveResults=False)) characters = [] # Segment words into characters for word in words: characters.append(CharacterSegmentation(np.array(word, dtype=np.uint8))) # print("lines= ",len(lines)," words= ",len(words)," chars= ",len(characters)) return characters # [[[, , , characters], , , words] , , , lines]
identifier_body
trace_service.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT. // source: envoy/service/trace/v3alpha/trace_service.proto package envoy_service_trace_v3alpha import ( context "context" fmt "fmt" core "github.com/altipla-consulting/envoy-api/envoy/api/v3alpha/core" v1 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" _ "github.com/envoyproxy/protoc-gen-validate/validate" _ "github.com/gogo/googleapis/google/api" proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type StreamTracesResponse struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StreamTracesResponse) Reset() { *m = StreamTracesResponse{} } func (m *StreamTracesResponse) String() string { return proto.CompactTextString(m) } func (*StreamTracesResponse) ProtoMessage() {} func (*StreamTracesResponse) Descriptor() ([]byte, []int) { return fileDescriptor_838555a39f11343b, []int{0} } func (m *StreamTracesResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamTracesResponse.Unmarshal(m, b) } func (m *StreamTracesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StreamTracesResponse.Marshal(b, m, deterministic) } func (m *StreamTracesResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamTracesResponse.Merge(m, src) } func (m *StreamTracesResponse) XXX_Size() int { return xxx_messageInfo_StreamTracesResponse.Size(m) } func (m *StreamTracesResponse) XXX_DiscardUnknown() { 
xxx_messageInfo_StreamTracesResponse.DiscardUnknown(m) } var xxx_messageInfo_StreamTracesResponse proto.InternalMessageInfo type StreamTracesMessage struct { // Identifier data effectively is a structured metadata. // As a performance optimization this will only be sent in the first message // on the stream. Identifier *StreamTracesMessage_Identifier `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` // A list of Span entries Spans []*v1.Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StreamTracesMessage)
() { *m = StreamTracesMessage{} } func (m *StreamTracesMessage) String() string { return proto.CompactTextString(m) } func (*StreamTracesMessage) ProtoMessage() {} func (*StreamTracesMessage) Descriptor() ([]byte, []int) { return fileDescriptor_838555a39f11343b, []int{1} } func (m *StreamTracesMessage) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamTracesMessage.Unmarshal(m, b) } func (m *StreamTracesMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StreamTracesMessage.Marshal(b, m, deterministic) } func (m *StreamTracesMessage) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamTracesMessage.Merge(m, src) } func (m *StreamTracesMessage) XXX_Size() int { return xxx_messageInfo_StreamTracesMessage.Size(m) } func (m *StreamTracesMessage) XXX_DiscardUnknown() { xxx_messageInfo_StreamTracesMessage.DiscardUnknown(m) } var xxx_messageInfo_StreamTracesMessage proto.InternalMessageInfo func (m *StreamTracesMessage) GetIdentifier() *StreamTracesMessage_Identifier { if m != nil { return m.Identifier } return nil } func (m *StreamTracesMessage) GetSpans() []*v1.Span { if m != nil { return m.Spans } return nil } type StreamTracesMessage_Identifier struct { // The node sending the access log messages over the stream. 
Node *core.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StreamTracesMessage_Identifier) Reset() { *m = StreamTracesMessage_Identifier{} } func (m *StreamTracesMessage_Identifier) String() string { return proto.CompactTextString(m) } func (*StreamTracesMessage_Identifier) ProtoMessage() {} func (*StreamTracesMessage_Identifier) Descriptor() ([]byte, []int) { return fileDescriptor_838555a39f11343b, []int{1, 0} } func (m *StreamTracesMessage_Identifier) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamTracesMessage_Identifier.Unmarshal(m, b) } func (m *StreamTracesMessage_Identifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StreamTracesMessage_Identifier.Marshal(b, m, deterministic) } func (m *StreamTracesMessage_Identifier) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamTracesMessage_Identifier.Merge(m, src) } func (m *StreamTracesMessage_Identifier) XXX_Size() int { return xxx_messageInfo_StreamTracesMessage_Identifier.Size(m) } func (m *StreamTracesMessage_Identifier) XXX_DiscardUnknown() { xxx_messageInfo_StreamTracesMessage_Identifier.DiscardUnknown(m) } var xxx_messageInfo_StreamTracesMessage_Identifier proto.InternalMessageInfo func (m *StreamTracesMessage_Identifier) GetNode() *core.Node { if m != nil { return m.Node } return nil } func init() { proto.RegisterType((*StreamTracesResponse)(nil), "envoy.service.trace.v3alpha.StreamTracesResponse") proto.RegisterType((*StreamTracesMessage)(nil), "envoy.service.trace.v3alpha.StreamTracesMessage") proto.RegisterType((*StreamTracesMessage_Identifier)(nil), "envoy.service.trace.v3alpha.StreamTracesMessage.Identifier") } func init() { proto.RegisterFile("envoy/service/trace/v3alpha/trace_service.proto", fileDescriptor_838555a39f11343b) } var fileDescriptor_838555a39f11343b = []byte{ // 349 bytes of a gzipped 
FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x90, 0x41, 0x4b, 0xfb, 0x40, 0x10, 0xc5, 0xff, 0xdb, 0xbf, 0x15, 0xd9, 0xf6, 0xa0, 0x51, 0xb4, 0xc4, 0x82, 0xb5, 0x20, 0xd4, 0xcb, 0xc6, 0xb6, 0x78, 0xd1, 0x5b, 0xbc, 0xe8, 0x41, 0x29, 0xa9, 0x37, 0x0f, 0x32, 0x4d, 0xc6, 0xba, 0x50, 0x77, 0x96, 0x6c, 0x8c, 0xf6, 0x13, 0x28, 0x7e, 0x5d, 0x6f, 0x9e, 0xa4, 0xd9, 0x24, 0xcd, 0x41, 0x8a, 0xde, 0x76, 0x67, 0xde, 0x7b, 0xfc, 0xe6, 0x71, 0x0f, 0x55, 0x4a, 0x73, 0xcf, 0x60, 0x9c, 0xca, 0x10, 0xbd, 0x24, 0x86, 0x10, 0xbd, 0x74, 0x08, 0x33, 0xfd, 0x08, 0xf6, 0x77, 0x9f, 0xef, 0x84, 0x8e, 0x29, 0x21, 0x67, 0x3f, 0x33, 0x88, 0x62, 0x98, 0x49, 0x44, 0x6e, 0x70, 0x0f, 0x6d, 0x1a, 0x68, 0x59, 0x66, 0x84, 0x14, 0xa3, 0x37, 0x01, 0x93, 0xfb, 0xdd, 0xf6, 0x94, 0x68, 0x3a, 0xc3, 0x4c, 0x03, 0x4a, 0x51, 0x02, 0x89, 0x24, 0x65, 0xf2, 0xed, 0x11, 0x69, 0x54, 0x21, 0x2a, 0xf3, 0x6c, 0xbc, 0x6c, 0x52, 0x10, 0xf5, 0xed, 0x23, 0x97, 0xed, 0xa5, 0x30, 0x93, 0x11, 0x24, 0xe8, 0x15, 0x0f, 0xbb, 0xe8, 0xee, 0xf2, 0x9d, 0x71, 0x12, 0x23, 0x3c, 0xdd, 0x2e, 0xd4, 0x26, 0x40, 0xa3, 0x49, 0x19, 0xec, 0x7e, 0x32, 0xbe, 0x5d, 0x5d, 0x5c, 0xa3, 0x31, 0x30, 0x45, 0xe7, 0x8e, 0x73, 0x19, 0xa1, 0x4a, 0xe4, 0x83, 0xc4, 0xb8, 0xc5, 0x3a, 0xac, 0xd7, 0x18, 0x9c, 0x8b, 0x15, 0x27, 0x8a, 0x1f, 0x52, 0xc4, 0x55, 0x19, 0x11, 0x54, 0xe2, 0x9c, 0x53, 0x5e, 0x37, 0x1a, 0x94, 0x69, 0xd5, 0x3a, 0xff, 0x7b, 0x8d, 0xc1, 0x81, 0x58, 0x1e, 0x67, 0x71, 0x8b, 0xe8, 0xbe, 0x18, 0x6b, 0x50, 0x81, 0x55, 0xbb, 0x97, 0x9c, 0x2f, 0x03, 0x9d, 0x33, 0xbe, 0xa6, 0x28, 0xc2, 0x9c, 0xad, 0x9d, 0xb3, 0x81, 0x96, 0x25, 0xd1, 0xa2, 0x61, 0x71, 0x43, 0x11, 0xfa, 0x1b, 0x5f, 0x7e, 0xfd, 0x83, 0xd5, 0x36, 0x59, 0x90, 0x79, 0x06, 0x6f, 0x8c, 0x37, 0x33, 0xd2, 0xb1, 0x3d, 0xc5, 0x79, 0xe1, 0xcd, 0x2a, 0xbf, 0x73, 0xf2, 0xd7, 0x53, 0xdd, 0xfe, 0xaf, 0x1d, 0x65, 0xf7, 0xff, 0x7a, 0xcc, 0xbf, 0xe0, 0xc7, 0x92, 0xac, 0x55, 0xc7, 0xf4, 0x3a, 0x5f, 0x95, 0xe2, 0x6f, 0x55, 0x99, 0x47, 0x8b, 0xa2, 0x46, 0xec, 0x9d, 
0xb1, 0xc9, 0x7a, 0x56, 0xda, 0xf0, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x17, 0x26, 0x06, 0x23, 0xb4, 0x02, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // TraceServiceClient is the client API for TraceService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type TraceServiceClient interface { // Envoy will connect and send StreamTracesMessage messages forever. It does // not expect any response to be sent as nothing would be done in the case // of failure. StreamTraces(ctx context.Context, opts ...grpc.CallOption) (TraceService_StreamTracesClient, error) } type traceServiceClient struct { cc *grpc.ClientConn } func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient { return &traceServiceClient{cc} } func (c *traceServiceClient) StreamTraces(ctx context.Context, opts ...grpc.CallOption) (TraceService_StreamTracesClient, error) { stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[0], "/envoy.service.trace.v3alpha.TraceService/StreamTraces", opts...) 
if err != nil { return nil, err } x := &traceServiceStreamTracesClient{stream} return x, nil } type TraceService_StreamTracesClient interface { Send(*StreamTracesMessage) error CloseAndRecv() (*StreamTracesResponse, error) grpc.ClientStream } type traceServiceStreamTracesClient struct { grpc.ClientStream } func (x *traceServiceStreamTracesClient) Send(m *StreamTracesMessage) error { return x.ClientStream.SendMsg(m) } func (x *traceServiceStreamTracesClient) CloseAndRecv() (*StreamTracesResponse, error) { if err := x.ClientStream.CloseSend(); err != nil { return nil, err } m := new(StreamTracesResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // TraceServiceServer is the server API for TraceService service. type TraceServiceServer interface { // Envoy will connect and send StreamTracesMessage messages forever. It does // not expect any response to be sent as nothing would be done in the case // of failure. StreamTraces(TraceService_StreamTracesServer) error } func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) { s.RegisterService(&_TraceService_serviceDesc, srv) } func _TraceService_StreamTraces_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(TraceServiceServer).StreamTraces(&traceServiceStreamTracesServer{stream}) } type TraceService_StreamTracesServer interface { SendAndClose(*StreamTracesResponse) error Recv() (*StreamTracesMessage, error) grpc.ServerStream } type traceServiceStreamTracesServer struct { grpc.ServerStream } func (x *traceServiceStreamTracesServer) SendAndClose(m *StreamTracesResponse) error { return x.ServerStream.SendMsg(m) } func (x *traceServiceStreamTracesServer) Recv() (*StreamTracesMessage, error) { m := new(StreamTracesMessage) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } var _TraceService_serviceDesc = grpc.ServiceDesc{ ServiceName: "envoy.service.trace.v3alpha.TraceService", HandlerType: (*TraceServiceServer)(nil), 
Methods: []grpc.MethodDesc{}, Streams: []grpc.StreamDesc{ { StreamName: "StreamTraces", Handler: _TraceService_StreamTraces_Handler, ClientStreams: true, }, }, Metadata: "envoy/service/trace/v3alpha/trace_service.proto", }
Reset
identifier_name
trace_service.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT. // source: envoy/service/trace/v3alpha/trace_service.proto package envoy_service_trace_v3alpha import ( context "context" fmt "fmt" core "github.com/altipla-consulting/envoy-api/envoy/api/v3alpha/core" v1 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" _ "github.com/envoyproxy/protoc-gen-validate/validate" _ "github.com/gogo/googleapis/google/api" proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type StreamTracesResponse struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StreamTracesResponse) Reset() { *m = StreamTracesResponse{} } func (m *StreamTracesResponse) String() string { return proto.CompactTextString(m) } func (*StreamTracesResponse) ProtoMessage() {} func (*StreamTracesResponse) Descriptor() ([]byte, []int) { return fileDescriptor_838555a39f11343b, []int{0} } func (m *StreamTracesResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamTracesResponse.Unmarshal(m, b) } func (m *StreamTracesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StreamTracesResponse.Marshal(b, m, deterministic) } func (m *StreamTracesResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamTracesResponse.Merge(m, src) } func (m *StreamTracesResponse) XXX_Size() int { return xxx_messageInfo_StreamTracesResponse.Size(m) } func (m *StreamTracesResponse) XXX_DiscardUnknown() { 
xxx_messageInfo_StreamTracesResponse.DiscardUnknown(m) } var xxx_messageInfo_StreamTracesResponse proto.InternalMessageInfo type StreamTracesMessage struct { // Identifier data effectively is a structured metadata. // As a performance optimization this will only be sent in the first message // on the stream. Identifier *StreamTracesMessage_Identifier `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` // A list of Span entries Spans []*v1.Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StreamTracesMessage) Reset() { *m = StreamTracesMessage{} } func (m *StreamTracesMessage) String() string { return proto.CompactTextString(m) } func (*StreamTracesMessage) ProtoMessage() {} func (*StreamTracesMessage) Descriptor() ([]byte, []int) { return fileDescriptor_838555a39f11343b, []int{1} } func (m *StreamTracesMessage) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamTracesMessage.Unmarshal(m, b) } func (m *StreamTracesMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StreamTracesMessage.Marshal(b, m, deterministic) } func (m *StreamTracesMessage) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamTracesMessage.Merge(m, src) } func (m *StreamTracesMessage) XXX_Size() int { return xxx_messageInfo_StreamTracesMessage.Size(m) } func (m *StreamTracesMessage) XXX_DiscardUnknown() { xxx_messageInfo_StreamTracesMessage.DiscardUnknown(m) } var xxx_messageInfo_StreamTracesMessage proto.InternalMessageInfo func (m *StreamTracesMessage) GetIdentifier() *StreamTracesMessage_Identifier { if m != nil { return m.Identifier } return nil } func (m *StreamTracesMessage) GetSpans() []*v1.Span { if m != nil { return m.Spans } return nil } type StreamTracesMessage_Identifier struct { // The node sending the access log messages over the stream. 
Node *core.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StreamTracesMessage_Identifier) Reset() { *m = StreamTracesMessage_Identifier{} } func (m *StreamTracesMessage_Identifier) String() string { return proto.CompactTextString(m) } func (*StreamTracesMessage_Identifier) ProtoMessage() {} func (*StreamTracesMessage_Identifier) Descriptor() ([]byte, []int) { return fileDescriptor_838555a39f11343b, []int{1, 0} } func (m *StreamTracesMessage_Identifier) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamTracesMessage_Identifier.Unmarshal(m, b) } func (m *StreamTracesMessage_Identifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StreamTracesMessage_Identifier.Marshal(b, m, deterministic) } func (m *StreamTracesMessage_Identifier) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamTracesMessage_Identifier.Merge(m, src) } func (m *StreamTracesMessage_Identifier) XXX_Size() int { return xxx_messageInfo_StreamTracesMessage_Identifier.Size(m) } func (m *StreamTracesMessage_Identifier) XXX_DiscardUnknown() { xxx_messageInfo_StreamTracesMessage_Identifier.DiscardUnknown(m) } var xxx_messageInfo_StreamTracesMessage_Identifier proto.InternalMessageInfo func (m *StreamTracesMessage_Identifier) GetNode() *core.Node { if m != nil
return nil } func init() { proto.RegisterType((*StreamTracesResponse)(nil), "envoy.service.trace.v3alpha.StreamTracesResponse") proto.RegisterType((*StreamTracesMessage)(nil), "envoy.service.trace.v3alpha.StreamTracesMessage") proto.RegisterType((*StreamTracesMessage_Identifier)(nil), "envoy.service.trace.v3alpha.StreamTracesMessage.Identifier") } func init() { proto.RegisterFile("envoy/service/trace/v3alpha/trace_service.proto", fileDescriptor_838555a39f11343b) } var fileDescriptor_838555a39f11343b = []byte{ // 349 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x90, 0x41, 0x4b, 0xfb, 0x40, 0x10, 0xc5, 0xff, 0xdb, 0xbf, 0x15, 0xd9, 0xf6, 0xa0, 0x51, 0xb4, 0xc4, 0x82, 0xb5, 0x20, 0xd4, 0xcb, 0xc6, 0xb6, 0x78, 0xd1, 0x5b, 0xbc, 0xe8, 0x41, 0x29, 0xa9, 0x37, 0x0f, 0x32, 0x4d, 0xc6, 0xba, 0x50, 0x77, 0x96, 0x6c, 0x8c, 0xf6, 0x13, 0x28, 0x7e, 0x5d, 0x6f, 0x9e, 0xa4, 0xd9, 0x24, 0xcd, 0x41, 0x8a, 0xde, 0x76, 0x67, 0xde, 0x7b, 0xfc, 0xe6, 0x71, 0x0f, 0x55, 0x4a, 0x73, 0xcf, 0x60, 0x9c, 0xca, 0x10, 0xbd, 0x24, 0x86, 0x10, 0xbd, 0x74, 0x08, 0x33, 0xfd, 0x08, 0xf6, 0x77, 0x9f, 0xef, 0x84, 0x8e, 0x29, 0x21, 0x67, 0x3f, 0x33, 0x88, 0x62, 0x98, 0x49, 0x44, 0x6e, 0x70, 0x0f, 0x6d, 0x1a, 0x68, 0x59, 0x66, 0x84, 0x14, 0xa3, 0x37, 0x01, 0x93, 0xfb, 0xdd, 0xf6, 0x94, 0x68, 0x3a, 0xc3, 0x4c, 0x03, 0x4a, 0x51, 0x02, 0x89, 0x24, 0x65, 0xf2, 0xed, 0x11, 0x69, 0x54, 0x21, 0x2a, 0xf3, 0x6c, 0xbc, 0x6c, 0x52, 0x10, 0xf5, 0xed, 0x23, 0x97, 0xed, 0xa5, 0x30, 0x93, 0x11, 0x24, 0xe8, 0x15, 0x0f, 0xbb, 0xe8, 0xee, 0xf2, 0x9d, 0x71, 0x12, 0x23, 0x3c, 0xdd, 0x2e, 0xd4, 0x26, 0x40, 0xa3, 0x49, 0x19, 0xec, 0x7e, 0x32, 0xbe, 0x5d, 0x5d, 0x5c, 0xa3, 0x31, 0x30, 0x45, 0xe7, 0x8e, 0x73, 0x19, 0xa1, 0x4a, 0xe4, 0x83, 0xc4, 0xb8, 0xc5, 0x3a, 0xac, 0xd7, 0x18, 0x9c, 0x8b, 0x15, 0x27, 0x8a, 0x1f, 0x52, 0xc4, 0x55, 0x19, 0x11, 0x54, 0xe2, 0x9c, 0x53, 0x5e, 0x37, 0x1a, 0x94, 0x69, 0xd5, 0x3a, 0xff, 0x7b, 0x8d, 0xc1, 0x81, 0x58, 0x1e, 0x67, 0x71, 
0x8b, 0xe8, 0xbe, 0x18, 0x6b, 0x50, 0x81, 0x55, 0xbb, 0x97, 0x9c, 0x2f, 0x03, 0x9d, 0x33, 0xbe, 0xa6, 0x28, 0xc2, 0x9c, 0xad, 0x9d, 0xb3, 0x81, 0x96, 0x25, 0xd1, 0xa2, 0x61, 0x71, 0x43, 0x11, 0xfa, 0x1b, 0x5f, 0x7e, 0xfd, 0x83, 0xd5, 0x36, 0x59, 0x90, 0x79, 0x06, 0x6f, 0x8c, 0x37, 0x33, 0xd2, 0xb1, 0x3d, 0xc5, 0x79, 0xe1, 0xcd, 0x2a, 0xbf, 0x73, 0xf2, 0xd7, 0x53, 0xdd, 0xfe, 0xaf, 0x1d, 0x65, 0xf7, 0xff, 0x7a, 0xcc, 0xbf, 0xe0, 0xc7, 0x92, 0xac, 0x55, 0xc7, 0xf4, 0x3a, 0x5f, 0x95, 0xe2, 0x6f, 0x55, 0x99, 0x47, 0x8b, 0xa2, 0x46, 0xec, 0x9d, 0xb1, 0xc9, 0x7a, 0x56, 0xda, 0xf0, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x17, 0x26, 0x06, 0x23, 0xb4, 0x02, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // TraceServiceClient is the client API for TraceService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type TraceServiceClient interface { // Envoy will connect and send StreamTracesMessage messages forever. It does // not expect any response to be sent as nothing would be done in the case // of failure. StreamTraces(ctx context.Context, opts ...grpc.CallOption) (TraceService_StreamTracesClient, error) } type traceServiceClient struct { cc *grpc.ClientConn } func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient { return &traceServiceClient{cc} } func (c *traceServiceClient) StreamTraces(ctx context.Context, opts ...grpc.CallOption) (TraceService_StreamTracesClient, error) { stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[0], "/envoy.service.trace.v3alpha.TraceService/StreamTraces", opts...) 
if err != nil { return nil, err } x := &traceServiceStreamTracesClient{stream} return x, nil } type TraceService_StreamTracesClient interface { Send(*StreamTracesMessage) error CloseAndRecv() (*StreamTracesResponse, error) grpc.ClientStream } type traceServiceStreamTracesClient struct { grpc.ClientStream } func (x *traceServiceStreamTracesClient) Send(m *StreamTracesMessage) error { return x.ClientStream.SendMsg(m) } func (x *traceServiceStreamTracesClient) CloseAndRecv() (*StreamTracesResponse, error) { if err := x.ClientStream.CloseSend(); err != nil { return nil, err } m := new(StreamTracesResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // TraceServiceServer is the server API for TraceService service. type TraceServiceServer interface { // Envoy will connect and send StreamTracesMessage messages forever. It does // not expect any response to be sent as nothing would be done in the case // of failure. StreamTraces(TraceService_StreamTracesServer) error } func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) { s.RegisterService(&_TraceService_serviceDesc, srv) } func _TraceService_StreamTraces_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(TraceServiceServer).StreamTraces(&traceServiceStreamTracesServer{stream}) } type TraceService_StreamTracesServer interface { SendAndClose(*StreamTracesResponse) error Recv() (*StreamTracesMessage, error) grpc.ServerStream } type traceServiceStreamTracesServer struct { grpc.ServerStream } func (x *traceServiceStreamTracesServer) SendAndClose(m *StreamTracesResponse) error { return x.ServerStream.SendMsg(m) } func (x *traceServiceStreamTracesServer) Recv() (*StreamTracesMessage, error) { m := new(StreamTracesMessage) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } var _TraceService_serviceDesc = grpc.ServiceDesc{ ServiceName: "envoy.service.trace.v3alpha.TraceService", HandlerType: (*TraceServiceServer)(nil), 
Methods: []grpc.MethodDesc{}, Streams: []grpc.StreamDesc{ { StreamName: "StreamTraces", Handler: _TraceService_StreamTraces_Handler, ClientStreams: true, }, }, Metadata: "envoy/service/trace/v3alpha/trace_service.proto", }
{ return m.Node }
conditional_block
trace_service.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT. // source: envoy/service/trace/v3alpha/trace_service.proto package envoy_service_trace_v3alpha import ( context "context" fmt "fmt" core "github.com/altipla-consulting/envoy-api/envoy/api/v3alpha/core" v1 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" _ "github.com/envoyproxy/protoc-gen-validate/validate" _ "github.com/gogo/googleapis/google/api" proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type StreamTracesResponse struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StreamTracesResponse) Reset() { *m = StreamTracesResponse{} } func (m *StreamTracesResponse) String() string { return proto.CompactTextString(m) } func (*StreamTracesResponse) ProtoMessage() {} func (*StreamTracesResponse) Descriptor() ([]byte, []int) { return fileDescriptor_838555a39f11343b, []int{0} } func (m *StreamTracesResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamTracesResponse.Unmarshal(m, b) } func (m *StreamTracesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (m *StreamTracesResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamTracesResponse.Merge(m, src) } func (m *StreamTracesResponse) XXX_Size() int { return xxx_messageInfo_StreamTracesResponse.Size(m) } func (m *StreamTracesResponse) XXX_DiscardUnknown() { xxx_messageInfo_StreamTracesResponse.DiscardUnknown(m) } var xxx_messageInfo_StreamTracesResponse proto.InternalMessageInfo type StreamTracesMessage struct { // Identifier data effectively is a structured metadata. // As a performance optimization this will only be sent in the first message // on the stream. Identifier *StreamTracesMessage_Identifier `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` // A list of Span entries Spans []*v1.Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StreamTracesMessage) Reset() { *m = StreamTracesMessage{} } func (m *StreamTracesMessage) String() string { return proto.CompactTextString(m) } func (*StreamTracesMessage) ProtoMessage() {} func (*StreamTracesMessage) Descriptor() ([]byte, []int) { return fileDescriptor_838555a39f11343b, []int{1} } func (m *StreamTracesMessage) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamTracesMessage.Unmarshal(m, b) } func (m *StreamTracesMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StreamTracesMessage.Marshal(b, m, deterministic) } func (m *StreamTracesMessage) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamTracesMessage.Merge(m, src) } func (m *StreamTracesMessage) XXX_Size() int { return xxx_messageInfo_StreamTracesMessage.Size(m) } func (m *StreamTracesMessage) XXX_DiscardUnknown() { xxx_messageInfo_StreamTracesMessage.DiscardUnknown(m) } var xxx_messageInfo_StreamTracesMessage proto.InternalMessageInfo func (m *StreamTracesMessage) GetIdentifier() *StreamTracesMessage_Identifier { if m != nil { 
return m.Identifier } return nil } func (m *StreamTracesMessage) GetSpans() []*v1.Span { if m != nil { return m.Spans } return nil } type StreamTracesMessage_Identifier struct { // The node sending the access log messages over the stream. Node *core.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StreamTracesMessage_Identifier) Reset() { *m = StreamTracesMessage_Identifier{} } func (m *StreamTracesMessage_Identifier) String() string { return proto.CompactTextString(m) } func (*StreamTracesMessage_Identifier) ProtoMessage() {} func (*StreamTracesMessage_Identifier) Descriptor() ([]byte, []int) { return fileDescriptor_838555a39f11343b, []int{1, 0} } func (m *StreamTracesMessage_Identifier) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamTracesMessage_Identifier.Unmarshal(m, b) } func (m *StreamTracesMessage_Identifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StreamTracesMessage_Identifier.Marshal(b, m, deterministic) } func (m *StreamTracesMessage_Identifier) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamTracesMessage_Identifier.Merge(m, src) } func (m *StreamTracesMessage_Identifier) XXX_Size() int { return xxx_messageInfo_StreamTracesMessage_Identifier.Size(m) } func (m *StreamTracesMessage_Identifier) XXX_DiscardUnknown() { xxx_messageInfo_StreamTracesMessage_Identifier.DiscardUnknown(m) } var xxx_messageInfo_StreamTracesMessage_Identifier proto.InternalMessageInfo func (m *StreamTracesMessage_Identifier) GetNode() *core.Node { if m != nil { return m.Node } return nil } func init() { proto.RegisterType((*StreamTracesResponse)(nil), "envoy.service.trace.v3alpha.StreamTracesResponse") proto.RegisterType((*StreamTracesMessage)(nil), "envoy.service.trace.v3alpha.StreamTracesMessage") proto.RegisterType((*StreamTracesMessage_Identifier)(nil), 
"envoy.service.trace.v3alpha.StreamTracesMessage.Identifier") } func init() { proto.RegisterFile("envoy/service/trace/v3alpha/trace_service.proto", fileDescriptor_838555a39f11343b) } var fileDescriptor_838555a39f11343b = []byte{ // 349 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x90, 0x41, 0x4b, 0xfb, 0x40, 0x10, 0xc5, 0xff, 0xdb, 0xbf, 0x15, 0xd9, 0xf6, 0xa0, 0x51, 0xb4, 0xc4, 0x82, 0xb5, 0x20, 0xd4, 0xcb, 0xc6, 0xb6, 0x78, 0xd1, 0x5b, 0xbc, 0xe8, 0x41, 0x29, 0xa9, 0x37, 0x0f, 0x32, 0x4d, 0xc6, 0xba, 0x50, 0x77, 0x96, 0x6c, 0x8c, 0xf6, 0x13, 0x28, 0x7e, 0x5d, 0x6f, 0x9e, 0xa4, 0xd9, 0x24, 0xcd, 0x41, 0x8a, 0xde, 0x76, 0x67, 0xde, 0x7b, 0xfc, 0xe6, 0x71, 0x0f, 0x55, 0x4a, 0x73, 0xcf, 0x60, 0x9c, 0xca, 0x10, 0xbd, 0x24, 0x86, 0x10, 0xbd, 0x74, 0x08, 0x33, 0xfd, 0x08, 0xf6, 0x77, 0x9f, 0xef, 0x84, 0x8e, 0x29, 0x21, 0x67, 0x3f, 0x33, 0x88, 0x62, 0x98, 0x49, 0x44, 0x6e, 0x70, 0x0f, 0x6d, 0x1a, 0x68, 0x59, 0x66, 0x84, 0x14, 0xa3, 0x37, 0x01, 0x93, 0xfb, 0xdd, 0xf6, 0x94, 0x68, 0x3a, 0xc3, 0x4c, 0x03, 0x4a, 0x51, 0x02, 0x89, 0x24, 0x65, 0xf2, 0xed, 0x11, 0x69, 0x54, 0x21, 0x2a, 0xf3, 0x6c, 0xbc, 0x6c, 0x52, 0x10, 0xf5, 0xed, 0x23, 0x97, 0xed, 0xa5, 0x30, 0x93, 0x11, 0x24, 0xe8, 0x15, 0x0f, 0xbb, 0xe8, 0xee, 0xf2, 0x9d, 0x71, 0x12, 0x23, 0x3c, 0xdd, 0x2e, 0xd4, 0x26, 0x40, 0xa3, 0x49, 0x19, 0xec, 0x7e, 0x32, 0xbe, 0x5d, 0x5d, 0x5c, 0xa3, 0x31, 0x30, 0x45, 0xe7, 0x8e, 0x73, 0x19, 0xa1, 0x4a, 0xe4, 0x83, 0xc4, 0xb8, 0xc5, 0x3a, 0xac, 0xd7, 0x18, 0x9c, 0x8b, 0x15, 0x27, 0x8a, 0x1f, 0x52, 0xc4, 0x55, 0x19, 0x11, 0x54, 0xe2, 0x9c, 0x53, 0x5e, 0x37, 0x1a, 0x94, 0x69, 0xd5, 0x3a, 0xff, 0x7b, 0x8d, 0xc1, 0x81, 0x58, 0x1e, 0x67, 0x71, 0x8b, 0xe8, 0xbe, 0x18, 0x6b, 0x50, 0x81, 0x55, 0xbb, 0x97, 0x9c, 0x2f, 0x03, 0x9d, 0x33, 0xbe, 0xa6, 0x28, 0xc2, 0x9c, 0xad, 0x9d, 0xb3, 0x81, 0x96, 0x25, 0xd1, 0xa2, 0x61, 0x71, 0x43, 0x11, 0xfa, 0x1b, 0x5f, 0x7e, 0xfd, 0x83, 0xd5, 0x36, 0x59, 0x90, 0x79, 0x06, 0x6f, 0x8c, 0x37, 0x33, 
0xd2, 0xb1, 0x3d, 0xc5, 0x79, 0xe1, 0xcd, 0x2a, 0xbf, 0x73, 0xf2, 0xd7, 0x53, 0xdd, 0xfe, 0xaf, 0x1d, 0x65, 0xf7, 0xff, 0x7a, 0xcc, 0xbf, 0xe0, 0xc7, 0x92, 0xac, 0x55, 0xc7, 0xf4, 0x3a, 0x5f, 0x95, 0xe2, 0x6f, 0x55, 0x99, 0x47, 0x8b, 0xa2, 0x46, 0xec, 0x9d, 0xb1, 0xc9, 0x7a, 0x56, 0xda, 0xf0, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x17, 0x26, 0x06, 0x23, 0xb4, 0x02, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // TraceServiceClient is the client API for TraceService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type TraceServiceClient interface { // Envoy will connect and send StreamTracesMessage messages forever. It does // not expect any response to be sent as nothing would be done in the case // of failure. StreamTraces(ctx context.Context, opts ...grpc.CallOption) (TraceService_StreamTracesClient, error) } type traceServiceClient struct { cc *grpc.ClientConn } func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient { return &traceServiceClient{cc} } func (c *traceServiceClient) StreamTraces(ctx context.Context, opts ...grpc.CallOption) (TraceService_StreamTracesClient, error) { stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[0], "/envoy.service.trace.v3alpha.TraceService/StreamTraces", opts...) 
if err != nil { return nil, err } x := &traceServiceStreamTracesClient{stream} return x, nil } type TraceService_StreamTracesClient interface { Send(*StreamTracesMessage) error CloseAndRecv() (*StreamTracesResponse, error) grpc.ClientStream } type traceServiceStreamTracesClient struct { grpc.ClientStream } func (x *traceServiceStreamTracesClient) Send(m *StreamTracesMessage) error { return x.ClientStream.SendMsg(m) } func (x *traceServiceStreamTracesClient) CloseAndRecv() (*StreamTracesResponse, error) { if err := x.ClientStream.CloseSend(); err != nil { return nil, err } m := new(StreamTracesResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // TraceServiceServer is the server API for TraceService service. type TraceServiceServer interface { // Envoy will connect and send StreamTracesMessage messages forever. It does // not expect any response to be sent as nothing would be done in the case // of failure. StreamTraces(TraceService_StreamTracesServer) error } func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) { s.RegisterService(&_TraceService_serviceDesc, srv) } func _TraceService_StreamTraces_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(TraceServiceServer).StreamTraces(&traceServiceStreamTracesServer{stream}) } type TraceService_StreamTracesServer interface { SendAndClose(*StreamTracesResponse) error Recv() (*StreamTracesMessage, error) grpc.ServerStream } type traceServiceStreamTracesServer struct { grpc.ServerStream } func (x *traceServiceStreamTracesServer) SendAndClose(m *StreamTracesResponse) error { return x.ServerStream.SendMsg(m) } func (x *traceServiceStreamTracesServer) Recv() (*StreamTracesMessage, error) { m := new(StreamTracesMessage) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } var _TraceService_serviceDesc = grpc.ServiceDesc{ ServiceName: "envoy.service.trace.v3alpha.TraceService", HandlerType: (*TraceServiceServer)(nil), 
Methods: []grpc.MethodDesc{}, Streams: []grpc.StreamDesc{ { StreamName: "StreamTraces", Handler: _TraceService_StreamTraces_Handler, ClientStreams: true, }, }, Metadata: "envoy/service/trace/v3alpha/trace_service.proto", }
{ return xxx_messageInfo_StreamTracesResponse.Marshal(b, m, deterministic) }
identifier_body
trace_service.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT. // source: envoy/service/trace/v3alpha/trace_service.proto package envoy_service_trace_v3alpha import ( context "context" fmt "fmt" core "github.com/altipla-consulting/envoy-api/envoy/api/v3alpha/core" v1 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" _ "github.com/envoyproxy/protoc-gen-validate/validate" _ "github.com/gogo/googleapis/google/api" proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type StreamTracesResponse struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StreamTracesResponse) Reset() { *m = StreamTracesResponse{} } func (m *StreamTracesResponse) String() string { return proto.CompactTextString(m) } func (*StreamTracesResponse) ProtoMessage() {} func (*StreamTracesResponse) Descriptor() ([]byte, []int) { return fileDescriptor_838555a39f11343b, []int{0} } func (m *StreamTracesResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamTracesResponse.Unmarshal(m, b) } func (m *StreamTracesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StreamTracesResponse.Marshal(b, m, deterministic) } func (m *StreamTracesResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamTracesResponse.Merge(m, src) } func (m *StreamTracesResponse) XXX_Size() int { return xxx_messageInfo_StreamTracesResponse.Size(m) } func (m *StreamTracesResponse) XXX_DiscardUnknown() { 
xxx_messageInfo_StreamTracesResponse.DiscardUnknown(m) } var xxx_messageInfo_StreamTracesResponse proto.InternalMessageInfo type StreamTracesMessage struct { // Identifier data effectively is a structured metadata. // As a performance optimization this will only be sent in the first message // on the stream. Identifier *StreamTracesMessage_Identifier `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` // A list of Span entries Spans []*v1.Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StreamTracesMessage) Reset() { *m = StreamTracesMessage{} } func (m *StreamTracesMessage) String() string { return proto.CompactTextString(m) } func (*StreamTracesMessage) ProtoMessage() {} func (*StreamTracesMessage) Descriptor() ([]byte, []int) { return fileDescriptor_838555a39f11343b, []int{1} } func (m *StreamTracesMessage) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamTracesMessage.Unmarshal(m, b) } func (m *StreamTracesMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StreamTracesMessage.Marshal(b, m, deterministic) } func (m *StreamTracesMessage) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamTracesMessage.Merge(m, src) } func (m *StreamTracesMessage) XXX_Size() int { return xxx_messageInfo_StreamTracesMessage.Size(m) } func (m *StreamTracesMessage) XXX_DiscardUnknown() { xxx_messageInfo_StreamTracesMessage.DiscardUnknown(m) } var xxx_messageInfo_StreamTracesMessage proto.InternalMessageInfo func (m *StreamTracesMessage) GetIdentifier() *StreamTracesMessage_Identifier { if m != nil { return m.Identifier } return nil } func (m *StreamTracesMessage) GetSpans() []*v1.Span { if m != nil { return m.Spans } return nil } type StreamTracesMessage_Identifier struct { // The node sending the access log messages over the stream. 
Node *core.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StreamTracesMessage_Identifier) Reset() { *m = StreamTracesMessage_Identifier{} } func (m *StreamTracesMessage_Identifier) String() string { return proto.CompactTextString(m) } func (*StreamTracesMessage_Identifier) ProtoMessage() {} func (*StreamTracesMessage_Identifier) Descriptor() ([]byte, []int) { return fileDescriptor_838555a39f11343b, []int{1, 0} } func (m *StreamTracesMessage_Identifier) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamTracesMessage_Identifier.Unmarshal(m, b) } func (m *StreamTracesMessage_Identifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StreamTracesMessage_Identifier.Marshal(b, m, deterministic) } func (m *StreamTracesMessage_Identifier) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamTracesMessage_Identifier.Merge(m, src) } func (m *StreamTracesMessage_Identifier) XXX_Size() int { return xxx_messageInfo_StreamTracesMessage_Identifier.Size(m) } func (m *StreamTracesMessage_Identifier) XXX_DiscardUnknown() { xxx_messageInfo_StreamTracesMessage_Identifier.DiscardUnknown(m) } var xxx_messageInfo_StreamTracesMessage_Identifier proto.InternalMessageInfo func (m *StreamTracesMessage_Identifier) GetNode() *core.Node { if m != nil { return m.Node } return nil } func init() { proto.RegisterType((*StreamTracesResponse)(nil), "envoy.service.trace.v3alpha.StreamTracesResponse") proto.RegisterType((*StreamTracesMessage)(nil), "envoy.service.trace.v3alpha.StreamTracesMessage") proto.RegisterType((*StreamTracesMessage_Identifier)(nil), "envoy.service.trace.v3alpha.StreamTracesMessage.Identifier") } func init() { proto.RegisterFile("envoy/service/trace/v3alpha/trace_service.proto", fileDescriptor_838555a39f11343b) } var fileDescriptor_838555a39f11343b = []byte{ // 349 bytes of a gzipped 
FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x90, 0x41, 0x4b, 0xfb, 0x40, 0x10, 0xc5, 0xff, 0xdb, 0xbf, 0x15, 0xd9, 0xf6, 0xa0, 0x51, 0xb4, 0xc4, 0x82, 0xb5, 0x20, 0xd4, 0xcb, 0xc6, 0xb6, 0x78, 0xd1, 0x5b, 0xbc, 0xe8, 0x41, 0x29, 0xa9, 0x37, 0x0f, 0x32, 0x4d, 0xc6, 0xba, 0x50, 0x77, 0x96, 0x6c, 0x8c, 0xf6, 0x13, 0x28, 0x7e, 0x5d, 0x6f, 0x9e, 0xa4, 0xd9, 0x24, 0xcd, 0x41, 0x8a, 0xde, 0x76, 0x67, 0xde, 0x7b, 0xfc, 0xe6, 0x71, 0x0f, 0x55, 0x4a, 0x73, 0xcf, 0x60, 0x9c, 0xca, 0x10, 0xbd, 0x24, 0x86, 0x10, 0xbd, 0x74, 0x08, 0x33, 0xfd, 0x08, 0xf6, 0x77, 0x9f, 0xef, 0x84, 0x8e, 0x29, 0x21, 0x67, 0x3f, 0x33, 0x88, 0x62, 0x98, 0x49, 0x44, 0x6e, 0x70, 0x0f, 0x6d, 0x1a, 0x68, 0x59, 0x66, 0x84, 0x14, 0xa3, 0x37, 0x01, 0x93, 0xfb, 0xdd, 0xf6, 0x94, 0x68, 0x3a, 0xc3, 0x4c, 0x03, 0x4a, 0x51, 0x02, 0x89, 0x24, 0x65, 0xf2, 0xed, 0x11, 0x69, 0x54, 0x21, 0x2a, 0xf3, 0x6c, 0xbc, 0x6c, 0x52, 0x10, 0xf5, 0xed, 0x23, 0x97, 0xed, 0xa5, 0x30, 0x93, 0x11, 0x24, 0xe8, 0x15, 0x0f, 0xbb, 0xe8, 0xee, 0xf2, 0x9d, 0x71, 0x12, 0x23, 0x3c, 0xdd, 0x2e, 0xd4, 0x26, 0x40, 0xa3, 0x49, 0x19, 0xec, 0x7e, 0x32, 0xbe, 0x5d, 0x5d, 0x5c, 0xa3, 0x31, 0x30, 0x45, 0xe7, 0x8e, 0x73, 0x19, 0xa1, 0x4a, 0xe4, 0x83, 0xc4, 0xb8, 0xc5, 0x3a, 0xac, 0xd7, 0x18, 0x9c, 0x8b, 0x15, 0x27, 0x8a, 0x1f, 0x52, 0xc4, 0x55, 0x19, 0x11, 0x54, 0xe2, 0x9c, 0x53, 0x5e, 0x37, 0x1a, 0x94, 0x69, 0xd5, 0x3a, 0xff, 0x7b, 0x8d, 0xc1, 0x81, 0x58, 0x1e, 0x67, 0x71, 0x8b, 0xe8, 0xbe, 0x18, 0x6b, 0x50, 0x81, 0x55, 0xbb, 0x97, 0x9c, 0x2f, 0x03, 0x9d, 0x33, 0xbe, 0xa6, 0x28, 0xc2, 0x9c, 0xad, 0x9d, 0xb3, 0x81, 0x96, 0x25, 0xd1, 0xa2, 0x61, 0x71, 0x43, 0x11, 0xfa, 0x1b, 0x5f, 0x7e, 0xfd, 0x83, 0xd5, 0x36, 0x59, 0x90, 0x79, 0x06, 0x6f, 0x8c, 0x37, 0x33, 0xd2, 0xb1, 0x3d, 0xc5, 0x79, 0xe1, 0xcd, 0x2a, 0xbf, 0x73, 0xf2, 0xd7, 0x53, 0xdd, 0xfe, 0xaf, 0x1d, 0x65, 0xf7, 0xff, 0x7a, 0xcc, 0xbf, 0xe0, 0xc7, 0x92, 0xac, 0x55, 0xc7, 0xf4, 0x3a, 0x5f, 0x95, 0xe2, 0x6f, 0x55, 0x99, 0x47, 0x8b, 0xa2, 0x46, 0xec, 0x9d, 
0xb1, 0xc9, 0x7a, 0x56, 0xda, 0xf0, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x17, 0x26, 0x06, 0x23, 0xb4, 0x02, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // TraceServiceClient is the client API for TraceService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type TraceServiceClient interface { // Envoy will connect and send StreamTracesMessage messages forever. It does // not expect any response to be sent as nothing would be done in the case // of failure. StreamTraces(ctx context.Context, opts ...grpc.CallOption) (TraceService_StreamTracesClient, error) } type traceServiceClient struct { cc *grpc.ClientConn } func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient { return &traceServiceClient{cc} } func (c *traceServiceClient) StreamTraces(ctx context.Context, opts ...grpc.CallOption) (TraceService_StreamTracesClient, error) { stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[0], "/envoy.service.trace.v3alpha.TraceService/StreamTraces", opts...) 
if err != nil { return nil, err } x := &traceServiceStreamTracesClient{stream} return x, nil } type TraceService_StreamTracesClient interface { Send(*StreamTracesMessage) error CloseAndRecv() (*StreamTracesResponse, error) grpc.ClientStream } type traceServiceStreamTracesClient struct { grpc.ClientStream } func (x *traceServiceStreamTracesClient) Send(m *StreamTracesMessage) error { return x.ClientStream.SendMsg(m) } func (x *traceServiceStreamTracesClient) CloseAndRecv() (*StreamTracesResponse, error) { if err := x.ClientStream.CloseSend(); err != nil { return nil, err } m := new(StreamTracesResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // TraceServiceServer is the server API for TraceService service. type TraceServiceServer interface { // Envoy will connect and send StreamTracesMessage messages forever. It does // not expect any response to be sent as nothing would be done in the case // of failure. StreamTraces(TraceService_StreamTracesServer) error } func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) { s.RegisterService(&_TraceService_serviceDesc, srv) } func _TraceService_StreamTraces_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(TraceServiceServer).StreamTraces(&traceServiceStreamTracesServer{stream}) } type TraceService_StreamTracesServer interface { SendAndClose(*StreamTracesResponse) error Recv() (*StreamTracesMessage, error) grpc.ServerStream } type traceServiceStreamTracesServer struct { grpc.ServerStream } func (x *traceServiceStreamTracesServer) SendAndClose(m *StreamTracesResponse) error { return x.ServerStream.SendMsg(m) } func (x *traceServiceStreamTracesServer) Recv() (*StreamTracesMessage, error) { m := new(StreamTracesMessage) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } var _TraceService_serviceDesc = grpc.ServiceDesc{ ServiceName: "envoy.service.trace.v3alpha.TraceService", HandlerType: (*TraceServiceServer)(nil),
Methods: []grpc.MethodDesc{}, Streams: []grpc.StreamDesc{ { StreamName: "StreamTraces", Handler: _TraceService_StreamTraces_Handler, ClientStreams: true, }, }, Metadata: "envoy/service/trace/v3alpha/trace_service.proto", }
random_line_split
msMHE_stgen.py
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Created on Tue May 22 07:56:11 2018 @author: flemmingholtorf """ from __future__ import print_function from main.mods.SemiBatchPolymerization.mod_class_stgen import SemiBatchPolymerization_multistage from main.mods.SemiBatchPolymerization.mod_class import SemiBatchPolymerization from main.dync.MHEGen import msMHEGen from main.examples.SemiBatchPolymerization.noise_characteristics import * from scipy.stats import chi2 from mpl_toolkits.mplot3d import Axes3D import numpy as np import matplotlib.pyplot as plt # don't write messy bytecode files # might want to change if ran multiple times for performance increase # sys.dont_write_bytecode = True # discretization parameters: nfe, ncp = 24, 3 # Radau nodes assumed in code # state variables: x_vars = {"PO":[()], "Y":[()], "W":[()], "PO_fed":[()], "MY":[()], "MX":[(0,),(1,)], "T":[()], "T_cw":[()]} # corrupted state variables: x_noisy = [] # output variables: y_vars = {"PO":[()], "Y":[()], "MY":[()], 'T':[()], 'T_cw':[()]} # controls: u = ["u1", "u2"] u_bounds = {"u1": (0.0,0.4), "u2": (0.0, 3.0)} # uncertain parameters: p_noisy = {"A":[('p',),('i',)],'kA':[()]} # noisy initial conditions: noisy_ics = {'PO_ic':[()],'T_ic':[()],'MY_ic':[()],'MX_ic':[(1,)]} # initial uncertainty set description (hyperrectangular): p_bounds = {('A', ('i',)):(-0.2,0.2),('A', ('p',)):(-0.2,0.2),('kA',()):(-0.2,0.2), ('PO_ic',()):(-0.02,0.02),('T_ic',()):(-0.005,0.005), ('MY_ic',()):(-0.01,0.01),('MX_ic',(1,)):(-0.002,0.002)} # time horizon bounds: tf_bounds = [10.0*24/nfe, 30.0*24/nfe] # path constrained properties to be monitored: pc = ['Tad','T'] # monitored vars: poi = [x for x in x_vars] + u #parameter scenario: scenario = {('A',('p',)):-0.2,('A',('i',)):-0.2,('kA',()):-0.2} # scenario-tree definition: st = {} # scenario tree : {parent_node, scenario_number on current stage, base node (True/False), scenario values {'name',(index):value}} s_max, nr, alpha = 4, 2, 0.2 dummy ={(1, 2): {('A', 
('p',)): 1-alpha, ('kA', ()): 1-alpha, ('T_ic', ()): 1+0.005, ('A', ('i',)): 1-alpha, ('MY_ic', ()): 1+0.01, ('PO_ic', ()): 1+0.02}, (1, 3): {('A', ('p',)): 1-alpha, ('kA', ()): 1+alpha, ('T_ic', ()): 1-0.005, ('A', ('i',)): 1-alpha, ('MY_ic', ()): 1-0.01, ('PO_ic', ()): 1+0.02}, (1, 4): {('A', ('p',)): 1-alpha, ('kA', ()): 1-alpha, ('T_ic', ()): 1-0.005, ('A', ('i',)): 1-alpha, ('MY_ic', ()): 1-0.01, ('PO_ic', ()): 1+0.02}, (1, 5): {('A', ('p',)): 1-alpha, ('kA', ()): 1-alpha, ('T_ic', ()): 1-0.005, ('A', ('i',)): 1+alpha, ('MY_ic', ()): 1-0.01, ('PO_ic', ()): 1+0.02}, (1, 6): {('A', ('p',)): 1+alpha, ('kA', ()): 1+alpha, ('T_ic', ()): 1-0.005, ('A', ('i',)): 1-alpha, ('MY_ic', ()): 1-0.01, ('PO_ic', ()): 1+0.02}, (1, 7): {('A', ('p',)): 1+alpha, ('kA', ()): 1+alpha, ('T_ic', ()): 1-0.005, ('A', ('i',)): 1+alpha, ('MY_ic', ()): 1-0.01, ('PO_ic', ()): 1+0.02}, (1, 8): {('A', ('p',)): 1+alpha, ('kA', ()): 1-alpha, ('T_ic', ()): 1-0.005, ('A', ('i',)): 1-alpha, ('MY_ic', ()): 1-0.01, ('PO_ic', ()): 1+0.02}, (1, 9): {('A', ('p',)): 1+alpha, ('kA', ()): 1-alpha, ('T_ic', ()): 1-0.005, ('A', ('i',)): 1+alpha, ('MY_ic', ()): 1-0.01, ('PO_ic', ()): 1+0.02}, (2, 2): {('A', ('p',)): 1-alpha, ('A', ('i',)): 1-alpha, ('kA', ()): 1+alpha}, (2, 3): {('A', ('p',)): 1-alpha, ('A', ('i',)): 1-alpha, ('kA', ()): 1-alpha}, (2, 4): {('A', ('p',)): 1-alpha, ('A', ('i',)): 1-alpha, ('kA', ()): 1-alpha}, (3, 2): {('A', ('p',)): 1-alpha, ('A', ('i',)): 1-alpha, ('kA', ()): 1+alpha}, (3, 3): {('A', ('p',)): 1-alpha, ('A', ('i',)): 1-alpha, ('kA', ()): 1-alpha}} for i in range(1,nfe+1): if i < nr + 1: for s in range(1,s_max**i+1): if s%s_max == 1: st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),True,{('A',('p',)):1.0,('A',('i',)):1.0,('kA',()):1.0}) else: scen = s%s_max if s%s_max != 0 else 3 st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),False,dummy[(i,scen)]) else: for s in range(1,s_max**nr+1): st[(i,s)] = (i-1,s,True,st[(i-1,s)][3]) #s_max, nr, alpha = 9, 1, 0.2 #for i in range(1,nfe+1): 
# if i < nr + 1: # for s in range(1,s_max**i+1): # if s%s_max == 1: # st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),True,{('A',('p',)):1.0,('A',('i',)):1.0,('kA',()):1.0}) # elif s%s_max == 2: # st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),False,{('A',('p',)):1.0+alpha,('A',('i',)):1.0+alpha,('kA',()):1.0-alpha}) # elif s%s_max == 3: # st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),False,{('A',('p',)):1.0-alpha,('A',('i',)):1.0+alpha,('kA',()):1.0-alpha}) # elif s%s_max == 4: # st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),False,{('A',('p',)):1.0+alpha,('A',('i',)):1.0-alpha,('kA',()):1.0-alpha}) # elif s%s_max == 5: # st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),False,{('A',('p',)):1.0-alpha,('A',('i',)):1.0-alpha,('kA',()):1.0-alpha}) # elif s%s_max == 6: # st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),False,{('A',('p',)):1.0+alpha,('A',('i',)):1.0+alpha,('kA',()):1.0+alpha}) # elif s%s_max == 7: # st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),False,{('A',('p',)):1.0-alpha,('A',('i',)):1.0+alpha,('kA',()):1.0+alpha}) # elif s%s_max == 8: # st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),False,{('A',('p',)):1.0+alpha,('A',('i',)):1.0-alpha,('kA',()):1.0+alpha}) # else: # st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),False,{('A',('p',)):1.0-alpha,('A',('i',)):1.0-alpha,('kA',()):1.0+alpha}) # else: # for s in range(1,s_max**nr+1): # st[(i,s)] = (i-1,s,True,st[(i-1,s)][3]) sr = s_max**nr # create MHE-NMPC-controller object c = msMHEGen(d_mod = SemiBatchPolymerization_multistage, d_mod_mhe = SemiBatchPolymerization, y=y_vars, x=x_vars, x_noisy=x_noisy, p_noisy=p_noisy, u=u, u_bounds = u_bounds, tf_bounds = tf_bounds, poi = x_vars, scenario_tree = st, robust_horizon = nr, s_max = sr, noisy_inputs = False, noisy_params = False, adapt_params = False, update_scenario_tree = False, process_noise_model = 'params_bias', uncertainty_set = p_bounds, confidence_threshold = alpha, robustness_threshold = 0.05, obj_type='economic', nfe_t=nfe, ncp_t=ncp, path_constraints=pc) # 
arguments for closed-loop simulation: disturbance_src = {'disturbance_src':'parameter_scenario','scenario':scenario} cov_matrices = {'y_cov':mcov,'q_cov':qcov,'u_cov':ucov,'p_cov':pcov} reg_weights = {'K_w':1.0} stgen_in = {'epc':['PO_ptg','mw','unsat'],'pc':['T_max','T_min','temp_b'],'noisy_ics':noisy_ics,'par_bounds':p_bounds} # run closed-loop simulation: performance, iters = c.run(fix_noise=True, advanced_step=False, stgen=True, disturbance_src=disturbance_src, cov_matrices=cov_matrices, regularization_weights=reg_weights, meas_noise=x_measurement, stgen_args=stgen_in) c.plant_simulation_model.check_feasibility(display=True) """ visualization""" #plot state trajectories and estimates x = [] for i in range(1,iters+1): for cp in range(ncp+1):
x_e = [c.nmpc_trajectory[i,'tf'] for i in range(1,iters)] for var in poi[:-2]: if var == 'MX': for k in [0,1]: y_e = [c.nmpc_trajectory[i,(var,(k,))] for i in range(1,iters)] y = [c.monitor[i][var,(1,cp,k,1)] for i in range(1,iters+1) for cp in range(ncp+1)] plt.figure(), plt.plot(x,y), plt.plot(x_e,y_e,'r',marker='x',linestyle='None'), plt.xlabel('time [min]'), plt.ylabel(var+str(k)) else: y_e = [c.nmpc_trajectory[i,(var,())] for i in range(1,iters)] y = [c.monitor[i][var,(1,cp,1)] for i in range(1,iters+1) for cp in range(ncp+1)] plt.figure(), plt.plot(x,y), plt.plot(x_e,y_e,'r',marker='x',linestyle='None'), plt.xlabel('time [min]'), plt.ylabel(var) # path constraints x = [] for i in range(1,iters+1): for cp in range(1,ncp+1): x.append(x[-cp]+c.pc_trajectory['tf',(i,cp)] if i > 1 else c.pc_trajectory['tf',(i,cp)]) y = [c.pc_trajectory['T',(i,(cp,))] for i in range(1,iters+1) for cp in range(1,ncp+1)] plt.figure(), plt.plot(x,y,color='grey'), plt.plot([0,x[-1]],[423.15e-2,423.15e-2],'r--'), plt.plot([0,x[-1]],[373.15e-2,373.15e-2],'r--') plt.xlabel('time [min]'), plt.ylabel('T') y = [c.pc_trajectory['Tad',(i,(cp,))] for i in range(1,iters+1) for cp in range(1,ncp+1)] plt.figure(), plt.plot(x,y,color='grey'), plt.plot([0,x[-1]],[443.15e-2,443.15e-2],'r--') plt.xlabel('time [min]'), plt.ylabel('Tad') #plot control profiles x = [c.nmpc_trajectory[i,'tf'] for i in range(1,iters+1)] for control in u: y = [c.nmpc_trajectory[i,control] for i in range(1,iters+1)] plt.figure(), plt.step(x,y),plt.step([0,x[0]],[y[0],y[0]],'C0'),plt.xlabel('time [min]'), plt.ylabel(control) # visualize confidence region: if c.update_scenario_tree: dimension = 3 # dimension n of the n x n matrix = #DoF rhs_confidence = chi2.isf(1.0-0.99,dimension) # 0.1**2*5% measurment noise, 95% confidence level, dimension degrees of freedo rows = {} # plot cube cube kA = np.array([0.8,1.2])#*e.nominal_parameter_values['kA',()] Ap = np.array([0.8,1.2])#*e.nominal_parameter_values['A',('p',)] Ai = 
np.array([0.8,1.2])#*e.nominal_parameter_values['A',('i',)] x = [Ap[0],Ap[1],Ap[0],Ap[1]] y = [Ai[1],Ai[1],Ai[0],Ai[0]] X,Y = np.meshgrid(x,y) Z_u = np.array([[kA[1],kA[1],kA[1],kA[1]] for i in range(len(x))]) Z_l = np.array([[kA[0],kA[0],kA[0],kA[0]] for i in range(len(x))]) aux = {1:X,2:Y,3:(Z_l,Z_u)} combinations = [[1,2,3],[1,3,2],[3,1,2]] facets = {} b = 0 for combination in combinations: facets[b] = np.array([aux[i] if i != 3 else aux[i][0] for i in combination]) facets[b+1] = np.array([aux[i] if i != 3 else aux[i][1] for i in combination]) b += 2 p_star = np.zeros(dimension) for key in scenario: p_star[c.PI_indices[key]]=(1+scenario[key])*c.nominal_parameter_values[key] p_star[2] *= c.olnmpc.Hrxn['p'].value # for facet in facets: # f = open('results/face'+str(facet)+'.txt','wb') # for i in range(4): # for j in range(4): # f.write(str(facets[facet][0][i][j]*c.nominal_parameter_values['A',('i',)]) + '\t' + str(facets[facet][1][i][j]*c.nominal_parameter_values['A',('p',)]) + '\t' + str(facets[facet][2][i][j]*c.nominal_parameter_values['kA',()]*c.olnmpc.Hrxn['p'].value) + '\n') # f.write('\n') # f.close() for r in range(1,7): A_dict = c.mhe_confidence_ellipsoids[r] center = np.zeros(dimension) for par in c.nmpc_trajectory[r,'e_pars']: center[c.PI_indices[par]] = c.nmpc_trajectory[r,'e_pars'][par] for m in range(dimension): rows[m] = np.array([A_dict[(m,i)] for i in range(dimension)]) A = 1/rhs_confidence*np.array([np.array(rows[i]) for i in range(dimension)]) U, s, V = np.linalg.svd(A) # singular value decomposition radii = 1/np.sqrt(s) # length of half axes, V rotation # transform in polar coordinates for simpler waz of plotting u = np.linspace(0.0, 2.0 * np.pi, 30) # angle = idenpendent variable v = np.linspace(0.0, np.pi, 30) # angle = idenpendent variable x = radii[0] * np.outer(np.cos(u), np.sin(v)) # x-coordinate y = radii[1] * np.outer(np.sin(u), np.sin(v)) # y-coordinate z = radii[2] * np.outer(np.ones_like(u), np.cos(v)) #f = 
open('results/data_ellipsoid'+str(r)+'.txt','wb') for i in range(len(x[0][:])): for j in range(len(x[:][0])): [x[i][j],y[i][j],z[i][j]] = np.dot(U,[x[i][j],y[i][j],z[i][j]]) + center z[i][j] *= c.olnmpc.Hrxn['p'].value #f.write(str(x[i][j]) + '\t' + str(y[i][j]) + '\t' + str(z[i][j]) + '\n') #f.write('\n') #f.close() fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.plot_surface(x,y,z,alpha = 0.1, edgecolor='r') ax.scatter(center[0],center[1],center[2]*c.olnmpc.Hrxn['p'].value,marker='o',color='r') ax.scatter(p_star[0],p_star[1],p_star[2],marker='o',color='k') for i in facets: ax.plot_surface(facets[i][0]*c.nominal_parameter_values['A',('i',)],facets[i][1]*c.nominal_parameter_values['A',('p',)],facets[i][2]*c.nominal_parameter_values['kA',()]*c.olnmpc.Hrxn['p'].value,edgecolors='k',color='grey',alpha=0.1) scaling = np.array([0.5,1.5]) ax.set_xlim(scaling*c.nominal_parameter_values['A',('i',)]) ax.set_xlabel('\n' + r'$A_i$ [$\frac{m^3}{mol s}$]', linespacing=1.2) ax.w_xaxis.set_pane_color((1.0,1.0,1.0,1.0)) ax.set_xticks(np.array([2.5e5,4e5,5.5e5])*1e-4) ax.set_ylim(scaling*c.nominal_parameter_values['A',('p',)]) ax.set_ylabel('\n' + r'$A_p$ [$\frac{m^3}{mol s}$]', linespacing=1.2) ax.w_yaxis.set_pane_color((1.0,1.0,1.0,1.0)) ax.set_yticks(np.array([8e3,14e3,20e3])*1e-4) ax.set_zlim(scaling*c.nominal_parameter_values['kA',()]*c.olnmpc.Hrxn['p'].value) ax.set_zlabel('\n' + r'$kA$ [$\frac{kJ}{K}$]', linespacing=1.2) ax.w_zaxis.set_pane_color((1.0,1.0,1.0,1.0)) ax.set_zticks(np.array([0.04*2,0.07*2,0.1*2])*c.olnmpc.Hrxn['p'].value) fig.tight_layout() #fig.savefig('results/125grid/'+str(r)+'.pdf') #ax.tick_params(axis='both',direction='in') #ax.view_init(15,35) # plot half axis plt.xlabel(r'$\Delta A_i$') plt.ylabel(r'$\Delta A_p$') # plot CPU times: x = range(1,iters) for k in ['olnmpc','lsmhe']: utime = [sum(performance[k,i][1][l].ru_utime-performance[k,i][0][l].ru_utime for l in [1]) for i in range(1,iters)] stime = 
[sum(performance[k,i][1][l].ru_stime-performance[k,i][0][l].ru_stime for l in [1]) for i in range(1,iters)] plt.figure(), plt.title(k+' - required CPU time') plt.bar(x,utime,label='utime'), plt.bar(x,stime,bottom=utime,color='C1',label='stime') plt.ylabel(r'$t_{CPU} [s]$'), plt.xlabel('iteration'), plt.legend()
x.append(x[-cp-1]+c.pc_trajectory['tf',(i,cp)] if i > 1 else c.pc_trajectory['tf',(i,cp)])
conditional_block
msMHE_stgen.py
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Created on Tue May 22 07:56:11 2018 @author: flemmingholtorf """ from __future__ import print_function from main.mods.SemiBatchPolymerization.mod_class_stgen import SemiBatchPolymerization_multistage from main.mods.SemiBatchPolymerization.mod_class import SemiBatchPolymerization from main.dync.MHEGen import msMHEGen from main.examples.SemiBatchPolymerization.noise_characteristics import * from scipy.stats import chi2 from mpl_toolkits.mplot3d import Axes3D import numpy as np import matplotlib.pyplot as plt # don't write messy bytecode files # might want to change if ran multiple times for performance increase # sys.dont_write_bytecode = True # discretization parameters: nfe, ncp = 24, 3 # Radau nodes assumed in code # state variables: x_vars = {"PO":[()], "Y":[()], "W":[()], "PO_fed":[()], "MY":[()], "MX":[(0,),(1,)], "T":[()], "T_cw":[()]} # corrupted state variables: x_noisy = [] # output variables: y_vars = {"PO":[()], "Y":[()], "MY":[()], 'T':[()], 'T_cw':[()]} # controls: u = ["u1", "u2"] u_bounds = {"u1": (0.0,0.4), "u2": (0.0, 3.0)} # uncertain parameters: p_noisy = {"A":[('p',),('i',)],'kA':[()]} # noisy initial conditions: noisy_ics = {'PO_ic':[()],'T_ic':[()],'MY_ic':[()],'MX_ic':[(1,)]} # initial uncertainty set description (hyperrectangular): p_bounds = {('A', ('i',)):(-0.2,0.2),('A', ('p',)):(-0.2,0.2),('kA',()):(-0.2,0.2), ('PO_ic',()):(-0.02,0.02),('T_ic',()):(-0.005,0.005), ('MY_ic',()):(-0.01,0.01),('MX_ic',(1,)):(-0.002,0.002)} # time horizon bounds: tf_bounds = [10.0*24/nfe, 30.0*24/nfe] # path constrained properties to be monitored: pc = ['Tad','T'] # monitored vars: poi = [x for x in x_vars] + u #parameter scenario: scenario = {('A',('p',)):-0.2,('A',('i',)):-0.2,('kA',()):-0.2} # scenario-tree definition: st = {} # scenario tree : {parent_node, scenario_number on current stage, base node (True/False), scenario values {'name',(index):value}} s_max, nr, alpha = 4, 2, 0.2 dummy ={(1, 2): {('A', 
('p',)): 1-alpha, ('kA', ()): 1-alpha, ('T_ic', ()): 1+0.005, ('A', ('i',)): 1-alpha, ('MY_ic', ()): 1+0.01, ('PO_ic', ()): 1+0.02}, (1, 3): {('A', ('p',)): 1-alpha, ('kA', ()): 1+alpha, ('T_ic', ()): 1-0.005, ('A', ('i',)): 1-alpha, ('MY_ic', ()): 1-0.01, ('PO_ic', ()): 1+0.02}, (1, 4): {('A', ('p',)): 1-alpha, ('kA', ()): 1-alpha, ('T_ic', ()): 1-0.005, ('A', ('i',)): 1-alpha, ('MY_ic', ()): 1-0.01, ('PO_ic', ()): 1+0.02}, (1, 5): {('A', ('p',)): 1-alpha, ('kA', ()): 1-alpha, ('T_ic', ()): 1-0.005, ('A', ('i',)): 1+alpha, ('MY_ic', ()): 1-0.01, ('PO_ic', ()): 1+0.02}, (1, 6): {('A', ('p',)): 1+alpha, ('kA', ()): 1+alpha, ('T_ic', ()): 1-0.005, ('A', ('i',)): 1-alpha, ('MY_ic', ()): 1-0.01, ('PO_ic', ()): 1+0.02}, (1, 7): {('A', ('p',)): 1+alpha, ('kA', ()): 1+alpha, ('T_ic', ()): 1-0.005, ('A', ('i',)): 1+alpha, ('MY_ic', ()): 1-0.01, ('PO_ic', ()): 1+0.02}, (1, 8): {('A', ('p',)): 1+alpha, ('kA', ()): 1-alpha, ('T_ic', ()): 1-0.005, ('A', ('i',)): 1-alpha, ('MY_ic', ()): 1-0.01, ('PO_ic', ()): 1+0.02}, (1, 9): {('A', ('p',)): 1+alpha, ('kA', ()): 1-alpha, ('T_ic', ()): 1-0.005, ('A', ('i',)): 1+alpha, ('MY_ic', ()): 1-0.01, ('PO_ic', ()): 1+0.02}, (2, 2): {('A', ('p',)): 1-alpha, ('A', ('i',)): 1-alpha, ('kA', ()): 1+alpha}, (2, 3): {('A', ('p',)): 1-alpha, ('A', ('i',)): 1-alpha, ('kA', ()): 1-alpha}, (2, 4): {('A', ('p',)): 1-alpha, ('A', ('i',)): 1-alpha, ('kA', ()): 1-alpha}, (3, 2): {('A', ('p',)): 1-alpha, ('A', ('i',)): 1-alpha, ('kA', ()): 1+alpha}, (3, 3): {('A', ('p',)): 1-alpha, ('A', ('i',)): 1-alpha, ('kA', ()): 1-alpha}} for i in range(1,nfe+1): if i < nr + 1: for s in range(1,s_max**i+1): if s%s_max == 1: st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),True,{('A',('p',)):1.0,('A',('i',)):1.0,('kA',()):1.0}) else: scen = s%s_max if s%s_max != 0 else 3 st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),False,dummy[(i,scen)]) else: for s in range(1,s_max**nr+1): st[(i,s)] = (i-1,s,True,st[(i-1,s)][3]) #s_max, nr, alpha = 9, 1, 0.2 #for i in range(1,nfe+1): 
# if i < nr + 1: # for s in range(1,s_max**i+1): # if s%s_max == 1: # st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),True,{('A',('p',)):1.0,('A',('i',)):1.0,('kA',()):1.0}) # elif s%s_max == 2: # st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),False,{('A',('p',)):1.0+alpha,('A',('i',)):1.0+alpha,('kA',()):1.0-alpha}) # elif s%s_max == 3: # st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),False,{('A',('p',)):1.0-alpha,('A',('i',)):1.0+alpha,('kA',()):1.0-alpha}) # elif s%s_max == 4: # st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),False,{('A',('p',)):1.0+alpha,('A',('i',)):1.0-alpha,('kA',()):1.0-alpha}) # elif s%s_max == 5: # st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),False,{('A',('p',)):1.0-alpha,('A',('i',)):1.0-alpha,('kA',()):1.0-alpha}) # elif s%s_max == 6: # st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),False,{('A',('p',)):1.0+alpha,('A',('i',)):1.0+alpha,('kA',()):1.0+alpha}) # elif s%s_max == 7: # st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),False,{('A',('p',)):1.0-alpha,('A',('i',)):1.0+alpha,('kA',()):1.0+alpha}) # elif s%s_max == 8: # st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),False,{('A',('p',)):1.0+alpha,('A',('i',)):1.0-alpha,('kA',()):1.0+alpha}) # else: # st[(i,s)] = (i-1,int(np.ceil(s/float(s_max))),False,{('A',('p',)):1.0-alpha,('A',('i',)):1.0-alpha,('kA',()):1.0+alpha}) # else: # for s in range(1,s_max**nr+1): # st[(i,s)] = (i-1,s,True,st[(i-1,s)][3]) sr = s_max**nr # create MHE-NMPC-controller object c = msMHEGen(d_mod = SemiBatchPolymerization_multistage, d_mod_mhe = SemiBatchPolymerization, y=y_vars, x=x_vars, x_noisy=x_noisy, p_noisy=p_noisy, u=u, u_bounds = u_bounds, tf_bounds = tf_bounds, poi = x_vars, scenario_tree = st, robust_horizon = nr, s_max = sr, noisy_inputs = False, noisy_params = False, adapt_params = False, update_scenario_tree = False, process_noise_model = 'params_bias', uncertainty_set = p_bounds, confidence_threshold = alpha, robustness_threshold = 0.05, obj_type='economic', nfe_t=nfe, ncp_t=ncp, path_constraints=pc) # 
arguments for closed-loop simulation: disturbance_src = {'disturbance_src':'parameter_scenario','scenario':scenario} cov_matrices = {'y_cov':mcov,'q_cov':qcov,'u_cov':ucov,'p_cov':pcov} reg_weights = {'K_w':1.0} stgen_in = {'epc':['PO_ptg','mw','unsat'],'pc':['T_max','T_min','temp_b'],'noisy_ics':noisy_ics,'par_bounds':p_bounds} # run closed-loop simulation: performance, iters = c.run(fix_noise=True, advanced_step=False, stgen=True, disturbance_src=disturbance_src, cov_matrices=cov_matrices, regularization_weights=reg_weights, meas_noise=x_measurement, stgen_args=stgen_in) c.plant_simulation_model.check_feasibility(display=True) """ visualization""" #plot state trajectories and estimates x = [] for i in range(1,iters+1): for cp in range(ncp+1): x.append(x[-cp-1]+c.pc_trajectory['tf',(i,cp)] if i > 1 else c.pc_trajectory['tf',(i,cp)]) x_e = [c.nmpc_trajectory[i,'tf'] for i in range(1,iters)] for var in poi[:-2]: if var == 'MX': for k in [0,1]: y_e = [c.nmpc_trajectory[i,(var,(k,))] for i in range(1,iters)] y = [c.monitor[i][var,(1,cp,k,1)] for i in range(1,iters+1) for cp in range(ncp+1)] plt.figure(), plt.plot(x,y), plt.plot(x_e,y_e,'r',marker='x',linestyle='None'), plt.xlabel('time [min]'), plt.ylabel(var+str(k)) else: y_e = [c.nmpc_trajectory[i,(var,())] for i in range(1,iters)] y = [c.monitor[i][var,(1,cp,1)] for i in range(1,iters+1) for cp in range(ncp+1)] plt.figure(), plt.plot(x,y), plt.plot(x_e,y_e,'r',marker='x',linestyle='None'), plt.xlabel('time [min]'), plt.ylabel(var) # path constraints x = [] for i in range(1,iters+1): for cp in range(1,ncp+1): x.append(x[-cp]+c.pc_trajectory['tf',(i,cp)] if i > 1 else c.pc_trajectory['tf',(i,cp)]) y = [c.pc_trajectory['T',(i,(cp,))] for i in range(1,iters+1) for cp in range(1,ncp+1)] plt.figure(), plt.plot(x,y,color='grey'), plt.plot([0,x[-1]],[423.15e-2,423.15e-2],'r--'), plt.plot([0,x[-1]],[373.15e-2,373.15e-2],'r--') plt.xlabel('time [min]'), plt.ylabel('T') y = [c.pc_trajectory['Tad',(i,(cp,))] for i in 
range(1,iters+1) for cp in range(1,ncp+1)] plt.figure(), plt.plot(x,y,color='grey'), plt.plot([0,x[-1]],[443.15e-2,443.15e-2],'r--') plt.xlabel('time [min]'), plt.ylabel('Tad') #plot control profiles x = [c.nmpc_trajectory[i,'tf'] for i in range(1,iters+1)] for control in u: y = [c.nmpc_trajectory[i,control] for i in range(1,iters+1)] plt.figure(), plt.step(x,y),plt.step([0,x[0]],[y[0],y[0]],'C0'),plt.xlabel('time [min]'), plt.ylabel(control) # visualize confidence region: if c.update_scenario_tree: dimension = 3 # dimension n of the n x n matrix = #DoF rhs_confidence = chi2.isf(1.0-0.99,dimension) # 0.1**2*5% measurment noise, 95% confidence level, dimension degrees of freedo rows = {} # plot cube cube kA = np.array([0.8,1.2])#*e.nominal_parameter_values['kA',()] Ap = np.array([0.8,1.2])#*e.nominal_parameter_values['A',('p',)] Ai = np.array([0.8,1.2])#*e.nominal_parameter_values['A',('i',)] x = [Ap[0],Ap[1],Ap[0],Ap[1]] y = [Ai[1],Ai[1],Ai[0],Ai[0]] X,Y = np.meshgrid(x,y) Z_u = np.array([[kA[1],kA[1],kA[1],kA[1]] for i in range(len(x))]) Z_l = np.array([[kA[0],kA[0],kA[0],kA[0]] for i in range(len(x))]) aux = {1:X,2:Y,3:(Z_l,Z_u)} combinations = [[1,2,3],[1,3,2],[3,1,2]] facets = {} b = 0 for combination in combinations: facets[b] = np.array([aux[i] if i != 3 else aux[i][0] for i in combination]) facets[b+1] = np.array([aux[i] if i != 3 else aux[i][1] for i in combination]) b += 2 p_star = np.zeros(dimension) for key in scenario: p_star[c.PI_indices[key]]=(1+scenario[key])*c.nominal_parameter_values[key] p_star[2] *= c.olnmpc.Hrxn['p'].value # for facet in facets: # f = open('results/face'+str(facet)+'.txt','wb') # for i in range(4): # for j in range(4): # f.write(str(facets[facet][0][i][j]*c.nominal_parameter_values['A',('i',)]) + '\t' + str(facets[facet][1][i][j]*c.nominal_parameter_values['A',('p',)]) + '\t' + str(facets[facet][2][i][j]*c.nominal_parameter_values['kA',()]*c.olnmpc.Hrxn['p'].value) + '\n') # f.write('\n') # f.close() for r in range(1,7): A_dict 
= c.mhe_confidence_ellipsoids[r] center = np.zeros(dimension) for par in c.nmpc_trajectory[r,'e_pars']: center[c.PI_indices[par]] = c.nmpc_trajectory[r,'e_pars'][par] for m in range(dimension): rows[m] = np.array([A_dict[(m,i)] for i in range(dimension)]) A = 1/rhs_confidence*np.array([np.array(rows[i]) for i in range(dimension)]) U, s, V = np.linalg.svd(A) # singular value decomposition radii = 1/np.sqrt(s) # length of half axes, V rotation # transform in polar coordinates for simpler waz of plotting u = np.linspace(0.0, 2.0 * np.pi, 30) # angle = idenpendent variable v = np.linspace(0.0, np.pi, 30) # angle = idenpendent variable x = radii[0] * np.outer(np.cos(u), np.sin(v)) # x-coordinate y = radii[1] * np.outer(np.sin(u), np.sin(v)) # y-coordinate z = radii[2] * np.outer(np.ones_like(u), np.cos(v)) #f = open('results/data_ellipsoid'+str(r)+'.txt','wb') for i in range(len(x[0][:])): for j in range(len(x[:][0])):
[x[i][j],y[i][j],z[i][j]] = np.dot(U,[x[i][j],y[i][j],z[i][j]]) + center z[i][j] *= c.olnmpc.Hrxn['p'].value #f.write(str(x[i][j]) + '\t' + str(y[i][j]) + '\t' + str(z[i][j]) + '\n') #f.write('\n') #f.close() fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.plot_surface(x,y,z,alpha = 0.1, edgecolor='r') ax.scatter(center[0],center[1],center[2]*c.olnmpc.Hrxn['p'].value,marker='o',color='r') ax.scatter(p_star[0],p_star[1],p_star[2],marker='o',color='k') for i in facets: ax.plot_surface(facets[i][0]*c.nominal_parameter_values['A',('i',)],facets[i][1]*c.nominal_parameter_values['A',('p',)],facets[i][2]*c.nominal_parameter_values['kA',()]*c.olnmpc.Hrxn['p'].value,edgecolors='k',color='grey',alpha=0.1) scaling = np.array([0.5,1.5]) ax.set_xlim(scaling*c.nominal_parameter_values['A',('i',)]) ax.set_xlabel('\n' + r'$A_i$ [$\frac{m^3}{mol s}$]', linespacing=1.2) ax.w_xaxis.set_pane_color((1.0,1.0,1.0,1.0)) ax.set_xticks(np.array([2.5e5,4e5,5.5e5])*1e-4) ax.set_ylim(scaling*c.nominal_parameter_values['A',('p',)]) ax.set_ylabel('\n' + r'$A_p$ [$\frac{m^3}{mol s}$]', linespacing=1.2) ax.w_yaxis.set_pane_color((1.0,1.0,1.0,1.0)) ax.set_yticks(np.array([8e3,14e3,20e3])*1e-4) ax.set_zlim(scaling*c.nominal_parameter_values['kA',()]*c.olnmpc.Hrxn['p'].value) ax.set_zlabel('\n' + r'$kA$ [$\frac{kJ}{K}$]', linespacing=1.2) ax.w_zaxis.set_pane_color((1.0,1.0,1.0,1.0)) ax.set_zticks(np.array([0.04*2,0.07*2,0.1*2])*c.olnmpc.Hrxn['p'].value) fig.tight_layout() #fig.savefig('results/125grid/'+str(r)+'.pdf') #ax.tick_params(axis='both',direction='in') #ax.view_init(15,35) # plot half axis plt.xlabel(r'$\Delta A_i$') plt.ylabel(r'$\Delta A_p$') # plot CPU times: x = range(1,iters) for k in ['olnmpc','lsmhe']: utime = [sum(performance[k,i][1][l].ru_utime-performance[k,i][0][l].ru_utime for l in [1]) for i in range(1,iters)] stime = [sum(performance[k,i][1][l].ru_stime-performance[k,i][0][l].ru_stime for l in [1]) for i in range(1,iters)] plt.figure(), plt.title(k+' - required 
CPU time') plt.bar(x,utime,label='utime'), plt.bar(x,stime,bottom=utime,color='C1',label='stime') plt.ylabel(r'$t_{CPU} [s]$'), plt.xlabel('iteration'), plt.legend()
random_line_split
islandora_bulk_downloader.py
#!/usr/bin/env python3 import sys import os import csv import re import logging import argparse import urllib.request import requests from PIL import Image from PyPDF2 import PdfFileMerger from shutil import rmtree, copytree # Functions def pid_to_path(pid): # Converts PID into a string suitable for use in filesystem paths. # Uses __ in case some PIDs contain a single _.
def get_rels_ext_properties(pid): rels_ext_properties = dict() url = args.host.rstrip('/') + '/islandora/object/' + pid + '/datastream/RELS-EXT/view' request_url = urllib.request.urlopen(url) rels_ext_xml = request_url.read().decode('utf-8').strip() rels_ext_properties['PID'] = pid # <fedora:isMemberOfCollection rdf:resource="info:fedora/km:collection"/> isMemberOfCollections = re.findall('fedora:isMemberOfCollection rdf:resource="info:fedora/.*"', rels_ext_xml) if len(isMemberOfCollections) > 0: isMemberOfCollection = isMemberOfCollections[0].replace('fedora:isMemberOfCollection rdf:resource="info:fedora/', '') isMemberOfCollection = isMemberOfCollection.strip('"') rels_ext_properties['isMemberOfCollection'] = isMemberOfCollection else: rels_ext_properties['isMemberOfCollection'] = None # Newspaper issues use isMemberOf in relationship to their newspaper and isSequenceNumber to sequence within that newspaper. # <fedora:isMemberOf rdf:resource="info:fedora/ctimes:1"/> # <islandora:isSequenceNumber>16219</islandora:isSequenceNumber> isMemberOfs = re.findall('fedora:isMemberOf rdf:resource="info:fedora/.*"', rels_ext_xml) if len(isMemberOfs) > 0: isMemberOf = isMemberOfs[0].replace('fedora:isMemberOf rdf:resource="info:fedora/', '') isMemberOf = isMemberOf.strip('"') rels_ext_properties['isMemberOf'] = isMemberOf else: rels_ext_properties['isMemberOf'] = None # Objects of cmodel islandora:newspaperPageCModel and islandora:pageCModel use this property. isSequenceNumbers = re.findall('<islandora:isSequenceNumber>.*<', rels_ext_xml) if len(isSequenceNumbers) > 0: # Assumes that the object has only one parent. 
isSequenceNumber = isSequenceNumbers[0].replace('<.*', '') isSequenceNumbers = re.findall('>.*<', isSequenceNumber) isSequenceNumber = isSequenceNumbers[0].lstrip('>') isSequenceNumber = isSequenceNumber.rstrip('<') rels_ext_properties['isSequenceNumber'] = isSequenceNumber else: rels_ext_properties['isSequenceNumber'] = None # isPageOf is used in pages of books and newspaper issues. <islandora:isPageOf rdf:resource="info:fedora/aldine:12541"/> isPageOfs = re.findall('fedora:isPageOf rdf:resource="info:fedora/.*"', rels_ext_xml) if len(isPageOfs) > 0: isPageOf = isPageOfs[0].replace('fedora:isPageOf rdf:resource="info:fedora/', '') isPageOf = isPageOf.strip('"') rels_ext_properties['isPageOf'] = isPageOf else: rels_ext_properties['isPageOf'] = None # isConstituentOf is used in children of compound objects. isConstituentOfs = re.findall('fedora:isConstituentOf rdf:resource="info:fedora/.*"', rels_ext_xml) if len(isConstituentOfs) > 0: # Assumes that the object has only one parent. isConstituentOf = isConstituentOfs[0].replace('fedora:isConstituentOf rdf:resource="info:fedora/', '') isConstituentOf = isConstituentOf.strip('"') rels_ext_properties['isConstituentOf'] = isConstituentOf else: rels_ext_properties['isConstituentOf'] = None # isSequenceNumberOf{PID} is used in paged content (of books, newspapers), and in children of compound objects. # <islandora:isSequenceNumberOfkm_8352>5</islandora:isSequenceNumberOfkm_8352> isSequenceNumberOfs = re.findall('<islandora:isSequenceNumberOf.*>.*<', rels_ext_xml) if len(isSequenceNumberOfs) > 0: # Assumes that the object has only one parent. 
isSequenceNumberOf = isSequenceNumberOfs[0].replace('<.*', '') isSequenceNumberOfs = re.findall('>.*<', isSequenceNumberOf) isSequenceNumberOf = isSequenceNumberOfs[0].lstrip('>') isSequenceNumberOf = isSequenceNumberOf.rstrip('<') rels_ext_properties['isSequenceNumberOf'] = isSequenceNumberOf else: rels_ext_properties['isSequenceNumberOf'] = None # Standard models: # islandora:collectionCModel # islandora:pageCModel # islandora:sp_pdf # islandora:sp-audioCModel # islandora:sp_disk_image # islandora:sp_videoCModel # islandora:sp_basic_image # islandora:sp_web_archive # islandora:sp_large_image_cmodel # ir:citationCModel # ir:thesisCModel # islandora:bookCModel # islandora:newspaperCModel # islandora:newspaperPageCModel # islandora:newspaperIssueCModel # islandora:compoundCModel # Note: it is possible for objects of these content models to have multiple content models. # islandora:entityCModel # islandora:eventCModel # islandora:placeCModel # islandora:personCModel # islandora:organizationCModel models = re.findall('fedora-model:hasModel rdf:resource="info:fedora/.*"', rels_ext_xml) # Assumes a single model. model = models[0].replace('fedora-model:hasModel rdf:resource="info:fedora/', '') model = model.strip('"') rels_ext_properties['model'] = model return rels_ext_properties # Main program logic. parser = argparse.ArgumentParser() parser.add_argument('--pid_file', required=True, help='Relative or absolute path to the file listing all PIDs to harvest.') parser.add_argument('--log', required=True, help='Relative or absolute path to the log file.') parser.add_argument('--host', required=True, help='Islandora hostname, including the "https://". Trailing / is optional.') parser.add_argument('--output_dir', required=True, help='Relative or absolute path to the directory to put the harvested content in. 
Created if does not exist.') args = parser.parse_args() logging.basicConfig( filename=args.log, level=logging.INFO, filemode='a+', format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S') if not os.path.exists(args.pid_file): message = "CSV file " + args.pid_file + " does not exist." logging.error(message) sys.exit("ERROR: " + message) if os.path.exists(args.output_dir): logging.info("Output directory " + args.output_dir + " exists.") else: os.mkdir(args.output_dir) logging.info("Creating output directory " + args.output_dir + ".") with open(args.pid_file, 'r', newline='') as csv_reader_file_handle: csv_reader = csv.DictReader(csv_reader_file_handle) for row in csv_reader: properties = get_rels_ext_properties(row['PID']) if properties['model'] == 'islandora:compoundCModel': continue obj_url = args.host + '/islandora/object/' + row['PID'] + '/datastream/OBJ/download' obj = requests.get(obj_url) content_disp = obj.headers['content-disposition'] m = re.search(r'filename="(.*)"', content_disp) fname = m.group(1) ext = os.path.splitext(fname)[1] if not bool(properties['isConstituentOf']): content_path = os.path.join(args.output_dir, (pid_to_path(row['PID'])+ext)) elif not os.path.exists(os.path.join(args.output_dir, pid_to_path(properties['isConstituentOf']))): os.mkdir(os.path.join(args.output_dir, pid_to_path(properties['isConstituentOf']))) if bool(properties['isSequenceNumberOf']): content_path = os.path.join(args.output_dir, pid_to_path(properties['isConstituentOf']), (properties['isSequenceNumberOf']+"_"+pid_to_path(row['PID']))+ext) elif bool(properties['isSequenceNumber']): content_path = os.path.join(args.output_dir, pid_to_path(properties['isConstituentOf']), (properties['isSequenceNumber']+"_"+pid_to_path(row['PID']))+ext) else: content_path = os.path.join(args.output_dir, pid_to_path(properties['isConstituentOf']), ("_" + pid_to_path(row['PID'])) + ext) with open(content_path, 'wb') as file: file.write(obj.content) # loop through 
folders in output directory (for folders in dir) for root, dirs, files in os.walk(args.output_dir): for dir in dirs: if dir.startswith('km'): dir_files = os.listdir(os.path.join(root, dir)) if dir_files[0].endswith('mp3'): if not os.path.exists(os.path.join(root, 'video')): os.mkdir(os.path.join(root, 'audio')) dest = os.path.join(root,'audio', dir) copytree(os.path.join(root, dir), dest) continue elif dir_files[0].endswith('mp4'): if not os.path.exists(os.path.join(root, 'video')): os.mkdir(os.path.join(root, 'video')) dest = os.path.join(root, 'video', dir) copytree(os.path.join(root, dir), dest) continue elif len(dir_files)>1: #if there's more than one component in folder, sort & save as one pdf dir_files.sort(key=lambda x: int(x.split("_")[0])) pdfs = [] merger = PdfFileMerger() for obj in dir_files: if obj.endswith('.jp2') or obj.endswith('.tiff'): im = Image.open(os.path.join(root, dir, obj)) filename = os.path.splitext(obj)[0] new_fn = filename + '.pdf' im.save(os.path.join(root, dir, new_fn)) im.close() pdfs.append(new_fn) os.remove(os.path.join(root,dir,obj)) for pdf in pdfs: fpath = os.path.join(root, dir, pdf) merger.append(fpath) merger.write(os.path.join(root, (dir+'.pdf'))) merger.close() elif len(dir_files)==1: item = dir_files[0] if item.endswith('.jp2') or item.endswith('.tiff'): im = Image.open(os.path.join(root, dir, item)) filename = os.path.splitext(item)[0] new_fn = filename + '.pdf' im.save(os.path.join(root,dir, new_fn)) im.close() os.remove(os.path.join(root,dir,item)) dir = os.listdir(args.output_dir) for item in dir: if item == 'audio' or item == 'video': continue else: path = os.path.join(args.output_dir, item) if not os.path.isfile(path): rmtree(path)
return pid.replace(':', '__')
identifier_body
islandora_bulk_downloader.py
#!/usr/bin/env python3 import sys import os import csv import re import logging import argparse import urllib.request import requests from PIL import Image from PyPDF2 import PdfFileMerger from shutil import rmtree, copytree # Functions def pid_to_path(pid): # Converts PID into a string suitable for use in filesystem paths. # Uses __ in case some PIDs contain a single _. return pid.replace(':', '__') def get_rels_ext_properties(pid): rels_ext_properties = dict() url = args.host.rstrip('/') + '/islandora/object/' + pid + '/datastream/RELS-EXT/view' request_url = urllib.request.urlopen(url) rels_ext_xml = request_url.read().decode('utf-8').strip() rels_ext_properties['PID'] = pid # <fedora:isMemberOfCollection rdf:resource="info:fedora/km:collection"/> isMemberOfCollections = re.findall('fedora:isMemberOfCollection rdf:resource="info:fedora/.*"', rels_ext_xml) if len(isMemberOfCollections) > 0: isMemberOfCollection = isMemberOfCollections[0].replace('fedora:isMemberOfCollection rdf:resource="info:fedora/', '') isMemberOfCollection = isMemberOfCollection.strip('"') rels_ext_properties['isMemberOfCollection'] = isMemberOfCollection else: rels_ext_properties['isMemberOfCollection'] = None # Newspaper issues use isMemberOf in relationship to their newspaper and isSequenceNumber to sequence within that newspaper. # <fedora:isMemberOf rdf:resource="info:fedora/ctimes:1"/> # <islandora:isSequenceNumber>16219</islandora:isSequenceNumber> isMemberOfs = re.findall('fedora:isMemberOf rdf:resource="info:fedora/.*"', rels_ext_xml) if len(isMemberOfs) > 0: isMemberOf = isMemberOfs[0].replace('fedora:isMemberOf rdf:resource="info:fedora/', '') isMemberOf = isMemberOf.strip('"') rels_ext_properties['isMemberOf'] = isMemberOf else: rels_ext_properties['isMemberOf'] = None # Objects of cmodel islandora:newspaperPageCModel and islandora:pageCModel use this property. 
isSequenceNumbers = re.findall('<islandora:isSequenceNumber>.*<', rels_ext_xml) if len(isSequenceNumbers) > 0: # Assumes that the object has only one parent. isSequenceNumber = isSequenceNumbers[0].replace('<.*', '') isSequenceNumbers = re.findall('>.*<', isSequenceNumber) isSequenceNumber = isSequenceNumbers[0].lstrip('>') isSequenceNumber = isSequenceNumber.rstrip('<') rels_ext_properties['isSequenceNumber'] = isSequenceNumber else: rels_ext_properties['isSequenceNumber'] = None # isPageOf is used in pages of books and newspaper issues. <islandora:isPageOf rdf:resource="info:fedora/aldine:12541"/> isPageOfs = re.findall('fedora:isPageOf rdf:resource="info:fedora/.*"', rels_ext_xml) if len(isPageOfs) > 0: isPageOf = isPageOfs[0].replace('fedora:isPageOf rdf:resource="info:fedora/', '') isPageOf = isPageOf.strip('"') rels_ext_properties['isPageOf'] = isPageOf else: rels_ext_properties['isPageOf'] = None # isConstituentOf is used in children of compound objects. isConstituentOfs = re.findall('fedora:isConstituentOf rdf:resource="info:fedora/.*"', rels_ext_xml) if len(isConstituentOfs) > 0: # Assumes that the object has only one parent. isConstituentOf = isConstituentOfs[0].replace('fedora:isConstituentOf rdf:resource="info:fedora/', '') isConstituentOf = isConstituentOf.strip('"') rels_ext_properties['isConstituentOf'] = isConstituentOf else: rels_ext_properties['isConstituentOf'] = None # isSequenceNumberOf{PID} is used in paged content (of books, newspapers), and in children of compound objects. # <islandora:isSequenceNumberOfkm_8352>5</islandora:isSequenceNumberOfkm_8352>
isSequenceNumberOfs = re.findall('>.*<', isSequenceNumberOf) isSequenceNumberOf = isSequenceNumberOfs[0].lstrip('>') isSequenceNumberOf = isSequenceNumberOf.rstrip('<') rels_ext_properties['isSequenceNumberOf'] = isSequenceNumberOf else: rels_ext_properties['isSequenceNumberOf'] = None # Standard models: # islandora:collectionCModel # islandora:pageCModel # islandora:sp_pdf # islandora:sp-audioCModel # islandora:sp_disk_image # islandora:sp_videoCModel # islandora:sp_basic_image # islandora:sp_web_archive # islandora:sp_large_image_cmodel # ir:citationCModel # ir:thesisCModel # islandora:bookCModel # islandora:newspaperCModel # islandora:newspaperPageCModel # islandora:newspaperIssueCModel # islandora:compoundCModel # Note: it is possible for objects of these content models to have multiple content models. # islandora:entityCModel # islandora:eventCModel # islandora:placeCModel # islandora:personCModel # islandora:organizationCModel models = re.findall('fedora-model:hasModel rdf:resource="info:fedora/.*"', rels_ext_xml) # Assumes a single model. model = models[0].replace('fedora-model:hasModel rdf:resource="info:fedora/', '') model = model.strip('"') rels_ext_properties['model'] = model return rels_ext_properties # Main program logic. parser = argparse.ArgumentParser() parser.add_argument('--pid_file', required=True, help='Relative or absolute path to the file listing all PIDs to harvest.') parser.add_argument('--log', required=True, help='Relative or absolute path to the log file.') parser.add_argument('--host', required=True, help='Islandora hostname, including the "https://". Trailing / is optional.') parser.add_argument('--output_dir', required=True, help='Relative or absolute path to the directory to put the harvested content in. 
Created if does not exist.') args = parser.parse_args() logging.basicConfig( filename=args.log, level=logging.INFO, filemode='a+', format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S') if not os.path.exists(args.pid_file): message = "CSV file " + args.pid_file + " does not exist." logging.error(message) sys.exit("ERROR: " + message) if os.path.exists(args.output_dir): logging.info("Output directory " + args.output_dir + " exists.") else: os.mkdir(args.output_dir) logging.info("Creating output directory " + args.output_dir + ".") with open(args.pid_file, 'r', newline='') as csv_reader_file_handle: csv_reader = csv.DictReader(csv_reader_file_handle) for row in csv_reader: properties = get_rels_ext_properties(row['PID']) if properties['model'] == 'islandora:compoundCModel': continue obj_url = args.host + '/islandora/object/' + row['PID'] + '/datastream/OBJ/download' obj = requests.get(obj_url) content_disp = obj.headers['content-disposition'] m = re.search(r'filename="(.*)"', content_disp) fname = m.group(1) ext = os.path.splitext(fname)[1] if not bool(properties['isConstituentOf']): content_path = os.path.join(args.output_dir, (pid_to_path(row['PID'])+ext)) elif not os.path.exists(os.path.join(args.output_dir, pid_to_path(properties['isConstituentOf']))): os.mkdir(os.path.join(args.output_dir, pid_to_path(properties['isConstituentOf']))) if bool(properties['isSequenceNumberOf']): content_path = os.path.join(args.output_dir, pid_to_path(properties['isConstituentOf']), (properties['isSequenceNumberOf']+"_"+pid_to_path(row['PID']))+ext) elif bool(properties['isSequenceNumber']): content_path = os.path.join(args.output_dir, pid_to_path(properties['isConstituentOf']), (properties['isSequenceNumber']+"_"+pid_to_path(row['PID']))+ext) else: content_path = os.path.join(args.output_dir, pid_to_path(properties['isConstituentOf']), ("_" + pid_to_path(row['PID'])) + ext) with open(content_path, 'wb') as file: file.write(obj.content) # loop through 
folders in output directory (for folders in dir) for root, dirs, files in os.walk(args.output_dir): for dir in dirs: if dir.startswith('km'): dir_files = os.listdir(os.path.join(root, dir)) if dir_files[0].endswith('mp3'): if not os.path.exists(os.path.join(root, 'video')): os.mkdir(os.path.join(root, 'audio')) dest = os.path.join(root,'audio', dir) copytree(os.path.join(root, dir), dest) continue elif dir_files[0].endswith('mp4'): if not os.path.exists(os.path.join(root, 'video')): os.mkdir(os.path.join(root, 'video')) dest = os.path.join(root, 'video', dir) copytree(os.path.join(root, dir), dest) continue elif len(dir_files)>1: #if there's more than one component in folder, sort & save as one pdf dir_files.sort(key=lambda x: int(x.split("_")[0])) pdfs = [] merger = PdfFileMerger() for obj in dir_files: if obj.endswith('.jp2') or obj.endswith('.tiff'): im = Image.open(os.path.join(root, dir, obj)) filename = os.path.splitext(obj)[0] new_fn = filename + '.pdf' im.save(os.path.join(root, dir, new_fn)) im.close() pdfs.append(new_fn) os.remove(os.path.join(root,dir,obj)) for pdf in pdfs: fpath = os.path.join(root, dir, pdf) merger.append(fpath) merger.write(os.path.join(root, (dir+'.pdf'))) merger.close() elif len(dir_files)==1: item = dir_files[0] if item.endswith('.jp2') or item.endswith('.tiff'): im = Image.open(os.path.join(root, dir, item)) filename = os.path.splitext(item)[0] new_fn = filename + '.pdf' im.save(os.path.join(root,dir, new_fn)) im.close() os.remove(os.path.join(root,dir,item)) dir = os.listdir(args.output_dir) for item in dir: if item == 'audio' or item == 'video': continue else: path = os.path.join(args.output_dir, item) if not os.path.isfile(path): rmtree(path)
isSequenceNumberOfs = re.findall('<islandora:isSequenceNumberOf.*>.*<', rels_ext_xml) if len(isSequenceNumberOfs) > 0: # Assumes that the object has only one parent. isSequenceNumberOf = isSequenceNumberOfs[0].replace('<.*', '')
random_line_split
islandora_bulk_downloader.py
#!/usr/bin/env python3 import sys import os import csv import re import logging import argparse import urllib.request import requests from PIL import Image from PyPDF2 import PdfFileMerger from shutil import rmtree, copytree # Functions def
(pid): # Converts PID into a string suitable for use in filesystem paths. # Uses __ in case some PIDs contain a single _. return pid.replace(':', '__') def get_rels_ext_properties(pid): rels_ext_properties = dict() url = args.host.rstrip('/') + '/islandora/object/' + pid + '/datastream/RELS-EXT/view' request_url = urllib.request.urlopen(url) rels_ext_xml = request_url.read().decode('utf-8').strip() rels_ext_properties['PID'] = pid # <fedora:isMemberOfCollection rdf:resource="info:fedora/km:collection"/> isMemberOfCollections = re.findall('fedora:isMemberOfCollection rdf:resource="info:fedora/.*"', rels_ext_xml) if len(isMemberOfCollections) > 0: isMemberOfCollection = isMemberOfCollections[0].replace('fedora:isMemberOfCollection rdf:resource="info:fedora/', '') isMemberOfCollection = isMemberOfCollection.strip('"') rels_ext_properties['isMemberOfCollection'] = isMemberOfCollection else: rels_ext_properties['isMemberOfCollection'] = None # Newspaper issues use isMemberOf in relationship to their newspaper and isSequenceNumber to sequence within that newspaper. # <fedora:isMemberOf rdf:resource="info:fedora/ctimes:1"/> # <islandora:isSequenceNumber>16219</islandora:isSequenceNumber> isMemberOfs = re.findall('fedora:isMemberOf rdf:resource="info:fedora/.*"', rels_ext_xml) if len(isMemberOfs) > 0: isMemberOf = isMemberOfs[0].replace('fedora:isMemberOf rdf:resource="info:fedora/', '') isMemberOf = isMemberOf.strip('"') rels_ext_properties['isMemberOf'] = isMemberOf else: rels_ext_properties['isMemberOf'] = None # Objects of cmodel islandora:newspaperPageCModel and islandora:pageCModel use this property. isSequenceNumbers = re.findall('<islandora:isSequenceNumber>.*<', rels_ext_xml) if len(isSequenceNumbers) > 0: # Assumes that the object has only one parent. 
isSequenceNumber = isSequenceNumbers[0].replace('<.*', '') isSequenceNumbers = re.findall('>.*<', isSequenceNumber) isSequenceNumber = isSequenceNumbers[0].lstrip('>') isSequenceNumber = isSequenceNumber.rstrip('<') rels_ext_properties['isSequenceNumber'] = isSequenceNumber else: rels_ext_properties['isSequenceNumber'] = None # isPageOf is used in pages of books and newspaper issues. <islandora:isPageOf rdf:resource="info:fedora/aldine:12541"/> isPageOfs = re.findall('fedora:isPageOf rdf:resource="info:fedora/.*"', rels_ext_xml) if len(isPageOfs) > 0: isPageOf = isPageOfs[0].replace('fedora:isPageOf rdf:resource="info:fedora/', '') isPageOf = isPageOf.strip('"') rels_ext_properties['isPageOf'] = isPageOf else: rels_ext_properties['isPageOf'] = None # isConstituentOf is used in children of compound objects. isConstituentOfs = re.findall('fedora:isConstituentOf rdf:resource="info:fedora/.*"', rels_ext_xml) if len(isConstituentOfs) > 0: # Assumes that the object has only one parent. isConstituentOf = isConstituentOfs[0].replace('fedora:isConstituentOf rdf:resource="info:fedora/', '') isConstituentOf = isConstituentOf.strip('"') rels_ext_properties['isConstituentOf'] = isConstituentOf else: rels_ext_properties['isConstituentOf'] = None # isSequenceNumberOf{PID} is used in paged content (of books, newspapers), and in children of compound objects. # <islandora:isSequenceNumberOfkm_8352>5</islandora:isSequenceNumberOfkm_8352> isSequenceNumberOfs = re.findall('<islandora:isSequenceNumberOf.*>.*<', rels_ext_xml) if len(isSequenceNumberOfs) > 0: # Assumes that the object has only one parent. 
isSequenceNumberOf = isSequenceNumberOfs[0].replace('<.*', '') isSequenceNumberOfs = re.findall('>.*<', isSequenceNumberOf) isSequenceNumberOf = isSequenceNumberOfs[0].lstrip('>') isSequenceNumberOf = isSequenceNumberOf.rstrip('<') rels_ext_properties['isSequenceNumberOf'] = isSequenceNumberOf else: rels_ext_properties['isSequenceNumberOf'] = None # Standard models: # islandora:collectionCModel # islandora:pageCModel # islandora:sp_pdf # islandora:sp-audioCModel # islandora:sp_disk_image # islandora:sp_videoCModel # islandora:sp_basic_image # islandora:sp_web_archive # islandora:sp_large_image_cmodel # ir:citationCModel # ir:thesisCModel # islandora:bookCModel # islandora:newspaperCModel # islandora:newspaperPageCModel # islandora:newspaperIssueCModel # islandora:compoundCModel # Note: it is possible for objects of these content models to have multiple content models. # islandora:entityCModel # islandora:eventCModel # islandora:placeCModel # islandora:personCModel # islandora:organizationCModel models = re.findall('fedora-model:hasModel rdf:resource="info:fedora/.*"', rels_ext_xml) # Assumes a single model. model = models[0].replace('fedora-model:hasModel rdf:resource="info:fedora/', '') model = model.strip('"') rels_ext_properties['model'] = model return rels_ext_properties # Main program logic. parser = argparse.ArgumentParser() parser.add_argument('--pid_file', required=True, help='Relative or absolute path to the file listing all PIDs to harvest.') parser.add_argument('--log', required=True, help='Relative or absolute path to the log file.') parser.add_argument('--host', required=True, help='Islandora hostname, including the "https://". Trailing / is optional.') parser.add_argument('--output_dir', required=True, help='Relative or absolute path to the directory to put the harvested content in. 
Created if does not exist.') args = parser.parse_args() logging.basicConfig( filename=args.log, level=logging.INFO, filemode='a+', format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S') if not os.path.exists(args.pid_file): message = "CSV file " + args.pid_file + " does not exist." logging.error(message) sys.exit("ERROR: " + message) if os.path.exists(args.output_dir): logging.info("Output directory " + args.output_dir + " exists.") else: os.mkdir(args.output_dir) logging.info("Creating output directory " + args.output_dir + ".") with open(args.pid_file, 'r', newline='') as csv_reader_file_handle: csv_reader = csv.DictReader(csv_reader_file_handle) for row in csv_reader: properties = get_rels_ext_properties(row['PID']) if properties['model'] == 'islandora:compoundCModel': continue obj_url = args.host + '/islandora/object/' + row['PID'] + '/datastream/OBJ/download' obj = requests.get(obj_url) content_disp = obj.headers['content-disposition'] m = re.search(r'filename="(.*)"', content_disp) fname = m.group(1) ext = os.path.splitext(fname)[1] if not bool(properties['isConstituentOf']): content_path = os.path.join(args.output_dir, (pid_to_path(row['PID'])+ext)) elif not os.path.exists(os.path.join(args.output_dir, pid_to_path(properties['isConstituentOf']))): os.mkdir(os.path.join(args.output_dir, pid_to_path(properties['isConstituentOf']))) if bool(properties['isSequenceNumberOf']): content_path = os.path.join(args.output_dir, pid_to_path(properties['isConstituentOf']), (properties['isSequenceNumberOf']+"_"+pid_to_path(row['PID']))+ext) elif bool(properties['isSequenceNumber']): content_path = os.path.join(args.output_dir, pid_to_path(properties['isConstituentOf']), (properties['isSequenceNumber']+"_"+pid_to_path(row['PID']))+ext) else: content_path = os.path.join(args.output_dir, pid_to_path(properties['isConstituentOf']), ("_" + pid_to_path(row['PID'])) + ext) with open(content_path, 'wb') as file: file.write(obj.content) # loop through 
folders in output directory (for folders in dir) for root, dirs, files in os.walk(args.output_dir): for dir in dirs: if dir.startswith('km'): dir_files = os.listdir(os.path.join(root, dir)) if dir_files[0].endswith('mp3'): if not os.path.exists(os.path.join(root, 'video')): os.mkdir(os.path.join(root, 'audio')) dest = os.path.join(root,'audio', dir) copytree(os.path.join(root, dir), dest) continue elif dir_files[0].endswith('mp4'): if not os.path.exists(os.path.join(root, 'video')): os.mkdir(os.path.join(root, 'video')) dest = os.path.join(root, 'video', dir) copytree(os.path.join(root, dir), dest) continue elif len(dir_files)>1: #if there's more than one component in folder, sort & save as one pdf dir_files.sort(key=lambda x: int(x.split("_")[0])) pdfs = [] merger = PdfFileMerger() for obj in dir_files: if obj.endswith('.jp2') or obj.endswith('.tiff'): im = Image.open(os.path.join(root, dir, obj)) filename = os.path.splitext(obj)[0] new_fn = filename + '.pdf' im.save(os.path.join(root, dir, new_fn)) im.close() pdfs.append(new_fn) os.remove(os.path.join(root,dir,obj)) for pdf in pdfs: fpath = os.path.join(root, dir, pdf) merger.append(fpath) merger.write(os.path.join(root, (dir+'.pdf'))) merger.close() elif len(dir_files)==1: item = dir_files[0] if item.endswith('.jp2') or item.endswith('.tiff'): im = Image.open(os.path.join(root, dir, item)) filename = os.path.splitext(item)[0] new_fn = filename + '.pdf' im.save(os.path.join(root,dir, new_fn)) im.close() os.remove(os.path.join(root,dir,item)) dir = os.listdir(args.output_dir) for item in dir: if item == 'audio' or item == 'video': continue else: path = os.path.join(args.output_dir, item) if not os.path.isfile(path): rmtree(path)
pid_to_path
identifier_name
islandora_bulk_downloader.py
#!/usr/bin/env python3 import sys import os import csv import re import logging import argparse import urllib.request import requests from PIL import Image from PyPDF2 import PdfFileMerger from shutil import rmtree, copytree # Functions def pid_to_path(pid): # Converts PID into a string suitable for use in filesystem paths. # Uses __ in case some PIDs contain a single _. return pid.replace(':', '__') def get_rels_ext_properties(pid): rels_ext_properties = dict() url = args.host.rstrip('/') + '/islandora/object/' + pid + '/datastream/RELS-EXT/view' request_url = urllib.request.urlopen(url) rels_ext_xml = request_url.read().decode('utf-8').strip() rels_ext_properties['PID'] = pid # <fedora:isMemberOfCollection rdf:resource="info:fedora/km:collection"/> isMemberOfCollections = re.findall('fedora:isMemberOfCollection rdf:resource="info:fedora/.*"', rels_ext_xml) if len(isMemberOfCollections) > 0:
else: rels_ext_properties['isMemberOfCollection'] = None # Newspaper issues use isMemberOf in relationship to their newspaper and isSequenceNumber to sequence within that newspaper. # <fedora:isMemberOf rdf:resource="info:fedora/ctimes:1"/> # <islandora:isSequenceNumber>16219</islandora:isSequenceNumber> isMemberOfs = re.findall('fedora:isMemberOf rdf:resource="info:fedora/.*"', rels_ext_xml) if len(isMemberOfs) > 0: isMemberOf = isMemberOfs[0].replace('fedora:isMemberOf rdf:resource="info:fedora/', '') isMemberOf = isMemberOf.strip('"') rels_ext_properties['isMemberOf'] = isMemberOf else: rels_ext_properties['isMemberOf'] = None # Objects of cmodel islandora:newspaperPageCModel and islandora:pageCModel use this property. isSequenceNumbers = re.findall('<islandora:isSequenceNumber>.*<', rels_ext_xml) if len(isSequenceNumbers) > 0: # Assumes that the object has only one parent. isSequenceNumber = isSequenceNumbers[0].replace('<.*', '') isSequenceNumbers = re.findall('>.*<', isSequenceNumber) isSequenceNumber = isSequenceNumbers[0].lstrip('>') isSequenceNumber = isSequenceNumber.rstrip('<') rels_ext_properties['isSequenceNumber'] = isSequenceNumber else: rels_ext_properties['isSequenceNumber'] = None # isPageOf is used in pages of books and newspaper issues. <islandora:isPageOf rdf:resource="info:fedora/aldine:12541"/> isPageOfs = re.findall('fedora:isPageOf rdf:resource="info:fedora/.*"', rels_ext_xml) if len(isPageOfs) > 0: isPageOf = isPageOfs[0].replace('fedora:isPageOf rdf:resource="info:fedora/', '') isPageOf = isPageOf.strip('"') rels_ext_properties['isPageOf'] = isPageOf else: rels_ext_properties['isPageOf'] = None # isConstituentOf is used in children of compound objects. isConstituentOfs = re.findall('fedora:isConstituentOf rdf:resource="info:fedora/.*"', rels_ext_xml) if len(isConstituentOfs) > 0: # Assumes that the object has only one parent. 
isConstituentOf = isConstituentOfs[0].replace('fedora:isConstituentOf rdf:resource="info:fedora/', '') isConstituentOf = isConstituentOf.strip('"') rels_ext_properties['isConstituentOf'] = isConstituentOf else: rels_ext_properties['isConstituentOf'] = None # isSequenceNumberOf{PID} is used in paged content (of books, newspapers), and in children of compound objects. # <islandora:isSequenceNumberOfkm_8352>5</islandora:isSequenceNumberOfkm_8352> isSequenceNumberOfs = re.findall('<islandora:isSequenceNumberOf.*>.*<', rels_ext_xml) if len(isSequenceNumberOfs) > 0: # Assumes that the object has only one parent. isSequenceNumberOf = isSequenceNumberOfs[0].replace('<.*', '') isSequenceNumberOfs = re.findall('>.*<', isSequenceNumberOf) isSequenceNumberOf = isSequenceNumberOfs[0].lstrip('>') isSequenceNumberOf = isSequenceNumberOf.rstrip('<') rels_ext_properties['isSequenceNumberOf'] = isSequenceNumberOf else: rels_ext_properties['isSequenceNumberOf'] = None # Standard models: # islandora:collectionCModel # islandora:pageCModel # islandora:sp_pdf # islandora:sp-audioCModel # islandora:sp_disk_image # islandora:sp_videoCModel # islandora:sp_basic_image # islandora:sp_web_archive # islandora:sp_large_image_cmodel # ir:citationCModel # ir:thesisCModel # islandora:bookCModel # islandora:newspaperCModel # islandora:newspaperPageCModel # islandora:newspaperIssueCModel # islandora:compoundCModel # Note: it is possible for objects of these content models to have multiple content models. # islandora:entityCModel # islandora:eventCModel # islandora:placeCModel # islandora:personCModel # islandora:organizationCModel models = re.findall('fedora-model:hasModel rdf:resource="info:fedora/.*"', rels_ext_xml) # Assumes a single model. model = models[0].replace('fedora-model:hasModel rdf:resource="info:fedora/', '') model = model.strip('"') rels_ext_properties['model'] = model return rels_ext_properties # Main program logic. 
parser = argparse.ArgumentParser() parser.add_argument('--pid_file', required=True, help='Relative or absolute path to the file listing all PIDs to harvest.') parser.add_argument('--log', required=True, help='Relative or absolute path to the log file.') parser.add_argument('--host', required=True, help='Islandora hostname, including the "https://". Trailing / is optional.') parser.add_argument('--output_dir', required=True, help='Relative or absolute path to the directory to put the harvested content in. Created if does not exist.') args = parser.parse_args() logging.basicConfig( filename=args.log, level=logging.INFO, filemode='a+', format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S') if not os.path.exists(args.pid_file): message = "CSV file " + args.pid_file + " does not exist." logging.error(message) sys.exit("ERROR: " + message) if os.path.exists(args.output_dir): logging.info("Output directory " + args.output_dir + " exists.") else: os.mkdir(args.output_dir) logging.info("Creating output directory " + args.output_dir + ".") with open(args.pid_file, 'r', newline='') as csv_reader_file_handle: csv_reader = csv.DictReader(csv_reader_file_handle) for row in csv_reader: properties = get_rels_ext_properties(row['PID']) if properties['model'] == 'islandora:compoundCModel': continue obj_url = args.host + '/islandora/object/' + row['PID'] + '/datastream/OBJ/download' obj = requests.get(obj_url) content_disp = obj.headers['content-disposition'] m = re.search(r'filename="(.*)"', content_disp) fname = m.group(1) ext = os.path.splitext(fname)[1] if not bool(properties['isConstituentOf']): content_path = os.path.join(args.output_dir, (pid_to_path(row['PID'])+ext)) elif not os.path.exists(os.path.join(args.output_dir, pid_to_path(properties['isConstituentOf']))): os.mkdir(os.path.join(args.output_dir, pid_to_path(properties['isConstituentOf']))) if bool(properties['isSequenceNumberOf']): content_path = os.path.join(args.output_dir, 
pid_to_path(properties['isConstituentOf']), (properties['isSequenceNumberOf']+"_"+pid_to_path(row['PID']))+ext) elif bool(properties['isSequenceNumber']): content_path = os.path.join(args.output_dir, pid_to_path(properties['isConstituentOf']), (properties['isSequenceNumber']+"_"+pid_to_path(row['PID']))+ext) else: content_path = os.path.join(args.output_dir, pid_to_path(properties['isConstituentOf']), ("_" + pid_to_path(row['PID'])) + ext) with open(content_path, 'wb') as file: file.write(obj.content) # loop through folders in output directory (for folders in dir) for root, dirs, files in os.walk(args.output_dir): for dir in dirs: if dir.startswith('km'): dir_files = os.listdir(os.path.join(root, dir)) if dir_files[0].endswith('mp3'): if not os.path.exists(os.path.join(root, 'video')): os.mkdir(os.path.join(root, 'audio')) dest = os.path.join(root,'audio', dir) copytree(os.path.join(root, dir), dest) continue elif dir_files[0].endswith('mp4'): if not os.path.exists(os.path.join(root, 'video')): os.mkdir(os.path.join(root, 'video')) dest = os.path.join(root, 'video', dir) copytree(os.path.join(root, dir), dest) continue elif len(dir_files)>1: #if there's more than one component in folder, sort & save as one pdf dir_files.sort(key=lambda x: int(x.split("_")[0])) pdfs = [] merger = PdfFileMerger() for obj in dir_files: if obj.endswith('.jp2') or obj.endswith('.tiff'): im = Image.open(os.path.join(root, dir, obj)) filename = os.path.splitext(obj)[0] new_fn = filename + '.pdf' im.save(os.path.join(root, dir, new_fn)) im.close() pdfs.append(new_fn) os.remove(os.path.join(root,dir,obj)) for pdf in pdfs: fpath = os.path.join(root, dir, pdf) merger.append(fpath) merger.write(os.path.join(root, (dir+'.pdf'))) merger.close() elif len(dir_files)==1: item = dir_files[0] if item.endswith('.jp2') or item.endswith('.tiff'): im = Image.open(os.path.join(root, dir, item)) filename = os.path.splitext(item)[0] new_fn = filename + '.pdf' im.save(os.path.join(root,dir, new_fn)) 
im.close() os.remove(os.path.join(root,dir,item)) dir = os.listdir(args.output_dir) for item in dir: if item == 'audio' or item == 'video': continue else: path = os.path.join(args.output_dir, item) if not os.path.isfile(path): rmtree(path)
isMemberOfCollection = isMemberOfCollections[0].replace('fedora:isMemberOfCollection rdf:resource="info:fedora/', '') isMemberOfCollection = isMemberOfCollection.strip('"') rels_ext_properties['isMemberOfCollection'] = isMemberOfCollection
conditional_block
route_planner.rs
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use crate::{ future_help::{Observer, PollMutex}, labels::{NodeId, NodeLinkId}, link::LinkStatus, router::Router, }; use anyhow::{bail, format_err, Error}; use fuchsia_async::Timer; use futures::{future::poll_fn, lock::Mutex, prelude::*, ready}; use std::{ collections::{BTreeMap, BinaryHeap}, sync::{Arc, Weak}, task::{Context, Poll, Waker}, time::Duration, }; /// Assumed forwarding time through a node. /// This is a temporary hack to alleviate some bad route selection. const FORWARDING_TIME: Duration = Duration::from_millis(100); /// Collects all information about a node in one place #[derive(Debug)] struct Node { links: BTreeMap<NodeLinkId, Link>, } /// During pathfinding, collects the shortest path so far to a node #[derive(Debug, Clone, Copy)] struct NodeProgress { round_trip_time: Duration, outgoing_link: NodeLinkId, } /// Describes the state of a link #[derive(Debug, Clone)] pub struct LinkDescription { /// Current round trip time estimate for this link pub round_trip_time: Duration, } /// Collects all information about one link on one node /// Links that are owned by NodeTable should remain owned (mutable references should not be given /// out) #[derive(Debug)] pub struct Link { /// Destination node for this link pub to: NodeId, /// Description of this link pub desc: LinkDescription, } /// Table of all nodes (and links between them) known to an instance struct NodeTable { root_node: NodeId, nodes: BTreeMap<NodeId, Node>, version: u64, wake_on_version_change: Option<Waker>, } impl NodeTable { /// Create a new node table rooted at `root_node` pub fn new(root_node: NodeId) -> NodeTable { NodeTable { root_node, nodes: BTreeMap::new(), version: 0, wake_on_version_change: None } } fn poll_new_version(&mut self, ctx: &mut Context<'_>, last_version: &mut u64) -> Poll<()> { if *last_version == self.version { 
self.wake_on_version_change = Some(ctx.waker().clone()); Poll::Pending } else { *last_version = self.version; Poll::Ready(()) } } fn get_or_create_node_mut(&mut self, node_id: NodeId) -> &mut Node { self.nodes.entry(node_id).or_insert_with(|| Node { links: BTreeMap::new() }) } /// Update a single link on a node. fn update_link( &mut self, from: NodeId, to: NodeId, link_id: NodeLinkId, desc: LinkDescription, ) -> Result<(), Error> { log::trace!( "{:?} update_link: from:{:?} to:{:?} link_id:{:?} desc:{:?}", self.root_node, from, to, link_id, desc ); if from == to { bail!("Circular link seen"); } self.get_or_create_node_mut(to); self.get_or_create_node_mut(from).links.insert(link_id, Link { to, desc }); Ok(()) } fn update_links(&mut self, from: NodeId, links: Vec<LinkStatus>) -> Result<(), Error> { self.get_or_create_node_mut(from).links.clear(); for LinkStatus { to, local_id, round_trip_time } in links.into_iter() { self.update_link(from, to, local_id, LinkDescription { round_trip_time })?; } self.version += 1; self.wake_on_version_change.take().map(|w| w.wake()); Ok(()) } /// Build a routing table for our node based on current link data fn build_routes(&self) -> impl Iterator<Item = (NodeId, NodeLinkId)> { let mut todo = BinaryHeap::new(); log::trace!("{:?} BUILD ROUTES: {:?}", self.root_node, self.nodes); let mut progress = BTreeMap::<NodeId, NodeProgress>::new(); for (link_id, link) in self.nodes.get(&self.root_node).unwrap().links.iter() { if link.to == self.root_node { continue; } todo.push(link.to); let new_progress = NodeProgress { round_trip_time: link.desc.round_trip_time + 2 * FORWARDING_TIME, outgoing_link: *link_id, }; progress .entry(link.to) .and_modify(|p| { if p.round_trip_time > new_progress.round_trip_time { *p = new_progress; } }) .or_insert_with(|| new_progress); } log::trace!("BUILD START: progress={:?} todo={:?}", progress, todo); while let Some(from) = todo.pop() { log::trace!("STEP {:?}: progress={:?} todo={:?}", from, progress, todo); let 
progress_from = progress.get(&from).unwrap().clone(); for (_, link) in self.nodes.get(&from).unwrap().links.iter() { if link.to == self.root_node { continue; } let new_progress = NodeProgress { round_trip_time: progress_from.round_trip_time + link.desc.round_trip_time + 2 * FORWARDING_TIME, outgoing_link: progress_from.outgoing_link, }; progress .entry(link.to) .and_modify(|p| { if p.round_trip_time > new_progress.round_trip_time { *p = new_progress; todo.push(link.to); } }) .or_insert_with(|| { todo.push(link.to); new_progress }); } } log::trace!("DONE: progress={:?} todo={:?}", progress, todo); progress .into_iter() .map(|(node_id, NodeProgress { outgoing_link: link_id, .. })| (node_id, link_id)) } } #[derive(Debug)] pub(crate) struct RemoteRoutingUpdate { pub(crate) from_node_id: NodeId, pub(crate) status: Vec<LinkStatus>, } pub(crate) type RemoteRoutingUpdateSender = futures::channel::mpsc::Sender<RemoteRoutingUpdate>; pub(crate) type RemoteRoutingUpdateReceiver = futures::channel::mpsc::Receiver<RemoteRoutingUpdate>; pub(crate) fn routing_update_channel() -> (RemoteRoutingUpdateSender, RemoteRoutingUpdateReceiver) { futures::channel::mpsc::channel(1) } pub(crate) async fn run_route_planner( router: &Weak<Router>, mut remote_updates: RemoteRoutingUpdateReceiver, mut local_updates: Observer<Vec<LinkStatus>>, ) -> Result<(), Error> { let get_router = move || Weak::upgrade(router).ok_or_else(|| format_err!("router gone")); let node_table = Arc::new(Mutex::new(NodeTable::new(get_router()?.node_id()))); let remote_node_table = node_table.clone(); let local_node_table = node_table.clone(); let update_node_table = node_table; let _: ((), (), ()) = futures::future::try_join3( async move { while let Some(RemoteRoutingUpdate { from_node_id, status }) = remote_updates.next().await { let mut node_table = remote_node_table.lock().await; if from_node_id == node_table.root_node { log::warn!("Attempt to update own node id links as remote"); continue; } if let Err(e) = 
node_table.update_links(from_node_id, status)
} Ok::<_, Error>(()) }, async move { while let Some(status) = local_updates.next().await { let mut node_table = local_node_table.lock().await; let root_node = node_table.root_node; if let Err(e) = node_table.update_links(root_node, status) { log::warn!("Update local links failed: {:?}", e); continue; } } Ok(()) }, async move { let mut pm = PollMutex::new(&*update_node_table); let mut current_version = 0; let mut poll_version = move |ctx: &mut Context<'_>| { let mut node_table = ready!(pm.poll(ctx)); ready!(node_table.poll_new_version(ctx, &mut current_version)); Poll::Ready(node_table) }; loop { let node_table = poll_fn(&mut poll_version).await; get_router()?.update_routes(node_table.build_routes(), "new_routes").await?; drop(node_table); Timer::new(Duration::from_millis(100)).await; } }, ) .await?; Ok(()) } #[cfg(test)] mod test { use super::*; use arbitrary::{Arbitrary, Unstructured}; use rand::Rng; use std::collections::HashMap; use std::time::Instant; fn remove_item<T: Eq>(value: &T, from: &mut Vec<T>) -> bool { let len = from.len(); for i in 0..len { if from[i] == *value { from.remove(i); return true; } } return false; } fn construct_node_table_from_links(links: &[(u64, u64, u64, u64)]) -> NodeTable { let mut node_table = NodeTable::new(1.into()); for (from, to, link_id, rtt) in links { node_table .update_link( (*from).into(), (*to).into(), (*link_id).into(), LinkDescription { round_trip_time: Duration::from_millis(*rtt) }, ) .unwrap(); } node_table } fn is_outcome(mut got: Vec<(NodeId, NodeLinkId)>, outcome: &[(u64, u64)]) -> bool { let mut result = true; for (node_id, link_id) in outcome { if !remove_item(&((*node_id).into(), (*link_id).into()), &mut got) { log::trace!("Expected outcome not found: {}#{}", node_id, link_id); result = false; } } for (node_id, link_id) in got { log::trace!("Unexpected outcome: {}#{}", node_id.0, link_id.0); result = false; } result } fn builds_route_ok(links: &[(u64, u64, u64, u64)], outcome: &[(u64, u64)]) -> bool { 
log::trace!("TEST: {:?} --> {:?}", links, outcome); let node_table = construct_node_table_from_links(links); let built: Vec<(NodeId, NodeLinkId)> = node_table.build_routes().collect(); let r = is_outcome(built.clone(), outcome); if !r { log::trace!("NODE_TABLE: {:?}", node_table.nodes); log::trace!("BUILT: {:?}", built); } r } #[test] fn test_build_routes() { crate::test_util::init(); assert!(builds_route_ok(&[(1, 2, 1, 10), (2, 1, 123, 5)], &[(2, 1)])); assert!(builds_route_ok( &[ (1, 2, 1, 10), (2, 1, 123, 5), (1, 3, 2, 10), (3, 1, 133, 1), (2, 3, 7, 88), (3, 2, 334, 23) ], &[(2, 1), (3, 2)] )); assert!(builds_route_ok( &[ (1, 2, 1, 10), (2, 1, 123, 5), (1, 3, 2, 1000), (3, 1, 133, 1), (2, 3, 7, 88), (3, 2, 334, 23) ], &[(2, 1), (3, 1)] )); } #[derive(Arbitrary, Debug, Clone, Copy)] struct DoesntFormLoops { a_to_b: u64, b_to_a: u64, a_to_c: u64, c_to_a: u64, } fn verify_no_loops(config: DoesntFormLoops) { // With node configuration: // B(2) - A(1) - C(3) // Verify that routes from A to B do not point at C // and that routes from A to C do not point at B println!("{:?}", config); let built: HashMap<NodeId, NodeLinkId> = construct_node_table_from_links(&[ (1, 2, 100, config.a_to_b), (2, 1, 200, config.b_to_a), (1, 3, 300, config.a_to_c), (3, 1, 400, config.c_to_a), ]) .build_routes() .collect(); assert_eq!(built.get(&2.into()), Some(&100.into())); assert_eq!(built.get(&3.into()), Some(&300.into())); } #[test] fn no_loops() { crate::test_util::init(); let start = Instant::now(); while Instant::now() - start < Duration::from_secs(1) { let mut random_junk = [0u8; 64]; rand::thread_rng().fill(&mut random_junk); verify_no_loops(Arbitrary::arbitrary(&mut Unstructured::new(&random_junk)).unwrap()); } } }
{ log::warn!("Update remote links from {:?} failed: {:?}", from_node_id, e); continue; }
conditional_block
route_planner.rs
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use crate::{ future_help::{Observer, PollMutex}, labels::{NodeId, NodeLinkId}, link::LinkStatus, router::Router, }; use anyhow::{bail, format_err, Error}; use fuchsia_async::Timer; use futures::{future::poll_fn, lock::Mutex, prelude::*, ready}; use std::{ collections::{BTreeMap, BinaryHeap}, sync::{Arc, Weak}, task::{Context, Poll, Waker}, time::Duration, }; /// Assumed forwarding time through a node. /// This is a temporary hack to alleviate some bad route selection. const FORWARDING_TIME: Duration = Duration::from_millis(100); /// Collects all information about a node in one place #[derive(Debug)] struct Node { links: BTreeMap<NodeLinkId, Link>, } /// During pathfinding, collects the shortest path so far to a node #[derive(Debug, Clone, Copy)] struct NodeProgress { round_trip_time: Duration, outgoing_link: NodeLinkId, } /// Describes the state of a link #[derive(Debug, Clone)] pub struct LinkDescription { /// Current round trip time estimate for this link pub round_trip_time: Duration, } /// Collects all information about one link on one node /// Links that are owned by NodeTable should remain owned (mutable references should not be given /// out) #[derive(Debug)] pub struct Link { /// Destination node for this link pub to: NodeId, /// Description of this link pub desc: LinkDescription, } /// Table of all nodes (and links between them) known to an instance struct NodeTable { root_node: NodeId, nodes: BTreeMap<NodeId, Node>, version: u64, wake_on_version_change: Option<Waker>, } impl NodeTable { /// Create a new node table rooted at `root_node` pub fn new(root_node: NodeId) -> NodeTable { NodeTable { root_node, nodes: BTreeMap::new(), version: 0, wake_on_version_change: None } } fn poll_new_version(&mut self, ctx: &mut Context<'_>, last_version: &mut u64) -> Poll<()> { if *last_version == self.version { 
self.wake_on_version_change = Some(ctx.waker().clone()); Poll::Pending } else { *last_version = self.version; Poll::Ready(()) } } fn get_or_create_node_mut(&mut self, node_id: NodeId) -> &mut Node { self.nodes.entry(node_id).or_insert_with(|| Node { links: BTreeMap::new() }) } /// Update a single link on a node. fn update_link( &mut self, from: NodeId, to: NodeId, link_id: NodeLinkId, desc: LinkDescription, ) -> Result<(), Error> { log::trace!( "{:?} update_link: from:{:?} to:{:?} link_id:{:?} desc:{:?}", self.root_node, from, to, link_id, desc ); if from == to { bail!("Circular link seen"); } self.get_or_create_node_mut(to); self.get_or_create_node_mut(from).links.insert(link_id, Link { to, desc }); Ok(()) } fn update_links(&mut self, from: NodeId, links: Vec<LinkStatus>) -> Result<(), Error> { self.get_or_create_node_mut(from).links.clear(); for LinkStatus { to, local_id, round_trip_time } in links.into_iter() { self.update_link(from, to, local_id, LinkDescription { round_trip_time })?; } self.version += 1; self.wake_on_version_change.take().map(|w| w.wake()); Ok(()) } /// Build a routing table for our node based on current link data fn build_routes(&self) -> impl Iterator<Item = (NodeId, NodeLinkId)> { let mut todo = BinaryHeap::new(); log::trace!("{:?} BUILD ROUTES: {:?}", self.root_node, self.nodes); let mut progress = BTreeMap::<NodeId, NodeProgress>::new(); for (link_id, link) in self.nodes.get(&self.root_node).unwrap().links.iter() { if link.to == self.root_node { continue; } todo.push(link.to); let new_progress = NodeProgress { round_trip_time: link.desc.round_trip_time + 2 * FORWARDING_TIME, outgoing_link: *link_id, }; progress .entry(link.to) .and_modify(|p| { if p.round_trip_time > new_progress.round_trip_time { *p = new_progress; } }) .or_insert_with(|| new_progress); } log::trace!("BUILD START: progress={:?} todo={:?}", progress, todo); while let Some(from) = todo.pop() { log::trace!("STEP {:?}: progress={:?} todo={:?}", from, progress, todo); let 
progress_from = progress.get(&from).unwrap().clone(); for (_, link) in self.nodes.get(&from).unwrap().links.iter() { if link.to == self.root_node { continue; } let new_progress = NodeProgress { round_trip_time: progress_from.round_trip_time + link.desc.round_trip_time + 2 * FORWARDING_TIME, outgoing_link: progress_from.outgoing_link, }; progress .entry(link.to) .and_modify(|p| { if p.round_trip_time > new_progress.round_trip_time { *p = new_progress; todo.push(link.to); } }) .or_insert_with(|| { todo.push(link.to); new_progress }); } } log::trace!("DONE: progress={:?} todo={:?}", progress, todo); progress .into_iter() .map(|(node_id, NodeProgress { outgoing_link: link_id, .. })| (node_id, link_id)) } } #[derive(Debug)] pub(crate) struct RemoteRoutingUpdate { pub(crate) from_node_id: NodeId, pub(crate) status: Vec<LinkStatus>, } pub(crate) type RemoteRoutingUpdateSender = futures::channel::mpsc::Sender<RemoteRoutingUpdate>; pub(crate) type RemoteRoutingUpdateReceiver = futures::channel::mpsc::Receiver<RemoteRoutingUpdate>; pub(crate) fn routing_update_channel() -> (RemoteRoutingUpdateSender, RemoteRoutingUpdateReceiver) { futures::channel::mpsc::channel(1) } pub(crate) async fn run_route_planner( router: &Weak<Router>, mut remote_updates: RemoteRoutingUpdateReceiver, mut local_updates: Observer<Vec<LinkStatus>>, ) -> Result<(), Error> { let get_router = move || Weak::upgrade(router).ok_or_else(|| format_err!("router gone")); let node_table = Arc::new(Mutex::new(NodeTable::new(get_router()?.node_id()))); let remote_node_table = node_table.clone(); let local_node_table = node_table.clone(); let update_node_table = node_table; let _: ((), (), ()) = futures::future::try_join3( async move { while let Some(RemoteRoutingUpdate { from_node_id, status }) = remote_updates.next().await { let mut node_table = remote_node_table.lock().await; if from_node_id == node_table.root_node { log::warn!("Attempt to update own node id links as remote"); continue; } if let Err(e) = 
node_table.update_links(from_node_id, status) { log::warn!("Update remote links from {:?} failed: {:?}", from_node_id, e); continue; }
async move { while let Some(status) = local_updates.next().await { let mut node_table = local_node_table.lock().await; let root_node = node_table.root_node; if let Err(e) = node_table.update_links(root_node, status) { log::warn!("Update local links failed: {:?}", e); continue; } } Ok(()) }, async move { let mut pm = PollMutex::new(&*update_node_table); let mut current_version = 0; let mut poll_version = move |ctx: &mut Context<'_>| { let mut node_table = ready!(pm.poll(ctx)); ready!(node_table.poll_new_version(ctx, &mut current_version)); Poll::Ready(node_table) }; loop { let node_table = poll_fn(&mut poll_version).await; get_router()?.update_routes(node_table.build_routes(), "new_routes").await?; drop(node_table); Timer::new(Duration::from_millis(100)).await; } }, ) .await?; Ok(()) } #[cfg(test)] mod test { use super::*; use arbitrary::{Arbitrary, Unstructured}; use rand::Rng; use std::collections::HashMap; use std::time::Instant; fn remove_item<T: Eq>(value: &T, from: &mut Vec<T>) -> bool { let len = from.len(); for i in 0..len { if from[i] == *value { from.remove(i); return true; } } return false; } fn construct_node_table_from_links(links: &[(u64, u64, u64, u64)]) -> NodeTable { let mut node_table = NodeTable::new(1.into()); for (from, to, link_id, rtt) in links { node_table .update_link( (*from).into(), (*to).into(), (*link_id).into(), LinkDescription { round_trip_time: Duration::from_millis(*rtt) }, ) .unwrap(); } node_table } fn is_outcome(mut got: Vec<(NodeId, NodeLinkId)>, outcome: &[(u64, u64)]) -> bool { let mut result = true; for (node_id, link_id) in outcome { if !remove_item(&((*node_id).into(), (*link_id).into()), &mut got) { log::trace!("Expected outcome not found: {}#{}", node_id, link_id); result = false; } } for (node_id, link_id) in got { log::trace!("Unexpected outcome: {}#{}", node_id.0, link_id.0); result = false; } result } fn builds_route_ok(links: &[(u64, u64, u64, u64)], outcome: &[(u64, u64)]) -> bool { log::trace!("TEST: {:?} --> {:?}", 
links, outcome); let node_table = construct_node_table_from_links(links); let built: Vec<(NodeId, NodeLinkId)> = node_table.build_routes().collect(); let r = is_outcome(built.clone(), outcome); if !r { log::trace!("NODE_TABLE: {:?}", node_table.nodes); log::trace!("BUILT: {:?}", built); } r } #[test] fn test_build_routes() { crate::test_util::init(); assert!(builds_route_ok(&[(1, 2, 1, 10), (2, 1, 123, 5)], &[(2, 1)])); assert!(builds_route_ok( &[ (1, 2, 1, 10), (2, 1, 123, 5), (1, 3, 2, 10), (3, 1, 133, 1), (2, 3, 7, 88), (3, 2, 334, 23) ], &[(2, 1), (3, 2)] )); assert!(builds_route_ok( &[ (1, 2, 1, 10), (2, 1, 123, 5), (1, 3, 2, 1000), (3, 1, 133, 1), (2, 3, 7, 88), (3, 2, 334, 23) ], &[(2, 1), (3, 1)] )); } #[derive(Arbitrary, Debug, Clone, Copy)] struct DoesntFormLoops { a_to_b: u64, b_to_a: u64, a_to_c: u64, c_to_a: u64, } fn verify_no_loops(config: DoesntFormLoops) { // With node configuration: // B(2) - A(1) - C(3) // Verify that routes from A to B do not point at C // and that routes from A to C do not point at B println!("{:?}", config); let built: HashMap<NodeId, NodeLinkId> = construct_node_table_from_links(&[ (1, 2, 100, config.a_to_b), (2, 1, 200, config.b_to_a), (1, 3, 300, config.a_to_c), (3, 1, 400, config.c_to_a), ]) .build_routes() .collect(); assert_eq!(built.get(&2.into()), Some(&100.into())); assert_eq!(built.get(&3.into()), Some(&300.into())); } #[test] fn no_loops() { crate::test_util::init(); let start = Instant::now(); while Instant::now() - start < Duration::from_secs(1) { let mut random_junk = [0u8; 64]; rand::thread_rng().fill(&mut random_junk); verify_no_loops(Arbitrary::arbitrary(&mut Unstructured::new(&random_junk)).unwrap()); } } }
} Ok::<_, Error>(()) },
random_line_split
route_planner.rs
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use crate::{ future_help::{Observer, PollMutex}, labels::{NodeId, NodeLinkId}, link::LinkStatus, router::Router, }; use anyhow::{bail, format_err, Error}; use fuchsia_async::Timer; use futures::{future::poll_fn, lock::Mutex, prelude::*, ready}; use std::{ collections::{BTreeMap, BinaryHeap}, sync::{Arc, Weak}, task::{Context, Poll, Waker}, time::Duration, }; /// Assumed forwarding time through a node. /// This is a temporary hack to alleviate some bad route selection. const FORWARDING_TIME: Duration = Duration::from_millis(100); /// Collects all information about a node in one place #[derive(Debug)] struct Node { links: BTreeMap<NodeLinkId, Link>, } /// During pathfinding, collects the shortest path so far to a node #[derive(Debug, Clone, Copy)] struct NodeProgress { round_trip_time: Duration, outgoing_link: NodeLinkId, } /// Describes the state of a link #[derive(Debug, Clone)] pub struct LinkDescription { /// Current round trip time estimate for this link pub round_trip_time: Duration, } /// Collects all information about one link on one node /// Links that are owned by NodeTable should remain owned (mutable references should not be given /// out) #[derive(Debug)] pub struct Link { /// Destination node for this link pub to: NodeId, /// Description of this link pub desc: LinkDescription, } /// Table of all nodes (and links between them) known to an instance struct NodeTable { root_node: NodeId, nodes: BTreeMap<NodeId, Node>, version: u64, wake_on_version_change: Option<Waker>, } impl NodeTable { /// Create a new node table rooted at `root_node` pub fn new(root_node: NodeId) -> NodeTable { NodeTable { root_node, nodes: BTreeMap::new(), version: 0, wake_on_version_change: None } } fn poll_new_version(&mut self, ctx: &mut Context<'_>, last_version: &mut u64) -> Poll<()> { if *last_version == self.version { 
self.wake_on_version_change = Some(ctx.waker().clone()); Poll::Pending } else { *last_version = self.version; Poll::Ready(()) } } fn get_or_create_node_mut(&mut self, node_id: NodeId) -> &mut Node { self.nodes.entry(node_id).or_insert_with(|| Node { links: BTreeMap::new() }) } /// Update a single link on a node. fn update_link( &mut self, from: NodeId, to: NodeId, link_id: NodeLinkId, desc: LinkDescription, ) -> Result<(), Error>
fn update_links(&mut self, from: NodeId, links: Vec<LinkStatus>) -> Result<(), Error> { self.get_or_create_node_mut(from).links.clear(); for LinkStatus { to, local_id, round_trip_time } in links.into_iter() { self.update_link(from, to, local_id, LinkDescription { round_trip_time })?; } self.version += 1; self.wake_on_version_change.take().map(|w| w.wake()); Ok(()) } /// Build a routing table for our node based on current link data fn build_routes(&self) -> impl Iterator<Item = (NodeId, NodeLinkId)> { let mut todo = BinaryHeap::new(); log::trace!("{:?} BUILD ROUTES: {:?}", self.root_node, self.nodes); let mut progress = BTreeMap::<NodeId, NodeProgress>::new(); for (link_id, link) in self.nodes.get(&self.root_node).unwrap().links.iter() { if link.to == self.root_node { continue; } todo.push(link.to); let new_progress = NodeProgress { round_trip_time: link.desc.round_trip_time + 2 * FORWARDING_TIME, outgoing_link: *link_id, }; progress .entry(link.to) .and_modify(|p| { if p.round_trip_time > new_progress.round_trip_time { *p = new_progress; } }) .or_insert_with(|| new_progress); } log::trace!("BUILD START: progress={:?} todo={:?}", progress, todo); while let Some(from) = todo.pop() { log::trace!("STEP {:?}: progress={:?} todo={:?}", from, progress, todo); let progress_from = progress.get(&from).unwrap().clone(); for (_, link) in self.nodes.get(&from).unwrap().links.iter() { if link.to == self.root_node { continue; } let new_progress = NodeProgress { round_trip_time: progress_from.round_trip_time + link.desc.round_trip_time + 2 * FORWARDING_TIME, outgoing_link: progress_from.outgoing_link, }; progress .entry(link.to) .and_modify(|p| { if p.round_trip_time > new_progress.round_trip_time { *p = new_progress; todo.push(link.to); } }) .or_insert_with(|| { todo.push(link.to); new_progress }); } } log::trace!("DONE: progress={:?} todo={:?}", progress, todo); progress .into_iter() .map(|(node_id, NodeProgress { outgoing_link: link_id, .. 
})| (node_id, link_id)) } } #[derive(Debug)] pub(crate) struct RemoteRoutingUpdate { pub(crate) from_node_id: NodeId, pub(crate) status: Vec<LinkStatus>, } pub(crate) type RemoteRoutingUpdateSender = futures::channel::mpsc::Sender<RemoteRoutingUpdate>; pub(crate) type RemoteRoutingUpdateReceiver = futures::channel::mpsc::Receiver<RemoteRoutingUpdate>; pub(crate) fn routing_update_channel() -> (RemoteRoutingUpdateSender, RemoteRoutingUpdateReceiver) { futures::channel::mpsc::channel(1) } pub(crate) async fn run_route_planner( router: &Weak<Router>, mut remote_updates: RemoteRoutingUpdateReceiver, mut local_updates: Observer<Vec<LinkStatus>>, ) -> Result<(), Error> { let get_router = move || Weak::upgrade(router).ok_or_else(|| format_err!("router gone")); let node_table = Arc::new(Mutex::new(NodeTable::new(get_router()?.node_id()))); let remote_node_table = node_table.clone(); let local_node_table = node_table.clone(); let update_node_table = node_table; let _: ((), (), ()) = futures::future::try_join3( async move { while let Some(RemoteRoutingUpdate { from_node_id, status }) = remote_updates.next().await { let mut node_table = remote_node_table.lock().await; if from_node_id == node_table.root_node { log::warn!("Attempt to update own node id links as remote"); continue; } if let Err(e) = node_table.update_links(from_node_id, status) { log::warn!("Update remote links from {:?} failed: {:?}", from_node_id, e); continue; } } Ok::<_, Error>(()) }, async move { while let Some(status) = local_updates.next().await { let mut node_table = local_node_table.lock().await; let root_node = node_table.root_node; if let Err(e) = node_table.update_links(root_node, status) { log::warn!("Update local links failed: {:?}", e); continue; } } Ok(()) }, async move { let mut pm = PollMutex::new(&*update_node_table); let mut current_version = 0; let mut poll_version = move |ctx: &mut Context<'_>| { let mut node_table = ready!(pm.poll(ctx)); ready!(node_table.poll_new_version(ctx, &mut 
current_version)); Poll::Ready(node_table) }; loop { let node_table = poll_fn(&mut poll_version).await; get_router()?.update_routes(node_table.build_routes(), "new_routes").await?; drop(node_table); Timer::new(Duration::from_millis(100)).await; } }, ) .await?; Ok(()) } #[cfg(test)] mod test { use super::*; use arbitrary::{Arbitrary, Unstructured}; use rand::Rng; use std::collections::HashMap; use std::time::Instant; fn remove_item<T: Eq>(value: &T, from: &mut Vec<T>) -> bool { let len = from.len(); for i in 0..len { if from[i] == *value { from.remove(i); return true; } } return false; } fn construct_node_table_from_links(links: &[(u64, u64, u64, u64)]) -> NodeTable { let mut node_table = NodeTable::new(1.into()); for (from, to, link_id, rtt) in links { node_table .update_link( (*from).into(), (*to).into(), (*link_id).into(), LinkDescription { round_trip_time: Duration::from_millis(*rtt) }, ) .unwrap(); } node_table } fn is_outcome(mut got: Vec<(NodeId, NodeLinkId)>, outcome: &[(u64, u64)]) -> bool { let mut result = true; for (node_id, link_id) in outcome { if !remove_item(&((*node_id).into(), (*link_id).into()), &mut got) { log::trace!("Expected outcome not found: {}#{}", node_id, link_id); result = false; } } for (node_id, link_id) in got { log::trace!("Unexpected outcome: {}#{}", node_id.0, link_id.0); result = false; } result } fn builds_route_ok(links: &[(u64, u64, u64, u64)], outcome: &[(u64, u64)]) -> bool { log::trace!("TEST: {:?} --> {:?}", links, outcome); let node_table = construct_node_table_from_links(links); let built: Vec<(NodeId, NodeLinkId)> = node_table.build_routes().collect(); let r = is_outcome(built.clone(), outcome); if !r { log::trace!("NODE_TABLE: {:?}", node_table.nodes); log::trace!("BUILT: {:?}", built); } r } #[test] fn test_build_routes() { crate::test_util::init(); assert!(builds_route_ok(&[(1, 2, 1, 10), (2, 1, 123, 5)], &[(2, 1)])); assert!(builds_route_ok( &[ (1, 2, 1, 10), (2, 1, 123, 5), (1, 3, 2, 10), (3, 1, 133, 1), (2, 3, 7, 
88), (3, 2, 334, 23) ], &[(2, 1), (3, 2)] )); assert!(builds_route_ok( &[ (1, 2, 1, 10), (2, 1, 123, 5), (1, 3, 2, 1000), (3, 1, 133, 1), (2, 3, 7, 88), (3, 2, 334, 23) ], &[(2, 1), (3, 1)] )); } #[derive(Arbitrary, Debug, Clone, Copy)] struct DoesntFormLoops { a_to_b: u64, b_to_a: u64, a_to_c: u64, c_to_a: u64, } fn verify_no_loops(config: DoesntFormLoops) { // With node configuration: // B(2) - A(1) - C(3) // Verify that routes from A to B do not point at C // and that routes from A to C do not point at B println!("{:?}", config); let built: HashMap<NodeId, NodeLinkId> = construct_node_table_from_links(&[ (1, 2, 100, config.a_to_b), (2, 1, 200, config.b_to_a), (1, 3, 300, config.a_to_c), (3, 1, 400, config.c_to_a), ]) .build_routes() .collect(); assert_eq!(built.get(&2.into()), Some(&100.into())); assert_eq!(built.get(&3.into()), Some(&300.into())); } #[test] fn no_loops() { crate::test_util::init(); let start = Instant::now(); while Instant::now() - start < Duration::from_secs(1) { let mut random_junk = [0u8; 64]; rand::thread_rng().fill(&mut random_junk); verify_no_loops(Arbitrary::arbitrary(&mut Unstructured::new(&random_junk)).unwrap()); } } }
{ log::trace!( "{:?} update_link: from:{:?} to:{:?} link_id:{:?} desc:{:?}", self.root_node, from, to, link_id, desc ); if from == to { bail!("Circular link seen"); } self.get_or_create_node_mut(to); self.get_or_create_node_mut(from).links.insert(link_id, Link { to, desc }); Ok(()) }
identifier_body
route_planner.rs
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use crate::{ future_help::{Observer, PollMutex}, labels::{NodeId, NodeLinkId}, link::LinkStatus, router::Router, }; use anyhow::{bail, format_err, Error}; use fuchsia_async::Timer; use futures::{future::poll_fn, lock::Mutex, prelude::*, ready}; use std::{ collections::{BTreeMap, BinaryHeap}, sync::{Arc, Weak}, task::{Context, Poll, Waker}, time::Duration, }; /// Assumed forwarding time through a node. /// This is a temporary hack to alleviate some bad route selection. const FORWARDING_TIME: Duration = Duration::from_millis(100); /// Collects all information about a node in one place #[derive(Debug)] struct Node { links: BTreeMap<NodeLinkId, Link>, } /// During pathfinding, collects the shortest path so far to a node #[derive(Debug, Clone, Copy)] struct NodeProgress { round_trip_time: Duration, outgoing_link: NodeLinkId, } /// Describes the state of a link #[derive(Debug, Clone)] pub struct LinkDescription { /// Current round trip time estimate for this link pub round_trip_time: Duration, } /// Collects all information about one link on one node /// Links that are owned by NodeTable should remain owned (mutable references should not be given /// out) #[derive(Debug)] pub struct Link { /// Destination node for this link pub to: NodeId, /// Description of this link pub desc: LinkDescription, } /// Table of all nodes (and links between them) known to an instance struct NodeTable { root_node: NodeId, nodes: BTreeMap<NodeId, Node>, version: u64, wake_on_version_change: Option<Waker>, } impl NodeTable { /// Create a new node table rooted at `root_node` pub fn new(root_node: NodeId) -> NodeTable { NodeTable { root_node, nodes: BTreeMap::new(), version: 0, wake_on_version_change: None } } fn poll_new_version(&mut self, ctx: &mut Context<'_>, last_version: &mut u64) -> Poll<()> { if *last_version == self.version { 
self.wake_on_version_change = Some(ctx.waker().clone()); Poll::Pending } else { *last_version = self.version; Poll::Ready(()) } } fn get_or_create_node_mut(&mut self, node_id: NodeId) -> &mut Node { self.nodes.entry(node_id).or_insert_with(|| Node { links: BTreeMap::new() }) } /// Update a single link on a node. fn
( &mut self, from: NodeId, to: NodeId, link_id: NodeLinkId, desc: LinkDescription, ) -> Result<(), Error> { log::trace!( "{:?} update_link: from:{:?} to:{:?} link_id:{:?} desc:{:?}", self.root_node, from, to, link_id, desc ); if from == to { bail!("Circular link seen"); } self.get_or_create_node_mut(to); self.get_or_create_node_mut(from).links.insert(link_id, Link { to, desc }); Ok(()) } fn update_links(&mut self, from: NodeId, links: Vec<LinkStatus>) -> Result<(), Error> { self.get_or_create_node_mut(from).links.clear(); for LinkStatus { to, local_id, round_trip_time } in links.into_iter() { self.update_link(from, to, local_id, LinkDescription { round_trip_time })?; } self.version += 1; self.wake_on_version_change.take().map(|w| w.wake()); Ok(()) } /// Build a routing table for our node based on current link data fn build_routes(&self) -> impl Iterator<Item = (NodeId, NodeLinkId)> { let mut todo = BinaryHeap::new(); log::trace!("{:?} BUILD ROUTES: {:?}", self.root_node, self.nodes); let mut progress = BTreeMap::<NodeId, NodeProgress>::new(); for (link_id, link) in self.nodes.get(&self.root_node).unwrap().links.iter() { if link.to == self.root_node { continue; } todo.push(link.to); let new_progress = NodeProgress { round_trip_time: link.desc.round_trip_time + 2 * FORWARDING_TIME, outgoing_link: *link_id, }; progress .entry(link.to) .and_modify(|p| { if p.round_trip_time > new_progress.round_trip_time { *p = new_progress; } }) .or_insert_with(|| new_progress); } log::trace!("BUILD START: progress={:?} todo={:?}", progress, todo); while let Some(from) = todo.pop() { log::trace!("STEP {:?}: progress={:?} todo={:?}", from, progress, todo); let progress_from = progress.get(&from).unwrap().clone(); for (_, link) in self.nodes.get(&from).unwrap().links.iter() { if link.to == self.root_node { continue; } let new_progress = NodeProgress { round_trip_time: progress_from.round_trip_time + link.desc.round_trip_time + 2 * FORWARDING_TIME, outgoing_link: 
progress_from.outgoing_link, }; progress .entry(link.to) .and_modify(|p| { if p.round_trip_time > new_progress.round_trip_time { *p = new_progress; todo.push(link.to); } }) .or_insert_with(|| { todo.push(link.to); new_progress }); } } log::trace!("DONE: progress={:?} todo={:?}", progress, todo); progress .into_iter() .map(|(node_id, NodeProgress { outgoing_link: link_id, .. })| (node_id, link_id)) } } #[derive(Debug)] pub(crate) struct RemoteRoutingUpdate { pub(crate) from_node_id: NodeId, pub(crate) status: Vec<LinkStatus>, } pub(crate) type RemoteRoutingUpdateSender = futures::channel::mpsc::Sender<RemoteRoutingUpdate>; pub(crate) type RemoteRoutingUpdateReceiver = futures::channel::mpsc::Receiver<RemoteRoutingUpdate>; pub(crate) fn routing_update_channel() -> (RemoteRoutingUpdateSender, RemoteRoutingUpdateReceiver) { futures::channel::mpsc::channel(1) } pub(crate) async fn run_route_planner( router: &Weak<Router>, mut remote_updates: RemoteRoutingUpdateReceiver, mut local_updates: Observer<Vec<LinkStatus>>, ) -> Result<(), Error> { let get_router = move || Weak::upgrade(router).ok_or_else(|| format_err!("router gone")); let node_table = Arc::new(Mutex::new(NodeTable::new(get_router()?.node_id()))); let remote_node_table = node_table.clone(); let local_node_table = node_table.clone(); let update_node_table = node_table; let _: ((), (), ()) = futures::future::try_join3( async move { while let Some(RemoteRoutingUpdate { from_node_id, status }) = remote_updates.next().await { let mut node_table = remote_node_table.lock().await; if from_node_id == node_table.root_node { log::warn!("Attempt to update own node id links as remote"); continue; } if let Err(e) = node_table.update_links(from_node_id, status) { log::warn!("Update remote links from {:?} failed: {:?}", from_node_id, e); continue; } } Ok::<_, Error>(()) }, async move { while let Some(status) = local_updates.next().await { let mut node_table = local_node_table.lock().await; let root_node = node_table.root_node; 
if let Err(e) = node_table.update_links(root_node, status) { log::warn!("Update local links failed: {:?}", e); continue; } } Ok(()) }, async move { let mut pm = PollMutex::new(&*update_node_table); let mut current_version = 0; let mut poll_version = move |ctx: &mut Context<'_>| { let mut node_table = ready!(pm.poll(ctx)); ready!(node_table.poll_new_version(ctx, &mut current_version)); Poll::Ready(node_table) }; loop { let node_table = poll_fn(&mut poll_version).await; get_router()?.update_routes(node_table.build_routes(), "new_routes").await?; drop(node_table); Timer::new(Duration::from_millis(100)).await; } }, ) .await?; Ok(()) } #[cfg(test)] mod test { use super::*; use arbitrary::{Arbitrary, Unstructured}; use rand::Rng; use std::collections::HashMap; use std::time::Instant; fn remove_item<T: Eq>(value: &T, from: &mut Vec<T>) -> bool { let len = from.len(); for i in 0..len { if from[i] == *value { from.remove(i); return true; } } return false; } fn construct_node_table_from_links(links: &[(u64, u64, u64, u64)]) -> NodeTable { let mut node_table = NodeTable::new(1.into()); for (from, to, link_id, rtt) in links { node_table .update_link( (*from).into(), (*to).into(), (*link_id).into(), LinkDescription { round_trip_time: Duration::from_millis(*rtt) }, ) .unwrap(); } node_table } fn is_outcome(mut got: Vec<(NodeId, NodeLinkId)>, outcome: &[(u64, u64)]) -> bool { let mut result = true; for (node_id, link_id) in outcome { if !remove_item(&((*node_id).into(), (*link_id).into()), &mut got) { log::trace!("Expected outcome not found: {}#{}", node_id, link_id); result = false; } } for (node_id, link_id) in got { log::trace!("Unexpected outcome: {}#{}", node_id.0, link_id.0); result = false; } result } fn builds_route_ok(links: &[(u64, u64, u64, u64)], outcome: &[(u64, u64)]) -> bool { log::trace!("TEST: {:?} --> {:?}", links, outcome); let node_table = construct_node_table_from_links(links); let built: Vec<(NodeId, NodeLinkId)> = node_table.build_routes().collect(); let r 
= is_outcome(built.clone(), outcome); if !r { log::trace!("NODE_TABLE: {:?}", node_table.nodes); log::trace!("BUILT: {:?}", built); } r } #[test] fn test_build_routes() { crate::test_util::init(); assert!(builds_route_ok(&[(1, 2, 1, 10), (2, 1, 123, 5)], &[(2, 1)])); assert!(builds_route_ok( &[ (1, 2, 1, 10), (2, 1, 123, 5), (1, 3, 2, 10), (3, 1, 133, 1), (2, 3, 7, 88), (3, 2, 334, 23) ], &[(2, 1), (3, 2)] )); assert!(builds_route_ok( &[ (1, 2, 1, 10), (2, 1, 123, 5), (1, 3, 2, 1000), (3, 1, 133, 1), (2, 3, 7, 88), (3, 2, 334, 23) ], &[(2, 1), (3, 1)] )); } #[derive(Arbitrary, Debug, Clone, Copy)] struct DoesntFormLoops { a_to_b: u64, b_to_a: u64, a_to_c: u64, c_to_a: u64, } fn verify_no_loops(config: DoesntFormLoops) { // With node configuration: // B(2) - A(1) - C(3) // Verify that routes from A to B do not point at C // and that routes from A to C do not point at B println!("{:?}", config); let built: HashMap<NodeId, NodeLinkId> = construct_node_table_from_links(&[ (1, 2, 100, config.a_to_b), (2, 1, 200, config.b_to_a), (1, 3, 300, config.a_to_c), (3, 1, 400, config.c_to_a), ]) .build_routes() .collect(); assert_eq!(built.get(&2.into()), Some(&100.into())); assert_eq!(built.get(&3.into()), Some(&300.into())); } #[test] fn no_loops() { crate::test_util::init(); let start = Instant::now(); while Instant::now() - start < Duration::from_secs(1) { let mut random_junk = [0u8; 64]; rand::thread_rng().fill(&mut random_junk); verify_no_loops(Arbitrary::arbitrary(&mut Unstructured::new(&random_junk)).unwrap()); } } }
update_link
identifier_name
flappy_bird.js
// get canvas const canvas = document.getElementById('canvas'); // get context 2d const ctx = canvas.getContext('2d'); // load sprite.png const sprite = new Image(); sprite.src = 'image.png'; // we need to keep track of the number of frames let frames = 0; // to rotate the bird, the ctx.rotation requires radians. So we need to convert the degrees to radians function toRadian(degree) { return degree * Math.PI / 180; } // draw clear canvas function drawCanvas(){ ctx.fillStyle = 'lightblue'; ctx.fillRect(0, 0, canvas.width, canvas.height); } // track the current gamestate const gameState = { current: 0, getReady: 0, playing: 1, gameOver: 2 } // get the real mouse position function mousePos(canvas, e) { let rect = canvas.getBoundingClientRect(); let scaleX = canvas.width / rect.width; let scaleY = canvas.height / rect.height; return { x: (e.clientX - rect.left) * scaleX, y: (e.clientY - rect.top) * scaleY } } // get the real touch position function touchPos(canvas, e) { let rect = canvas.getBoundingClientRect(); let scaleX = canvas.width / rect.width; let scaleY = canvas.height / rect.height; return { x: (e.touches[0].clientX - rect.left) * scaleX, y: (e.touches[0].clientY - rect.top) * scaleY } } function changeGameState(e) { e.preventDefault(); if (e.type === 'touchstart') { switch (gameState.current) { case gameState.getReady: gameState.current = gameState.playing; break; case gameState.playing: bird.fly(); break; case gameState.gameOver: // if the click is inside the start Button, restart the game let position = touchPos(canvas, e); if (position.x > startButton.x && position.x < startButton.x + startButton.w && position.y > startButton.y && position.y < startButton.y + startButton.h) { pipes.reset(); score.reset(); gameState.current = gameState.getReady; } break; } } else if (e.type === 'click') { switch (gameState.current) { case gameState.getReady: gameState.current = gameState.playing; break; case gameState.playing: bird.fly(); break; case gameState.gameOver: // 
if the click is inside the start Button, restart the game let position = mousePos(canvas, e); if (position.x > startButton.x && position.x < startButton.x + startButton.w && position.y > startButton.y && position.y < startButton.y + startButton.h) { pipes.reset(); score.reset(); gameState.current = gameState.getReady; } break; } } } // start button const startButton = { x: 120, y: 275, w: 80, h: 25 } // add click event listener to switch the gamestate and play the game canvas.addEventListener('click', changeGameState); canvas.addEventListener('touchstart', changeGameState); // draw background const background = { sX: 0, sY: 0, w: 275, h: 226, x: 0, y: canvas.height - 226, draw() { // draw background twice because the image does not fit the entire canvas ctx.drawImage(sprite, this.sX, this.sY, this.w, this.h, this.x, this.y, this.w, this.h); ctx.drawImage(sprite, this.sX, this.sY, this.w, this.h, this.x + this.w, this.y, this.w, this.h); } } // draw foreground const foreground = { sX: 276, sY: 0, w: 224, h: 112, x: 0, y: canvas.height - 112, dx: 2, update() { // if the game is in playing mode than move the foreground if (gameState.current === gameState.playing) { // when its reaches half of the width, it resets so that the foreground doesn't leave the canvas this.x = (this.x - this.dx) % (this.w / 2); } }, draw() { // draw foreground twice because the image does not fit the entire canvas ctx.drawImage(sprite, this.sX, this.sY, this.w, this.h, this.x, this.y, this.w, this.h); ctx.drawImage(sprite, this.sX, this.sY, this.w, this.h, this.x + this.w, this.y, this.w, this.h); } } // draw bird const bird = { animation: [ {sX: 276, sY: 112}, {sX: 276, sY: 139}, {sX: 276, sY: 164}, {sX: 276, sY: 139} ], x: 70, y: 120, w: 34, h: 26, frame: 0, rotation: 0, gravity: 0.2, speed: 0, flight: 5, radius: 12, fly() { this.speed = -this.flight; }, update() { let birdImg = this.animation[this.frame]; // bird changes animation every 5 frames this.frame += frames % 10 === 0 ? 
1 : 0; this.frame = this.frame % this.animation.length; if (gameState.current === gameState.getReady) { this.y = 120; this.rotation = 0; this.speed = 0; } else { // bird falls because of gravity and the more it falls the faster it falls this.speed += this.gravity; this.y += this.speed; // if the bird touches the ground it's game over if (this.y + this.h / 2 >= foreground.y) { // bird rests in the ground this.y = foreground.y - this.h / 2; // and no longer flaps its wings this.frame = 1; gameState.current = gameState.gameOver; } if (this.speed >= this.flight) { this.rotation = toRadian(90); } else { this.rotation = toRadian(-20); } } }, draw() { let birdImg = this.animation[this.frame]; // rotate the bird ctx.save(); ctx.translate(this.x, this.y); ctx.rotate(this.rotation); // coordinates center in bird image rather than up-left corner ctx.drawImage(sprite, birdImg.sX, birdImg.sY, this.w, this.h, - this.w / 2, - this.h / 2, this.w, this.h); ctx.restore(); } } // draw pipes const pipes = { top: { sX: 553, sY: 0 }, bot: { sX: 502, sY: 0 }, w: 53, h: 400, gap: 120, dx: 2, total: [], max: -180, update() { // if the game is not in playing mode there is no pipe update if (gameState.current !== gameState.playing) return; // every 100 frames create a new pipe if (frames % 100 === 0) { this.total.push({ x: canvas.width, y: this.max * (Math.random() + 1) }); } // loop through the total array (holding all the pipe coordinates) for (let i = 0; i < this.total.length; i++) { let pipe = this.total[i]; // update its x position, so the pipes move to the left pipe.x -= this.dx; // when the pipe gets out out the canvas remove it if (pipe.x + this.w < 0) { // the first pipe getting out of the canvas will be the first element in the total array this.total.shift(); } if (bird.x === pipe.x + this.w + 1) { // when the bird goes through the pipe the score increases score.value += 1; score.best = Math.max(score.best, score.value); localStorage.setItem('flappy_bird_best_score', score.best); } 
// check if bird collides with top pipe if (bird.x + bird.radius > pipe.x && bird.x - bird.radius < pipe.x + this.w && bird.y + bird.radius > pipe.y && bird.y - bird.radius < pipe.y + this.h) { gameState.current = gameState.gameOver; } // check if bird collides with bot pipe if (bird.x + bird.radius > pipe.x && bird.x - bird.radius < pipe.x + this.w && bird.y + bird.radius > pipe.y + this.h + this.gap && bird.y - bird.radius < pipe.y + this.h + this.gap + this.gap) { gameState.current = gameState.gameOver; } } }, draw() { for (let i = 0; i < this.total.length; i++) { // draw the top pipe ctx.drawImage(sprite, this.top.sX, this.top.sY, this.w, this.h, this.total[i].x, this.total[i].y, this.w, this.h); // based on the top pipe position, draw the bot pipe ctx.drawImage(sprite, this.bot.sX, this.bot.sY, this.w, this.h, this.total[i].x, this.total[i].y + this.h + this.gap, this.w, this.h); } }, reset() { this.total = []; } } // draw Get Ready Image const getReady = { sX: 0, sY: 228, w: 173, h: 152, x: canvas.width / 2 - 173 / 2, y: 60, draw() { if (gameState.current === gameState.getReady) { ctx.drawImage(sprite, this.sX, this.sY, this.w, this.h, this.x, this.y, this.w, this.h); } } } // draw game over Image const gameOver = { sX: 175, sY: 228, w: 225, h: 202, x: canvas.width / 2 - 225 / 2, y: 100, draw() { if (gameState.current === gameState.gameOver) { ctx.drawImage(sprite, this.sX, this.sY, this.w, this.h, this.x, this.y, this.w, this.h); } } } // draw score const score = { value: 0, best: parseInt(localStorage.getItem('flappy_bird_best_score')) || 0, draw() { ctx.fillStyle = 'white'; ctx.strokeStyle = 'black'; if (gameState.current === gameState.playing) { ctx.font = '50px Permanent Marker'; ctx.lineWidth = 3; ctx.fillText(this.value, canvas.width / 2 - 10, 60); ctx.strokeText(this.value, canvas.width / 2 - 10, 60); } else if (gameState.current === gameState.gameOver) { ctx.font = '25px Permanent Marker'; ctx.lineWidth = 2; // for the score ctx.fillText(this.value, 
225, 195); ctx.strokeText(this.value, 225, 195); // for the best score ctx.fillText(this.best, 225, 240); ctx.strokeText(this.best, 225, 240); } }, reset() { this.value = 0; } } // draw medals const medals = { type : [ {sX: 450, sY: 120}, // no medal {sX: 310, sY: 110}, // platina medal {sX: 358, sY: 110}, // silver medal {sX: 310, sY: 158}, // gold medal {sX: 358, sY: 158} // bronze medal ], w: 48, h: 48, x: 70, y: 185, current: 0, draw() { if (gameState.current === gameState.gameOver) { // select the medal let medal = this.type[this.current]; if (score.value > 15) { this.current = 1; // platina medal } else if (score.value > 10) { this.current = 3; // gold medal } else if (score.value > 5) { this.current = 2; // silver medal } else if (score.value > 2) { this.current = 4; // bronze medal } else { this.current = 0; // no medal } // draw the the selected medal ctx.drawImage(sprite, medal.sX, medal.sY, this.w, this.h, this.x, this.y, this.w, this.h); } } } function update() { bird.update(); foreground.update(); pipes.update(); } function draw() { drawCanvas(); background.draw(); pipes.draw(); foreground.draw(); bird.draw(); getReady.draw(); gameOver.draw(); score.draw(); medals.draw(); } function
() { update(); draw(); frames++; } setInterval(loop, 12);
loop
identifier_name
flappy_bird.js
// get canvas const canvas = document.getElementById('canvas'); // get context 2d const ctx = canvas.getContext('2d'); // load sprite.png const sprite = new Image(); sprite.src = 'image.png'; // we need to keep track of the number of frames let frames = 0; // to rotate the bird, the ctx.rotation requires radians. So we need to convert the degrees to radians function toRadian(degree) { return degree * Math.PI / 180; } // draw clear canvas function drawCanvas(){ ctx.fillStyle = 'lightblue'; ctx.fillRect(0, 0, canvas.width, canvas.height); } // track the current gamestate const gameState = { current: 0, getReady: 0, playing: 1, gameOver: 2 } // get the real mouse position function mousePos(canvas, e) { let rect = canvas.getBoundingClientRect(); let scaleX = canvas.width / rect.width; let scaleY = canvas.height / rect.height; return { x: (e.clientX - rect.left) * scaleX, y: (e.clientY - rect.top) * scaleY } } // get the real touch position function touchPos(canvas, e) { let rect = canvas.getBoundingClientRect(); let scaleX = canvas.width / rect.width; let scaleY = canvas.height / rect.height; return { x: (e.touches[0].clientX - rect.left) * scaleX, y: (e.touches[0].clientY - rect.top) * scaleY } } function changeGameState(e) { e.preventDefault(); if (e.type === 'touchstart') { switch (gameState.current) { case gameState.getReady: gameState.current = gameState.playing; break; case gameState.playing: bird.fly(); break; case gameState.gameOver: // if the click is inside the start Button, restart the game let position = touchPos(canvas, e); if (position.x > startButton.x && position.x < startButton.x + startButton.w && position.y > startButton.y && position.y < startButton.y + startButton.h) { pipes.reset(); score.reset(); gameState.current = gameState.getReady; } break; } } else if (e.type === 'click') { switch (gameState.current) { case gameState.getReady: gameState.current = gameState.playing; break; case gameState.playing: bird.fly(); break; case gameState.gameOver: // 
if the click is inside the start Button, restart the game let position = mousePos(canvas, e); if (position.x > startButton.x && position.x < startButton.x + startButton.w && position.y > startButton.y && position.y < startButton.y + startButton.h) { pipes.reset(); score.reset(); gameState.current = gameState.getReady; } break; } } } // start button const startButton = { x: 120, y: 275, w: 80, h: 25 } // add click event listener to switch the gamestate and play the game canvas.addEventListener('click', changeGameState); canvas.addEventListener('touchstart', changeGameState); // draw background const background = { sX: 0, sY: 0, w: 275, h: 226, x: 0, y: canvas.height - 226, draw() { // draw background twice because the image does not fit the entire canvas ctx.drawImage(sprite, this.sX, this.sY, this.w, this.h, this.x, this.y, this.w, this.h); ctx.drawImage(sprite, this.sX, this.sY, this.w, this.h, this.x + this.w, this.y, this.w, this.h); } } // draw foreground const foreground = { sX: 276, sY: 0, w: 224, h: 112, x: 0, y: canvas.height - 112, dx: 2, update() { // if the game is in playing mode than move the foreground if (gameState.current === gameState.playing) { // when its reaches half of the width, it resets so that the foreground doesn't leave the canvas this.x = (this.x - this.dx) % (this.w / 2); } }, draw() { // draw foreground twice because the image does not fit the entire canvas ctx.drawImage(sprite, this.sX, this.sY, this.w, this.h, this.x, this.y, this.w, this.h); ctx.drawImage(sprite, this.sX, this.sY, this.w, this.h, this.x + this.w, this.y, this.w, this.h); } } // draw bird const bird = { animation: [ {sX: 276, sY: 112}, {sX: 276, sY: 139}, {sX: 276, sY: 164}, {sX: 276, sY: 139} ], x: 70, y: 120, w: 34, h: 26, frame: 0, rotation: 0, gravity: 0.2, speed: 0, flight: 5, radius: 12, fly() { this.speed = -this.flight; }, update() { let birdImg = this.animation[this.frame]; // bird changes animation every 5 frames this.frame += frames % 10 === 0 ? 
1 : 0; this.frame = this.frame % this.animation.length; if (gameState.current === gameState.getReady) { this.y = 120; this.rotation = 0; this.speed = 0; } else { // bird falls because of gravity and the more it falls the faster it falls this.speed += this.gravity; this.y += this.speed; // if the bird touches the ground it's game over if (this.y + this.h / 2 >= foreground.y) { // bird rests in the ground this.y = foreground.y - this.h / 2; // and no longer flaps its wings this.frame = 1; gameState.current = gameState.gameOver; } if (this.speed >= this.flight) { this.rotation = toRadian(90); } else {
this.rotation = toRadian(-20); } } }, draw() { let birdImg = this.animation[this.frame]; // rotate the bird ctx.save(); ctx.translate(this.x, this.y); ctx.rotate(this.rotation); // coordinates center in bird image rather than up-left corner ctx.drawImage(sprite, birdImg.sX, birdImg.sY, this.w, this.h, - this.w / 2, - this.h / 2, this.w, this.h); ctx.restore(); } } // draw pipes const pipes = { top: { sX: 553, sY: 0 }, bot: { sX: 502, sY: 0 }, w: 53, h: 400, gap: 120, dx: 2, total: [], max: -180, update() { // if the game is not in playing mode there is no pipe update if (gameState.current !== gameState.playing) return; // every 100 frames create a new pipe if (frames % 100 === 0) { this.total.push({ x: canvas.width, y: this.max * (Math.random() + 1) }); } // loop through the total array (holding all the pipe coordinates) for (let i = 0; i < this.total.length; i++) { let pipe = this.total[i]; // update its x position, so the pipes move to the left pipe.x -= this.dx; // when the pipe gets out out the canvas remove it if (pipe.x + this.w < 0) { // the first pipe getting out of the canvas will be the first element in the total array this.total.shift(); } if (bird.x === pipe.x + this.w + 1) { // when the bird goes through the pipe the score increases score.value += 1; score.best = Math.max(score.best, score.value); localStorage.setItem('flappy_bird_best_score', score.best); } // check if bird collides with top pipe if (bird.x + bird.radius > pipe.x && bird.x - bird.radius < pipe.x + this.w && bird.y + bird.radius > pipe.y && bird.y - bird.radius < pipe.y + this.h) { gameState.current = gameState.gameOver; } // check if bird collides with bot pipe if (bird.x + bird.radius > pipe.x && bird.x - bird.radius < pipe.x + this.w && bird.y + bird.radius > pipe.y + this.h + this.gap && bird.y - bird.radius < pipe.y + this.h + this.gap + this.gap) { gameState.current = gameState.gameOver; } } }, draw() { for (let i = 0; i < this.total.length; i++) { // draw the top pipe 
ctx.drawImage(sprite, this.top.sX, this.top.sY, this.w, this.h, this.total[i].x, this.total[i].y, this.w, this.h); // based on the top pipe position, draw the bot pipe ctx.drawImage(sprite, this.bot.sX, this.bot.sY, this.w, this.h, this.total[i].x, this.total[i].y + this.h + this.gap, this.w, this.h); } }, reset() { this.total = []; } } // draw Get Ready Image const getReady = { sX: 0, sY: 228, w: 173, h: 152, x: canvas.width / 2 - 173 / 2, y: 60, draw() { if (gameState.current === gameState.getReady) { ctx.drawImage(sprite, this.sX, this.sY, this.w, this.h, this.x, this.y, this.w, this.h); } } } // draw game over Image const gameOver = { sX: 175, sY: 228, w: 225, h: 202, x: canvas.width / 2 - 225 / 2, y: 100, draw() { if (gameState.current === gameState.gameOver) { ctx.drawImage(sprite, this.sX, this.sY, this.w, this.h, this.x, this.y, this.w, this.h); } } } // draw score const score = { value: 0, best: parseInt(localStorage.getItem('flappy_bird_best_score')) || 0, draw() { ctx.fillStyle = 'white'; ctx.strokeStyle = 'black'; if (gameState.current === gameState.playing) { ctx.font = '50px Permanent Marker'; ctx.lineWidth = 3; ctx.fillText(this.value, canvas.width / 2 - 10, 60); ctx.strokeText(this.value, canvas.width / 2 - 10, 60); } else if (gameState.current === gameState.gameOver) { ctx.font = '25px Permanent Marker'; ctx.lineWidth = 2; // for the score ctx.fillText(this.value, 225, 195); ctx.strokeText(this.value, 225, 195); // for the best score ctx.fillText(this.best, 225, 240); ctx.strokeText(this.best, 225, 240); } }, reset() { this.value = 0; } } // draw medals const medals = { type : [ {sX: 450, sY: 120}, // no medal {sX: 310, sY: 110}, // platina medal {sX: 358, sY: 110}, // silver medal {sX: 310, sY: 158}, // gold medal {sX: 358, sY: 158} // bronze medal ], w: 48, h: 48, x: 70, y: 185, current: 0, draw() { if (gameState.current === gameState.gameOver) { // select the medal let medal = this.type[this.current]; if (score.value > 15) { this.current = 1; // 
platina medal } else if (score.value > 10) { this.current = 3; // gold medal } else if (score.value > 5) { this.current = 2; // silver medal } else if (score.value > 2) { this.current = 4; // bronze medal } else { this.current = 0; // no medal } // draw the the selected medal ctx.drawImage(sprite, medal.sX, medal.sY, this.w, this.h, this.x, this.y, this.w, this.h); } } } function update() { bird.update(); foreground.update(); pipes.update(); } function draw() { drawCanvas(); background.draw(); pipes.draw(); foreground.draw(); bird.draw(); getReady.draw(); gameOver.draw(); score.draw(); medals.draw(); } function loop() { update(); draw(); frames++; } setInterval(loop, 12);
random_line_split
flappy_bird.js
// get canvas const canvas = document.getElementById('canvas'); // get context 2d const ctx = canvas.getContext('2d'); // load sprite.png const sprite = new Image(); sprite.src = 'image.png'; // we need to keep track of the number of frames let frames = 0; // to rotate the bird, the ctx.rotation requires radians. So we need to convert the degrees to radians function toRadian(degree) { return degree * Math.PI / 180; } // draw clear canvas function drawCanvas(){ ctx.fillStyle = 'lightblue'; ctx.fillRect(0, 0, canvas.width, canvas.height); } // track the current gamestate const gameState = { current: 0, getReady: 0, playing: 1, gameOver: 2 } // get the real mouse position function mousePos(canvas, e) { let rect = canvas.getBoundingClientRect(); let scaleX = canvas.width / rect.width; let scaleY = canvas.height / rect.height; return { x: (e.clientX - rect.left) * scaleX, y: (e.clientY - rect.top) * scaleY } } // get the real touch position function touchPos(canvas, e) { let rect = canvas.getBoundingClientRect(); let scaleX = canvas.width / rect.width; let scaleY = canvas.height / rect.height; return { x: (e.touches[0].clientX - rect.left) * scaleX, y: (e.touches[0].clientY - rect.top) * scaleY } } function changeGameState(e) { e.preventDefault(); if (e.type === 'touchstart') { switch (gameState.current) { case gameState.getReady: gameState.current = gameState.playing; break; case gameState.playing: bird.fly(); break; case gameState.gameOver: // if the click is inside the start Button, restart the game let position = touchPos(canvas, e); if (position.x > startButton.x && position.x < startButton.x + startButton.w && position.y > startButton.y && position.y < startButton.y + startButton.h) { pipes.reset(); score.reset(); gameState.current = gameState.getReady; } break; } } else if (e.type === 'click') { switch (gameState.current) { case gameState.getReady: gameState.current = gameState.playing; break; case gameState.playing: bird.fly(); break; case gameState.gameOver: // 
if the click is inside the start Button, restart the game let position = mousePos(canvas, e); if (position.x > startButton.x && position.x < startButton.x + startButton.w && position.y > startButton.y && position.y < startButton.y + startButton.h) { pipes.reset(); score.reset(); gameState.current = gameState.getReady; } break; } } } // start button const startButton = { x: 120, y: 275, w: 80, h: 25 } // add click event listener to switch the gamestate and play the game canvas.addEventListener('click', changeGameState); canvas.addEventListener('touchstart', changeGameState); // draw background const background = { sX: 0, sY: 0, w: 275, h: 226, x: 0, y: canvas.height - 226, draw() { // draw background twice because the image does not fit the entire canvas ctx.drawImage(sprite, this.sX, this.sY, this.w, this.h, this.x, this.y, this.w, this.h); ctx.drawImage(sprite, this.sX, this.sY, this.w, this.h, this.x + this.w, this.y, this.w, this.h); } } // draw foreground const foreground = { sX: 276, sY: 0, w: 224, h: 112, x: 0, y: canvas.height - 112, dx: 2, update() { // if the game is in playing mode than move the foreground if (gameState.current === gameState.playing) { // when its reaches half of the width, it resets so that the foreground doesn't leave the canvas this.x = (this.x - this.dx) % (this.w / 2); } }, draw() { // draw foreground twice because the image does not fit the entire canvas ctx.drawImage(sprite, this.sX, this.sY, this.w, this.h, this.x, this.y, this.w, this.h); ctx.drawImage(sprite, this.sX, this.sY, this.w, this.h, this.x + this.w, this.y, this.w, this.h); } } // draw bird const bird = { animation: [ {sX: 276, sY: 112}, {sX: 276, sY: 139}, {sX: 276, sY: 164}, {sX: 276, sY: 139} ], x: 70, y: 120, w: 34, h: 26, frame: 0, rotation: 0, gravity: 0.2, speed: 0, flight: 5, radius: 12, fly() { this.speed = -this.flight; }, update() { let birdImg = this.animation[this.frame]; // bird changes animation every 5 frames this.frame += frames % 10 === 0 ? 
1 : 0; this.frame = this.frame % this.animation.length; if (gameState.current === gameState.getReady) { this.y = 120; this.rotation = 0; this.speed = 0; } else { // bird falls because of gravity and the more it falls the faster it falls this.speed += this.gravity; this.y += this.speed; // if the bird touches the ground it's game over if (this.y + this.h / 2 >= foreground.y) { // bird rests in the ground this.y = foreground.y - this.h / 2; // and no longer flaps its wings this.frame = 1; gameState.current = gameState.gameOver; } if (this.speed >= this.flight) { this.rotation = toRadian(90); } else { this.rotation = toRadian(-20); } } }, draw() { let birdImg = this.animation[this.frame]; // rotate the bird ctx.save(); ctx.translate(this.x, this.y); ctx.rotate(this.rotation); // coordinates center in bird image rather than up-left corner ctx.drawImage(sprite, birdImg.sX, birdImg.sY, this.w, this.h, - this.w / 2, - this.h / 2, this.w, this.h); ctx.restore(); } } // draw pipes const pipes = { top: { sX: 553, sY: 0 }, bot: { sX: 502, sY: 0 }, w: 53, h: 400, gap: 120, dx: 2, total: [], max: -180, update() { // if the game is not in playing mode there is no pipe update if (gameState.current !== gameState.playing) return; // every 100 frames create a new pipe if (frames % 100 === 0) { this.total.push({ x: canvas.width, y: this.max * (Math.random() + 1) }); } // loop through the total array (holding all the pipe coordinates) for (let i = 0; i < this.total.length; i++) { let pipe = this.total[i]; // update its x position, so the pipes move to the left pipe.x -= this.dx; // when the pipe gets out out the canvas remove it if (pipe.x + this.w < 0) { // the first pipe getting out of the canvas will be the first element in the total array this.total.shift(); } if (bird.x === pipe.x + this.w + 1) { // when the bird goes through the pipe the score increases score.value += 1; score.best = Math.max(score.best, score.value); localStorage.setItem('flappy_bird_best_score', score.best); } 
// check if bird collides with top pipe if (bird.x + bird.radius > pipe.x && bird.x - bird.radius < pipe.x + this.w && bird.y + bird.radius > pipe.y && bird.y - bird.radius < pipe.y + this.h) { gameState.current = gameState.gameOver; } // check if bird collides with bot pipe if (bird.x + bird.radius > pipe.x && bird.x - bird.radius < pipe.x + this.w && bird.y + bird.radius > pipe.y + this.h + this.gap && bird.y - bird.radius < pipe.y + this.h + this.gap + this.gap) { gameState.current = gameState.gameOver; } } }, draw() { for (let i = 0; i < this.total.length; i++) { // draw the top pipe ctx.drawImage(sprite, this.top.sX, this.top.sY, this.w, this.h, this.total[i].x, this.total[i].y, this.w, this.h); // based on the top pipe position, draw the bot pipe ctx.drawImage(sprite, this.bot.sX, this.bot.sY, this.w, this.h, this.total[i].x, this.total[i].y + this.h + this.gap, this.w, this.h); } }, reset() { this.total = []; } } // draw Get Ready Image const getReady = { sX: 0, sY: 228, w: 173, h: 152, x: canvas.width / 2 - 173 / 2, y: 60, draw()
} // draw game over Image const gameOver = { sX: 175, sY: 228, w: 225, h: 202, x: canvas.width / 2 - 225 / 2, y: 100, draw() { if (gameState.current === gameState.gameOver) { ctx.drawImage(sprite, this.sX, this.sY, this.w, this.h, this.x, this.y, this.w, this.h); } } } // draw score const score = { value: 0, best: parseInt(localStorage.getItem('flappy_bird_best_score')) || 0, draw() { ctx.fillStyle = 'white'; ctx.strokeStyle = 'black'; if (gameState.current === gameState.playing) { ctx.font = '50px Permanent Marker'; ctx.lineWidth = 3; ctx.fillText(this.value, canvas.width / 2 - 10, 60); ctx.strokeText(this.value, canvas.width / 2 - 10, 60); } else if (gameState.current === gameState.gameOver) { ctx.font = '25px Permanent Marker'; ctx.lineWidth = 2; // for the score ctx.fillText(this.value, 225, 195); ctx.strokeText(this.value, 225, 195); // for the best score ctx.fillText(this.best, 225, 240); ctx.strokeText(this.best, 225, 240); } }, reset() { this.value = 0; } } // draw medals const medals = { type : [ {sX: 450, sY: 120}, // no medal {sX: 310, sY: 110}, // platina medal {sX: 358, sY: 110}, // silver medal {sX: 310, sY: 158}, // gold medal {sX: 358, sY: 158} // bronze medal ], w: 48, h: 48, x: 70, y: 185, current: 0, draw() { if (gameState.current === gameState.gameOver) { // select the medal let medal = this.type[this.current]; if (score.value > 15) { this.current = 1; // platina medal } else if (score.value > 10) { this.current = 3; // gold medal } else if (score.value > 5) { this.current = 2; // silver medal } else if (score.value > 2) { this.current = 4; // bronze medal } else { this.current = 0; // no medal } // draw the the selected medal ctx.drawImage(sprite, medal.sX, medal.sY, this.w, this.h, this.x, this.y, this.w, this.h); } } } function update() { bird.update(); foreground.update(); pipes.update(); } function draw() { drawCanvas(); background.draw(); pipes.draw(); foreground.draw(); bird.draw(); getReady.draw(); gameOver.draw(); score.draw(); 
medals.draw(); } function loop() { update(); draw(); frames++; } setInterval(loop, 12);
{ if (gameState.current === gameState.getReady) { ctx.drawImage(sprite, this.sX, this.sY, this.w, this.h, this.x, this.y, this.w, this.h); } }
identifier_body
lib.rs
mod data; mod nn; use std::mem; use std::slice; //use std::os::raw::{/*c_double, c_int, */c_void}; // for js functions imports use once_cell::sync::Lazy; use std::sync::Mutex; // for lazy_static // for global variables use ndarray::prelude::*; use ndarray::{array, Array, Array1, Array3, Axis, Zip}; use data::Data; use nn::Network; #[derive(Default)] struct MetaData { fc_size: u32, num_classes: u32, descent_rate: f32, regular_rate: f32, } #[derive(Default)] struct CriticalSection(MetaData, Data, Network); // Imported js functions extern "C" { // for debug fn log_u64(num: u32); // for data pointer draw // x,y: the offset from upper left corner // label: a fractal which represents the position current label is in total // position range fn draw_point(x: u32, y: u32, label_ratio: f32); } static DATA: Lazy<Mutex<CriticalSection>> = Lazy::new(|| Mutex::default()); #[no_mangle] // This function returns the offset of the allocated buffer in wasm memory pub fn alloc(size: u32) -> *mut u8 { let mut buffer: Vec<u8> = Vec::with_capacity(size as usize); let buffer_ptr = buffer.as_mut_ptr(); mem::forget(buffer); buffer_ptr } #[no_mangle] pub fn free(buffer_ptr: *mut u8, size: u32) { let _ = unsafe { Vec::from_raw_parts(buffer_ptr, 0, size as usize) }; } #[no_mangle] pub fn init( data_radius: f32, data_spin_span: f32, data_num: u32, num_classes: u32, data_gen_rand_max: f32, network_gen_rand_max: f32, fc_size: u32, descent_rate: f32, regular_rate: f32, ) { // Thanks rust compiler :-/ let ref mut tmp = *DATA.lock().unwrap(); let CriticalSection(metadata, data, network) = tmp; metadata.fc_size = fc_size; metadata.num_classes = num_classes; metadata.descent_rate = descent_rate; metadata.regular_rate = regular_rate; // Num of each data class is the same data.init( num_classes, data_num / num_classes, data_radius, data_spin_span, data_gen_rand_max, ); // Input of this network is two dimension points // output label is sparsed num_classes integer const PLANE_DIMENSION: u32 = 2; 
network.init(PLANE_DIMENSION, fc_size, num_classes, network_gen_rand_max); } #[no_mangle] pub fn train() -> f32 { let ref mut tmp = *DATA.lock().unwrap(); // Jesus, thats magic let CriticalSection(ref metadata, ref data, ref mut network) = *tmp; let regular_rate = metadata.regular_rate; let descent_rate = metadata.descent_rate; let (fc_layer, softmax) = network.forward_propagation(&data.points); let (dw1, db1, dw2, db2) = network.back_propagation( &data.points, &fc_layer, &softmax, &data.labels, regular_rate, ); let loss = network.loss(&softmax, &data.labels, regular_rate); network.descent(&dw1, &db1, &dw2, &db2, descent_rate); let (data_loss, regular_loss) = loss; data_loss + regular_loss } // Plot classified backgroud to canvas // span_least The least span of area should be drawn to canvas(because usually the canvas is not square) #[no_mangle] pub fn draw_prediction(canvas: *mut u8, width: u32, height: u32, span_least: f32) { // assert!(span_least > 0f32); let width = width as usize; let height = height as usize; // `data` will be used to draw data points let ref tmp = *DATA.lock().unwrap(); let CriticalSection(metadata, _, network) = tmp; let num_classes = metadata.num_classes as usize; let r: Array1<f32> = Array::linspace(0f32, 200f32, num_classes); let g: Array1<f32> = Array::linspace(0f32, 240f32, num_classes); let b: Array1<f32> = Array::linspace(0f32, 255f32, num_classes); let span_per_pixel = span_least / width.min(height) as f32; let span_height = height as f32 * span_per_pixel; let span_width = width as f32 * span_per_pixel; let width_max = span_width / 2f32; let width_min = -span_width / 2f32; let height_max = span_height / 2f32; let height_min = -span_height / 2f32; let x_axis: Array1<f32> = Array::linspace(width_min, width_max, width); let y_axis: Array1<f32> = Array::linspace(height_min, height_max, height); // coordination let mut grid: Array3<f32> = Array::zeros((height, width, 2)); for y in 0..height { for x in 0..width { let coord = 
array![x_axis[[x]], y_axis[[y]]]; let mut slice = grid.slice_mut(s![y, x, ..]); slice.assign(&coord); } } let xys = grid.into_shape((height * width, 2)).unwrap(); let (_, softmax) = network.forward_propagation(&xys); let mut labels: Array1<usize> = Array::zeros(height * width); for (y, row) in softmax.axis_iter(Axis(0)).enumerate() { let mut maxx = 0 as usize; let mut max = row[[0]]; for (x, col) in row.iter().enumerate() { if *col > max { maxx = x; max = *col; } } labels[[y]] = maxx; } let grid_label = labels.into_shape((height, width)).unwrap(); let canvas_size = width * height * 4; let canvas: &mut [u8] = unsafe { slice::from_raw_parts_mut(canvas, canvas_size) }; for y in 0..height { for x in 0..width { // assume rgba canvas[4 * (y * width + x) + 0] = r[[grid_label[[y, x]]]] as u8; canvas[4 * (y * width + x) + 1] = g[[grid_label[[y, x]]]] as u8; canvas[4 * (y * width + x) + 2] = b[[grid_label[[y, x]]]] as u8; canvas[4 * (y * width + x) + 3] = 0xFF as u8; } } } // check parameters for function below which draws predictions #[no_mangle] pub fn draw_points(width: u32, height: u32, span_least: f32) { let ref tmp = *DATA.lock().unwrap(); let CriticalSection(metadata, data, _) = tmp; let num_classes = metadata.num_classes as f32; let pixel_per_span = width.min(height) as f32 / span_least; let labels = &data.labels; let points = &data.points; let points_x = points.index_axis(Axis(1), 0); let points_y = points.index_axis(Axis(1), 1); Zip::from(labels) .and(points_x) .and(points_y) .apply(|&label, &x, &y| { // Assume data position is limited in: // [-data_radius - data_rand_max, data_radius + data_rand_max] let x = (x * pixel_per_span) as i64 + width as i64 / 2; let y = (y * pixel_per_span) as i64 + height as i64 / 2; // if points can show in canvas if !(x >= width as i64 || x < 0 || y >= height as i64 || y < 0)
}); } #[cfg(test)] mod kernel_test { use super::*; static POINT_DRAW_TIMES: Lazy<Mutex<u32>> = Lazy::new(|| Mutex::new(0)); // Override the extern functions #[no_mangle] extern "C" fn draw_point(_: u32, _: u32, _: f32) { *POINT_DRAW_TIMES.lock().unwrap() += 1; } use std::f32::consts::PI; // for math functions const DATA_GEN_RADIUS: f32 = 1f32; const SPIN_SPAN: f32 = PI; const NUM_CLASSES: u32 = 3; const DATA_NUM: u32 = 300; const FC_SIZE: u32 = 100; const REGULAR_RATE: f32 = 0.001f32; const DESCENT_RATE: f32 = 1f32; const DATA_GEN_RAND_MAX: f32 = 0.25f32; const NETWORK_GEN_RAND_MAX: f32 = 0.1f32; #[test] fn test_all() { init( DATA_GEN_RADIUS, SPIN_SPAN, DATA_NUM, NUM_CLASSES, DATA_GEN_RAND_MAX, NETWORK_GEN_RAND_MAX, FC_SIZE, DESCENT_RATE, REGULAR_RATE, ); let loss_before: f32 = train(); for _ in 0..50 { let loss = train(); assert!(loss < loss_before * 1.1f32); } } #[test] fn test_buffer_allocation() { let buffer = alloc(114514); free(buffer, 114514); } #[test] fn test_draw_prediction() { init( DATA_GEN_RADIUS, SPIN_SPAN, DATA_NUM, NUM_CLASSES, DATA_GEN_RAND_MAX, NETWORK_GEN_RAND_MAX, FC_SIZE, DESCENT_RATE, REGULAR_RATE, ); let width = 100; let height = 100; let buffer = alloc(width * height * 4); draw_prediction(buffer, width, height, 2f32); free(buffer, width * height * 4); } #[test] fn test_draw_points() { // Because cargo test is default multi-thread, put them together to avoid data_racing // span_least * 1.1 for padding init( DATA_GEN_RADIUS, SPIN_SPAN, DATA_NUM, NUM_CLASSES, DATA_GEN_RAND_MAX, NETWORK_GEN_RAND_MAX, FC_SIZE, DESCENT_RATE, REGULAR_RATE, ); // test small resolution drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(1, 1, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test tall screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(1, 100, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test flat screen drawing 
*POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(1, 100, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test square screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(100, 100, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test huge screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(10000000, 1000000, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); } }
{ // floor let x = x as u32; let y = y as u32; let label_ratio = label as f32 / num_classes; unsafe { draw_point(x, y, label_ratio); } }
conditional_block
lib.rs
mod data; mod nn; use std::mem; use std::slice; //use std::os::raw::{/*c_double, c_int, */c_void}; // for js functions imports use once_cell::sync::Lazy; use std::sync::Mutex; // for lazy_static // for global variables use ndarray::prelude::*; use ndarray::{array, Array, Array1, Array3, Axis, Zip}; use data::Data; use nn::Network; #[derive(Default)] struct MetaData { fc_size: u32, num_classes: u32, descent_rate: f32, regular_rate: f32, } #[derive(Default)] struct CriticalSection(MetaData, Data, Network); // Imported js functions extern "C" { // for debug fn log_u64(num: u32); // for data pointer draw // x,y: the offset from upper left corner // label: a fractal which represents the position current label is in total // position range fn draw_point(x: u32, y: u32, label_ratio: f32); } static DATA: Lazy<Mutex<CriticalSection>> = Lazy::new(|| Mutex::default()); #[no_mangle] // This function returns the offset of the allocated buffer in wasm memory pub fn alloc(size: u32) -> *mut u8 { let mut buffer: Vec<u8> = Vec::with_capacity(size as usize); let buffer_ptr = buffer.as_mut_ptr(); mem::forget(buffer); buffer_ptr } #[no_mangle] pub fn free(buffer_ptr: *mut u8, size: u32) { let _ = unsafe { Vec::from_raw_parts(buffer_ptr, 0, size as usize) }; } #[no_mangle] pub fn init( data_radius: f32, data_spin_span: f32, data_num: u32, num_classes: u32, data_gen_rand_max: f32, network_gen_rand_max: f32, fc_size: u32, descent_rate: f32, regular_rate: f32, ) { // Thanks rust compiler :-/ let ref mut tmp = *DATA.lock().unwrap(); let CriticalSection(metadata, data, network) = tmp; metadata.fc_size = fc_size; metadata.num_classes = num_classes; metadata.descent_rate = descent_rate; metadata.regular_rate = regular_rate; // Num of each data class is the same data.init( num_classes, data_num / num_classes, data_radius, data_spin_span, data_gen_rand_max, ); // Input of this network is two dimension points // output label is sparsed num_classes integer const PLANE_DIMENSION: u32 = 2; 
network.init(PLANE_DIMENSION, fc_size, num_classes, network_gen_rand_max); } #[no_mangle] pub fn train() -> f32 { let ref mut tmp = *DATA.lock().unwrap(); // Jesus, thats magic let CriticalSection(ref metadata, ref data, ref mut network) = *tmp; let regular_rate = metadata.regular_rate; let descent_rate = metadata.descent_rate; let (fc_layer, softmax) = network.forward_propagation(&data.points); let (dw1, db1, dw2, db2) = network.back_propagation( &data.points, &fc_layer, &softmax, &data.labels, regular_rate, ); let loss = network.loss(&softmax, &data.labels, regular_rate); network.descent(&dw1, &db1, &dw2, &db2, descent_rate); let (data_loss, regular_loss) = loss; data_loss + regular_loss } // Plot classified backgroud to canvas // span_least The least span of area should be drawn to canvas(because usually the canvas is not square) #[no_mangle] pub fn draw_prediction(canvas: *mut u8, width: u32, height: u32, span_least: f32) { // assert!(span_least > 0f32); let width = width as usize; let height = height as usize; // `data` will be used to draw data points let ref tmp = *DATA.lock().unwrap(); let CriticalSection(metadata, _, network) = tmp; let num_classes = metadata.num_classes as usize; let r: Array1<f32> = Array::linspace(0f32, 200f32, num_classes); let g: Array1<f32> = Array::linspace(0f32, 240f32, num_classes); let b: Array1<f32> = Array::linspace(0f32, 255f32, num_classes); let span_per_pixel = span_least / width.min(height) as f32; let span_height = height as f32 * span_per_pixel; let span_width = width as f32 * span_per_pixel; let width_max = span_width / 2f32; let width_min = -span_width / 2f32; let height_max = span_height / 2f32; let height_min = -span_height / 2f32; let x_axis: Array1<f32> = Array::linspace(width_min, width_max, width); let y_axis: Array1<f32> = Array::linspace(height_min, height_max, height); // coordination let mut grid: Array3<f32> = Array::zeros((height, width, 2)); for y in 0..height { for x in 0..width { let coord = 
array![x_axis[[x]], y_axis[[y]]]; let mut slice = grid.slice_mut(s![y, x, ..]); slice.assign(&coord); } } let xys = grid.into_shape((height * width, 2)).unwrap(); let (_, softmax) = network.forward_propagation(&xys); let mut labels: Array1<usize> = Array::zeros(height * width); for (y, row) in softmax.axis_iter(Axis(0)).enumerate() { let mut maxx = 0 as usize; let mut max = row[[0]]; for (x, col) in row.iter().enumerate() { if *col > max { maxx = x; max = *col; } } labels[[y]] = maxx; } let grid_label = labels.into_shape((height, width)).unwrap(); let canvas_size = width * height * 4; let canvas: &mut [u8] = unsafe { slice::from_raw_parts_mut(canvas, canvas_size) }; for y in 0..height { for x in 0..width { // assume rgba canvas[4 * (y * width + x) + 0] = r[[grid_label[[y, x]]]] as u8; canvas[4 * (y * width + x) + 1] = g[[grid_label[[y, x]]]] as u8; canvas[4 * (y * width + x) + 2] = b[[grid_label[[y, x]]]] as u8; canvas[4 * (y * width + x) + 3] = 0xFF as u8; } } } // check parameters for function below which draws predictions #[no_mangle] pub fn draw_points(width: u32, height: u32, span_least: f32) { let ref tmp = *DATA.lock().unwrap(); let CriticalSection(metadata, data, _) = tmp; let num_classes = metadata.num_classes as f32; let pixel_per_span = width.min(height) as f32 / span_least; let labels = &data.labels; let points = &data.points; let points_x = points.index_axis(Axis(1), 0); let points_y = points.index_axis(Axis(1), 1); Zip::from(labels) .and(points_x) .and(points_y) .apply(|&label, &x, &y| { // Assume data position is limited in: // [-data_radius - data_rand_max, data_radius + data_rand_max] let x = (x * pixel_per_span) as i64 + width as i64 / 2; let y = (y * pixel_per_span) as i64 + height as i64 / 2; // if points can show in canvas if !(x >= width as i64 || x < 0 || y >= height as i64 || y < 0) { // floor let x = x as u32; let y = y as u32; let label_ratio = label as f32 / num_classes; unsafe { draw_point(x, y, label_ratio); } } }); } #[cfg(test)] mod 
kernel_test { use super::*; static POINT_DRAW_TIMES: Lazy<Mutex<u32>> = Lazy::new(|| Mutex::new(0)); // Override the extern functions #[no_mangle] extern "C" fn draw_point(_: u32, _: u32, _: f32) {
const DATA_GEN_RADIUS: f32 = 1f32; const SPIN_SPAN: f32 = PI; const NUM_CLASSES: u32 = 3; const DATA_NUM: u32 = 300; const FC_SIZE: u32 = 100; const REGULAR_RATE: f32 = 0.001f32; const DESCENT_RATE: f32 = 1f32; const DATA_GEN_RAND_MAX: f32 = 0.25f32; const NETWORK_GEN_RAND_MAX: f32 = 0.1f32; #[test] fn test_all() { init( DATA_GEN_RADIUS, SPIN_SPAN, DATA_NUM, NUM_CLASSES, DATA_GEN_RAND_MAX, NETWORK_GEN_RAND_MAX, FC_SIZE, DESCENT_RATE, REGULAR_RATE, ); let loss_before: f32 = train(); for _ in 0..50 { let loss = train(); assert!(loss < loss_before * 1.1f32); } } #[test] fn test_buffer_allocation() { let buffer = alloc(114514); free(buffer, 114514); } #[test] fn test_draw_prediction() { init( DATA_GEN_RADIUS, SPIN_SPAN, DATA_NUM, NUM_CLASSES, DATA_GEN_RAND_MAX, NETWORK_GEN_RAND_MAX, FC_SIZE, DESCENT_RATE, REGULAR_RATE, ); let width = 100; let height = 100; let buffer = alloc(width * height * 4); draw_prediction(buffer, width, height, 2f32); free(buffer, width * height * 4); } #[test] fn test_draw_points() { // Because cargo test is default multi-thread, put them together to avoid data_racing // span_least * 1.1 for padding init( DATA_GEN_RADIUS, SPIN_SPAN, DATA_NUM, NUM_CLASSES, DATA_GEN_RAND_MAX, NETWORK_GEN_RAND_MAX, FC_SIZE, DESCENT_RATE, REGULAR_RATE, ); // test small resolution drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(1, 1, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test tall screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(1, 100, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test flat screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(1, 100, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test square screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(100, 100, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // 
test huge screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(10000000, 1000000, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); } }
*POINT_DRAW_TIMES.lock().unwrap() += 1; } use std::f32::consts::PI; // for math functions
random_line_split
lib.rs
mod data; mod nn; use std::mem; use std::slice; //use std::os::raw::{/*c_double, c_int, */c_void}; // for js functions imports use once_cell::sync::Lazy; use std::sync::Mutex; // for lazy_static // for global variables use ndarray::prelude::*; use ndarray::{array, Array, Array1, Array3, Axis, Zip}; use data::Data; use nn::Network; #[derive(Default)] struct MetaData { fc_size: u32, num_classes: u32, descent_rate: f32, regular_rate: f32, } #[derive(Default)] struct CriticalSection(MetaData, Data, Network); // Imported js functions extern "C" { // for debug fn log_u64(num: u32); // for data pointer draw // x,y: the offset from upper left corner // label: a fractal which represents the position current label is in total // position range fn draw_point(x: u32, y: u32, label_ratio: f32); } static DATA: Lazy<Mutex<CriticalSection>> = Lazy::new(|| Mutex::default()); #[no_mangle] // This function returns the offset of the allocated buffer in wasm memory pub fn alloc(size: u32) -> *mut u8 { let mut buffer: Vec<u8> = Vec::with_capacity(size as usize); let buffer_ptr = buffer.as_mut_ptr(); mem::forget(buffer); buffer_ptr } #[no_mangle] pub fn free(buffer_ptr: *mut u8, size: u32) { let _ = unsafe { Vec::from_raw_parts(buffer_ptr, 0, size as usize) }; } #[no_mangle] pub fn init( data_radius: f32, data_spin_span: f32, data_num: u32, num_classes: u32, data_gen_rand_max: f32, network_gen_rand_max: f32, fc_size: u32, descent_rate: f32, regular_rate: f32, ) { // Thanks rust compiler :-/ let ref mut tmp = *DATA.lock().unwrap(); let CriticalSection(metadata, data, network) = tmp; metadata.fc_size = fc_size; metadata.num_classes = num_classes; metadata.descent_rate = descent_rate; metadata.regular_rate = regular_rate; // Num of each data class is the same data.init( num_classes, data_num / num_classes, data_radius, data_spin_span, data_gen_rand_max, ); // Input of this network is two dimension points // output label is sparsed num_classes integer const PLANE_DIMENSION: u32 = 2; 
network.init(PLANE_DIMENSION, fc_size, num_classes, network_gen_rand_max); } #[no_mangle] pub fn train() -> f32 { let ref mut tmp = *DATA.lock().unwrap(); // Jesus, thats magic let CriticalSection(ref metadata, ref data, ref mut network) = *tmp; let regular_rate = metadata.regular_rate; let descent_rate = metadata.descent_rate; let (fc_layer, softmax) = network.forward_propagation(&data.points); let (dw1, db1, dw2, db2) = network.back_propagation( &data.points, &fc_layer, &softmax, &data.labels, regular_rate, ); let loss = network.loss(&softmax, &data.labels, regular_rate); network.descent(&dw1, &db1, &dw2, &db2, descent_rate); let (data_loss, regular_loss) = loss; data_loss + regular_loss } // Plot classified backgroud to canvas // span_least The least span of area should be drawn to canvas(because usually the canvas is not square) #[no_mangle] pub fn draw_prediction(canvas: *mut u8, width: u32, height: u32, span_least: f32) { // assert!(span_least > 0f32); let width = width as usize; let height = height as usize; // `data` will be used to draw data points let ref tmp = *DATA.lock().unwrap(); let CriticalSection(metadata, _, network) = tmp; let num_classes = metadata.num_classes as usize; let r: Array1<f32> = Array::linspace(0f32, 200f32, num_classes); let g: Array1<f32> = Array::linspace(0f32, 240f32, num_classes); let b: Array1<f32> = Array::linspace(0f32, 255f32, num_classes); let span_per_pixel = span_least / width.min(height) as f32; let span_height = height as f32 * span_per_pixel; let span_width = width as f32 * span_per_pixel; let width_max = span_width / 2f32; let width_min = -span_width / 2f32; let height_max = span_height / 2f32; let height_min = -span_height / 2f32; let x_axis: Array1<f32> = Array::linspace(width_min, width_max, width); let y_axis: Array1<f32> = Array::linspace(height_min, height_max, height); // coordination let mut grid: Array3<f32> = Array::zeros((height, width, 2)); for y in 0..height { for x in 0..width { let coord = 
array![x_axis[[x]], y_axis[[y]]]; let mut slice = grid.slice_mut(s![y, x, ..]); slice.assign(&coord); } } let xys = grid.into_shape((height * width, 2)).unwrap(); let (_, softmax) = network.forward_propagation(&xys); let mut labels: Array1<usize> = Array::zeros(height * width); for (y, row) in softmax.axis_iter(Axis(0)).enumerate() { let mut maxx = 0 as usize; let mut max = row[[0]]; for (x, col) in row.iter().enumerate() { if *col > max { maxx = x; max = *col; } } labels[[y]] = maxx; } let grid_label = labels.into_shape((height, width)).unwrap(); let canvas_size = width * height * 4; let canvas: &mut [u8] = unsafe { slice::from_raw_parts_mut(canvas, canvas_size) }; for y in 0..height { for x in 0..width { // assume rgba canvas[4 * (y * width + x) + 0] = r[[grid_label[[y, x]]]] as u8; canvas[4 * (y * width + x) + 1] = g[[grid_label[[y, x]]]] as u8; canvas[4 * (y * width + x) + 2] = b[[grid_label[[y, x]]]] as u8; canvas[4 * (y * width + x) + 3] = 0xFF as u8; } } } // check parameters for function below which draws predictions #[no_mangle] pub fn draw_points(width: u32, height: u32, span_least: f32) { let ref tmp = *DATA.lock().unwrap(); let CriticalSection(metadata, data, _) = tmp; let num_classes = metadata.num_classes as f32; let pixel_per_span = width.min(height) as f32 / span_least; let labels = &data.labels; let points = &data.points; let points_x = points.index_axis(Axis(1), 0); let points_y = points.index_axis(Axis(1), 1); Zip::from(labels) .and(points_x) .and(points_y) .apply(|&label, &x, &y| { // Assume data position is limited in: // [-data_radius - data_rand_max, data_radius + data_rand_max] let x = (x * pixel_per_span) as i64 + width as i64 / 2; let y = (y * pixel_per_span) as i64 + height as i64 / 2; // if points can show in canvas if !(x >= width as i64 || x < 0 || y >= height as i64 || y < 0) { // floor let x = x as u32; let y = y as u32; let label_ratio = label as f32 / num_classes; unsafe { draw_point(x, y, label_ratio); } } }); } #[cfg(test)] mod 
kernel_test { use super::*; static POINT_DRAW_TIMES: Lazy<Mutex<u32>> = Lazy::new(|| Mutex::new(0)); // Override the extern functions #[no_mangle] extern "C" fn draw_point(_: u32, _: u32, _: f32) { *POINT_DRAW_TIMES.lock().unwrap() += 1; } use std::f32::consts::PI; // for math functions const DATA_GEN_RADIUS: f32 = 1f32; const SPIN_SPAN: f32 = PI; const NUM_CLASSES: u32 = 3; const DATA_NUM: u32 = 300; const FC_SIZE: u32 = 100; const REGULAR_RATE: f32 = 0.001f32; const DESCENT_RATE: f32 = 1f32; const DATA_GEN_RAND_MAX: f32 = 0.25f32; const NETWORK_GEN_RAND_MAX: f32 = 0.1f32; #[test] fn test_all()
#[test] fn test_buffer_allocation() { let buffer = alloc(114514); free(buffer, 114514); } #[test] fn test_draw_prediction() { init( DATA_GEN_RADIUS, SPIN_SPAN, DATA_NUM, NUM_CLASSES, DATA_GEN_RAND_MAX, NETWORK_GEN_RAND_MAX, FC_SIZE, DESCENT_RATE, REGULAR_RATE, ); let width = 100; let height = 100; let buffer = alloc(width * height * 4); draw_prediction(buffer, width, height, 2f32); free(buffer, width * height * 4); } #[test] fn test_draw_points() { // Because cargo test is default multi-thread, put them together to avoid data_racing // span_least * 1.1 for padding init( DATA_GEN_RADIUS, SPIN_SPAN, DATA_NUM, NUM_CLASSES, DATA_GEN_RAND_MAX, NETWORK_GEN_RAND_MAX, FC_SIZE, DESCENT_RATE, REGULAR_RATE, ); // test small resolution drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(1, 1, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test tall screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(1, 100, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test flat screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(1, 100, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test square screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(100, 100, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test huge screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(10000000, 1000000, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); } }
{ init( DATA_GEN_RADIUS, SPIN_SPAN, DATA_NUM, NUM_CLASSES, DATA_GEN_RAND_MAX, NETWORK_GEN_RAND_MAX, FC_SIZE, DESCENT_RATE, REGULAR_RATE, ); let loss_before: f32 = train(); for _ in 0..50 { let loss = train(); assert!(loss < loss_before * 1.1f32); } }
identifier_body
lib.rs
mod data; mod nn; use std::mem; use std::slice; //use std::os::raw::{/*c_double, c_int, */c_void}; // for js functions imports use once_cell::sync::Lazy; use std::sync::Mutex; // for lazy_static // for global variables use ndarray::prelude::*; use ndarray::{array, Array, Array1, Array3, Axis, Zip}; use data::Data; use nn::Network; #[derive(Default)] struct
{ fc_size: u32, num_classes: u32, descent_rate: f32, regular_rate: f32, } #[derive(Default)] struct CriticalSection(MetaData, Data, Network); // Imported js functions extern "C" { // for debug fn log_u64(num: u32); // for data pointer draw // x,y: the offset from upper left corner // label: a fractal which represents the position current label is in total // position range fn draw_point(x: u32, y: u32, label_ratio: f32); } static DATA: Lazy<Mutex<CriticalSection>> = Lazy::new(|| Mutex::default()); #[no_mangle] // This function returns the offset of the allocated buffer in wasm memory pub fn alloc(size: u32) -> *mut u8 { let mut buffer: Vec<u8> = Vec::with_capacity(size as usize); let buffer_ptr = buffer.as_mut_ptr(); mem::forget(buffer); buffer_ptr } #[no_mangle] pub fn free(buffer_ptr: *mut u8, size: u32) { let _ = unsafe { Vec::from_raw_parts(buffer_ptr, 0, size as usize) }; } #[no_mangle] pub fn init( data_radius: f32, data_spin_span: f32, data_num: u32, num_classes: u32, data_gen_rand_max: f32, network_gen_rand_max: f32, fc_size: u32, descent_rate: f32, regular_rate: f32, ) { // Thanks rust compiler :-/ let ref mut tmp = *DATA.lock().unwrap(); let CriticalSection(metadata, data, network) = tmp; metadata.fc_size = fc_size; metadata.num_classes = num_classes; metadata.descent_rate = descent_rate; metadata.regular_rate = regular_rate; // Num of each data class is the same data.init( num_classes, data_num / num_classes, data_radius, data_spin_span, data_gen_rand_max, ); // Input of this network is two dimension points // output label is sparsed num_classes integer const PLANE_DIMENSION: u32 = 2; network.init(PLANE_DIMENSION, fc_size, num_classes, network_gen_rand_max); } #[no_mangle] pub fn train() -> f32 { let ref mut tmp = *DATA.lock().unwrap(); // Jesus, thats magic let CriticalSection(ref metadata, ref data, ref mut network) = *tmp; let regular_rate = metadata.regular_rate; let descent_rate = metadata.descent_rate; let (fc_layer, softmax) = 
network.forward_propagation(&data.points); let (dw1, db1, dw2, db2) = network.back_propagation( &data.points, &fc_layer, &softmax, &data.labels, regular_rate, ); let loss = network.loss(&softmax, &data.labels, regular_rate); network.descent(&dw1, &db1, &dw2, &db2, descent_rate); let (data_loss, regular_loss) = loss; data_loss + regular_loss } // Plot classified backgroud to canvas // span_least The least span of area should be drawn to canvas(because usually the canvas is not square) #[no_mangle] pub fn draw_prediction(canvas: *mut u8, width: u32, height: u32, span_least: f32) { // assert!(span_least > 0f32); let width = width as usize; let height = height as usize; // `data` will be used to draw data points let ref tmp = *DATA.lock().unwrap(); let CriticalSection(metadata, _, network) = tmp; let num_classes = metadata.num_classes as usize; let r: Array1<f32> = Array::linspace(0f32, 200f32, num_classes); let g: Array1<f32> = Array::linspace(0f32, 240f32, num_classes); let b: Array1<f32> = Array::linspace(0f32, 255f32, num_classes); let span_per_pixel = span_least / width.min(height) as f32; let span_height = height as f32 * span_per_pixel; let span_width = width as f32 * span_per_pixel; let width_max = span_width / 2f32; let width_min = -span_width / 2f32; let height_max = span_height / 2f32; let height_min = -span_height / 2f32; let x_axis: Array1<f32> = Array::linspace(width_min, width_max, width); let y_axis: Array1<f32> = Array::linspace(height_min, height_max, height); // coordination let mut grid: Array3<f32> = Array::zeros((height, width, 2)); for y in 0..height { for x in 0..width { let coord = array![x_axis[[x]], y_axis[[y]]]; let mut slice = grid.slice_mut(s![y, x, ..]); slice.assign(&coord); } } let xys = grid.into_shape((height * width, 2)).unwrap(); let (_, softmax) = network.forward_propagation(&xys); let mut labels: Array1<usize> = Array::zeros(height * width); for (y, row) in softmax.axis_iter(Axis(0)).enumerate() { let mut maxx = 0 as usize; let 
mut max = row[[0]]; for (x, col) in row.iter().enumerate() { if *col > max { maxx = x; max = *col; } } labels[[y]] = maxx; } let grid_label = labels.into_shape((height, width)).unwrap(); let canvas_size = width * height * 4; let canvas: &mut [u8] = unsafe { slice::from_raw_parts_mut(canvas, canvas_size) }; for y in 0..height { for x in 0..width { // assume rgba canvas[4 * (y * width + x) + 0] = r[[grid_label[[y, x]]]] as u8; canvas[4 * (y * width + x) + 1] = g[[grid_label[[y, x]]]] as u8; canvas[4 * (y * width + x) + 2] = b[[grid_label[[y, x]]]] as u8; canvas[4 * (y * width + x) + 3] = 0xFF as u8; } } } // check parameters for function below which draws predictions #[no_mangle] pub fn draw_points(width: u32, height: u32, span_least: f32) { let ref tmp = *DATA.lock().unwrap(); let CriticalSection(metadata, data, _) = tmp; let num_classes = metadata.num_classes as f32; let pixel_per_span = width.min(height) as f32 / span_least; let labels = &data.labels; let points = &data.points; let points_x = points.index_axis(Axis(1), 0); let points_y = points.index_axis(Axis(1), 1); Zip::from(labels) .and(points_x) .and(points_y) .apply(|&label, &x, &y| { // Assume data position is limited in: // [-data_radius - data_rand_max, data_radius + data_rand_max] let x = (x * pixel_per_span) as i64 + width as i64 / 2; let y = (y * pixel_per_span) as i64 + height as i64 / 2; // if points can show in canvas if !(x >= width as i64 || x < 0 || y >= height as i64 || y < 0) { // floor let x = x as u32; let y = y as u32; let label_ratio = label as f32 / num_classes; unsafe { draw_point(x, y, label_ratio); } } }); } #[cfg(test)] mod kernel_test { use super::*; static POINT_DRAW_TIMES: Lazy<Mutex<u32>> = Lazy::new(|| Mutex::new(0)); // Override the extern functions #[no_mangle] extern "C" fn draw_point(_: u32, _: u32, _: f32) { *POINT_DRAW_TIMES.lock().unwrap() += 1; } use std::f32::consts::PI; // for math functions const DATA_GEN_RADIUS: f32 = 1f32; const SPIN_SPAN: f32 = PI; const NUM_CLASSES: 
u32 = 3; const DATA_NUM: u32 = 300; const FC_SIZE: u32 = 100; const REGULAR_RATE: f32 = 0.001f32; const DESCENT_RATE: f32 = 1f32; const DATA_GEN_RAND_MAX: f32 = 0.25f32; const NETWORK_GEN_RAND_MAX: f32 = 0.1f32; #[test] fn test_all() { init( DATA_GEN_RADIUS, SPIN_SPAN, DATA_NUM, NUM_CLASSES, DATA_GEN_RAND_MAX, NETWORK_GEN_RAND_MAX, FC_SIZE, DESCENT_RATE, REGULAR_RATE, ); let loss_before: f32 = train(); for _ in 0..50 { let loss = train(); assert!(loss < loss_before * 1.1f32); } } #[test] fn test_buffer_allocation() { let buffer = alloc(114514); free(buffer, 114514); } #[test] fn test_draw_prediction() { init( DATA_GEN_RADIUS, SPIN_SPAN, DATA_NUM, NUM_CLASSES, DATA_GEN_RAND_MAX, NETWORK_GEN_RAND_MAX, FC_SIZE, DESCENT_RATE, REGULAR_RATE, ); let width = 100; let height = 100; let buffer = alloc(width * height * 4); draw_prediction(buffer, width, height, 2f32); free(buffer, width * height * 4); } #[test] fn test_draw_points() { // Because cargo test is default multi-thread, put them together to avoid data_racing // span_least * 1.1 for padding init( DATA_GEN_RADIUS, SPIN_SPAN, DATA_NUM, NUM_CLASSES, DATA_GEN_RAND_MAX, NETWORK_GEN_RAND_MAX, FC_SIZE, DESCENT_RATE, REGULAR_RATE, ); // test small resolution drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(1, 1, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test tall screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(1, 100, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test flat screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(1, 100, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test square screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(100, 100, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test huge screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; 
draw_points(10000000, 1000000, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); } }
MetaData
identifier_name
common.js
/* 本地存储 userInfo openid userType teacherStatusInfo */ const myHttps = "wj.1-zhao.com"; const host = `https://${myHttps}`; const webStock = `wss://${myHttps}/WebSocketServer.ashx`; const QQMapWX = require('./qqmap-wx-jssdk.min.js'); const mapKey = new QQMapWX({ key: '4WABZ-V2ARX-NLS45-T5Q7T-CETWK-KMB7C' // 必填 }); const MD5 = require('./md5.js'); const phoneReg = /^1[34578]\d{9}$/; // 正则手机号码 const passportReg = /^1[45][0-9]{7}|G[0-9]{8}|P[0-9]{7}|S[0-9]{7,8}|D[0-9]+$/; //正则护照 const emailReg = /^[A-Za-z\d]+([-_.][A-Za-z\d]+)*@([A-Za-z\d]+[-.])+[A-Za-z\d]{2,5}$/; //正则邮箱 const srcImg = `${host}/QualifImgs/`; //图片 const srcUploadImg = `${host}/ImgCatch/`; //上传图片 const srcVideo = `${host}/QuaLifAudios/`; //视频 const srcActivity = `${host}/AtyImages/`; //活动 const srcActivityVideo = `${host}/ActVideos/`; //活动视频 const srcBanner = `${host}/BannerImgs/`; //轮播图 const srcPoster = `${host}/Content/Images/`; //海报 const srcForIdPhoto = `${host}/ForIdPhoto/`;//证件照 const srcShar = `${host}/Content/SharePic/`; //分享图片 const config = { /* 首页 */ //获取学生状态,注册学生 RisStudent: `${host}/LittleProgram/Student/RisStudent`, //获取首页banner图片列表 GetBannerImgs: `${host}/LittleProgram/SystemSetup/GetBannerImgs`, //获取首页最新活动 GetLastestAtyInfo: `${host}/LittleProgram/Activity/GetLastestAtyInfo`, // 获取首页推荐外教 GetRecomForTeas: `${host}/LittleProgram/ForeignTea/GetRecomForTeas`, //获取外教的详细信息 GetForeignTeaInfo: `${host}/LittleProgram/ForeignTea/GetForeignTeaInfo`, //找外教-详情页,获取外交发布课程信息 GetCourInfosByTeaId: `${host}/LittleProgram/Course/GetCourInfosByTeaId`, //找外教-详情页,获取某外教评论内容 GetReviewInfoByTeaId: `${host}/LittleProgram/Review/GetReviewInfoByTeaId`, //课程信息,获取课程信息与外教信息(2018-03-29) GetCourseInfo: `${host}/LittleProgram/Course/GetCourseInfo`, //课程信息,根据课程ID获取课程的上课时间(2018-03-29) GetTimeTableInfos: `${host}/LittleProgram/TimeTable/GetTimeTableInfos`, //购买课程--订单填写页--获取订单信息(2018-03-30) GetOrderInfos: `${host}/LittleProgram/CorOpenGroup/GetOrderInfos`, //学生--提交订单(2018-04-03) PlaceAnOrder: 
`${host}/LittleProgram/CorOpenGroup/PlaceAnOrder`, // 学生--取消支付或者支付失败时调用(2018-04-03) AttendGroupFailed: `${host}/LittleProgram/CorOpenGroup/AttendGroupFailed`, // 学生--支付成功,模版消息发送(2018-04-03) PayMentSuccess: `${host}/LittleProgram/CorOpenGroup/PayMentSuccess`, // 购买成功后--生成海报(2018-04-04) GetPosterInfo: `${host}/LittleProgram/Poster/GetPosterInfo`, // 学生-查看团详情(2018-04-04) LookUpFigroupInfo: `${host}/LittleProgram/CorOpenGroup/LookUpFigroupInfo`, // 学生-删除订单(2018-04-08) DeleteOgoById: `${host}/LittleProgram/OpenGrpOrder/DeleteOgoById`, // 订单页--获取外教上课地址与手机号(2018-04-09) GetTeaAddressPhone: `${host}/LittleProgram/CorOpenGroup/GetTeaAddressPhone`, // 用户--更改用户类型(2018-04-23) ChangeUserType: `${host}/LittleProgram/UserInfo/ChangeUserType`, // 获取分享的图片 GetSharePicName: `${host}/LittleProgram/SystemSetup/GetSharePicName`, /* 找外教 */ //获取找外教中商圈信息 GetTradingAreaInfos: `${host}/LittleProgram/TradingArea/GetTradingAreaInfos`, //找外教搜索页接口 FindForeignTea: `${host}/LittleProgram/ForeignTea/FindForeignTea`, /* 活动 */ //学生--查看活动--活动列表页(2018-04-04) GetAtyInfoList: `${host}/LittleProgram/Activity/GetAtyInfoList`, //学生-查看活动详情(2018-04-04) GetAtyDesInfo: `${host}/LittleProgram/Activity/GetAtyDesInfo`, //学生--活动报名(2018-04-04) AtySignUp: `${host}/LittleProgram/Activity/AtySignUp`, //活动--取消活动付费(2018-04-23) CanCelPay: `${host}/LittleProgram/Activity/CanCelPay`, // 学生--活动支付成功,模版消息发送(2018-04-03) PayMentSuccessActivity: `${host}/LittleProgram/Activity/PayMentSuccess`, /* 我的 */ //获取用户Openid GetSaveUserOpenId: `${host}/LittleProgram/UserInfo/GetSaveUserOpenId`, //更新用户头像与昵称 (2018-05-02) UpdateAvaUrlNick: `${host}/LittleProgram/UserInfo/UpdateAvaUrlNick`, //获取国家信息 GetCountryInfos: `${host}/LittleProgram/Nationality/GetCountryInfos`, //外教提交申请 ApplyForForeEdu: `${host}/LittleProgram/ForeignTea/ApplyForForeEdu`, //获取外教状态信息 是否vip... 
GetForTeaStatus: `${host}/LittleProgram/ForeignTea/GetForTeaStatus`, //外教--我的课程,课程列表(2018-03 - 29) GetMyCourInfos: `${host}/LittleProgram/Course/GetMyCourInfos`, //外教-我的课程-发布新课程 ReleaseCourse: `${host}/LittleProgram/Course/ReleaseCourse`, //我的-获取用户类型 GetUserType: `${host}/LittleProgram/UserInfo/GetUserType`, //外教--我的--获取基本信息(2018-03-29) GetForTeaDetailInfo: `${host}/LittleProgram/ForeignTea/GetForTeaDetailInfo`, //外教,我的--上传文件(2018-03-30) UpLoadForTeaFile: `${host}/LittleProgram/FileOpera/UpLoadForTeaFile`, // 外教--我的--修改基本资料提交(2018-03-30) AlterForTeaBaseInfo: `${host}/LittleProgram/ForeignTea/AlterForTeaBaseInfo`, //外教基本资料修改--删除上传文件 DeleteForTeaFile: `${host}/LittleProgram/FileOpera/DeleteForTeaFile`, //学生--查看课程详情--获取某课程拼团中的团订单(2018-03-30) GetCorGroupInfos: `${host}/LittleProgram/CorOpenGroup/GetCorGroupInfos`, //订单页--获取用户名与手机号(2018-03-30) GetUserNamePhone: `${host}/LittleProgram/Student/GetUserNamePhone`, //外教-删除课程信息 DeleteCourse: `${host}/LittleProgram/Course/DeleteCourse`, //外教-修改课程-获取信息(2018-04-02) AlterCourseGet: `${host}/LittleProgram/Course/AlterCourseGet`, //外教-修改课程信息(2018-04-02) AlterCourse: `${host}/LittleProgram/Course/AlterCourse`, //学生-获取我的订单列表(2018-04-08) GetOrderList: `${host}/LittleProgram/OpenGrpOrder/GetOrderList`, //学生-我的-我报名的活动(2018-04-08) GetMySignUpAtyList: `${host}/LittleProgram/Activity/GetMySignUpAtyList`, //学生--我的--学习需求 GetMyLearnNeeds: `${host}/LittleProgram/LearnNeeds/GetMyLearnNeeds`, //学生-删除我的需求(2018-04-09) DeleteMyLearnNeed: `${host}/LittleProgram/LearnNeeds/DeleteMyLearnNeed`, //学生-发布需求信息(2018-04-09) ReleaseMyLearnNeed: `${host}/LittleProgram/LearnNeeds/ReleaseMyLearnNeed`, //学生-我的-获取某需求信息以供修改(2018-04-09) GetMyLearnNeedInfo: `${host}/LittleProgram/LearnNeeds/GetMyLearnNeedInfo`, //学生--我的--修改需求(2018-04-09) AlterMyLearnNeedInfo: `${host}/LittleProgram/LearnNeeds/AlterMyLearnNeedInfo`, //学生--我的评论,评论列表(2018-04-09) GetMyAllRewInfos: `${host}/LittleProgram/Review/GetMyAllRewInfos`, //学生--发布一条新评论(2018-04-09) GiveTeaAMark: 
`${host}/LittleProgram/Review/GiveTeaAMark`, //学生--删除评论(2018-04-09) DeleteReview: `${host}/LittleProgram/Review/DeleteReview`, //外教--获取某课程拼团成功信息列表(2018-04-09) GetMyCorOrderList: `${host}/LittleProgram/OpenGrpOrder/GetMyCorOrderList`, //外教-拼团详情 GetTeaOrderInfoList: `${host}/LittleProgram/OpenGrpOrder/GetTeaOrderInfoList`, //外教-我的--需求查看(2018-04-10) GetAllLearnNeeds: `${host}/LittleProgram/LearnNeeds/GetAllLearnNeeds`, // 外教-订单查看-获取外教发布课程被购买订单列表(2018-04-10) GetTeaCogInfoList: `${host}/LittleProgram/OpenGrpOrder/GetTeaCogInfoList`, // 外教--点评管理--点评信息获取(2018-04-10) GetAllRewAboutMe: `${host}/LittleProgram/Review/GetAllRewAboutMe`, // 获取与我相关的所有聊天记录(2018-04-12) GetChatMemRecord: `${host}/LittleProgram/ChatRecord/GetChatMemRecord`, // 获取两人聊天记录(2018-04-12) GetChatRecord: `${host}/LittleProgram/ChatRecord/GetChatRecord`, // 获取聊天双方头像(2018-04-12) GetUserInfo: `${host}/LittleProgram/UserInfo/GetUserInfo`, // 外教-获取某外教所有课程所占用的时间段列表(2018-04-17) GetAllTeaTimeTableInfo: `${host}/LittleProgram/TimeTable/GetAllTeaTimeTableInfo`, // 获取未读消息数量(2018-04-19) GetUnReadMsgCount: `${host}/LittleProgram/ChatRecord/GetUnReadMsgCount`, // 外教--个人信息--未读订单数量(2018-04-20) GetNotCheckedOrderCount: `${host}/LittleProgram/CorOpenGroup/GetNotCheckedOrderCount`, // 获取帮助与反馈内容(2018-04-24) GetUserHelp: `${host}/LittleProgram/HelpAndFeedBack/GetUserHelp`, // 发表反馈信息(2018-04-24) PublishFeedBack: `${host}/LittleProgram/HelpAndFeedBack/PublishFeedBack`, // 外教--我的课程--关闭/打开课程(2018-04-25) ChangeCorSwitch: `${host}/LittleProgram/Course/ChangeCorSwitch`, // 获取用户头像与昵称 (2018-05-10) GetMyAvaName: `${host}/LittleProgram/UserInfo/GetMyAvaName`, // 模板消息绑定手机号 PutStuPhoneNum: `${host}/LittleProgram/Student/PutStuPhoneNum`, } module.exports = { webStock: webStock, config: config, passportReg: passportReg, phoneReg: phoneReg, emailReg: emailReg, srcImg: srcImg, srcUploadImg: srcUploadImg, srcVideo: srcVideo, srcActivity: srcActivity, srcBanner: srcBanner, srcPoster: srcPoster, srcForIdPhoto: srcForIdPhoto, srcActivityVideo: 
srcActivityVideo, srcShar: srcShar, //翻译 translate(query, complete) { // let appid = '2015063000000001', //官方示例,次数不限估计 // key = '12345678' let appid = '20180416000146782', //百度翻译appid key = 'i21sgz3p7ZDqfQiTq44D', //秘钥 salt = (new Date).getTime(), //所需随机数 // 要翻译的内容, 多个query可以用\n连接 如 query= 'apple\norange\nbanana\npear' from = 'zh', //源语言 to = 'en', //译文语言 sign = MD5(`${appid}${query}${salt}${key}`); //MD5加密后数据 wx.request({ url: 'https://fanyi-api.baidu.com/api/trans/vip/translate', method: 'GET', header: { 'content-type': 'application/json;charset=utf-8' }, data: { q: query, appid: appid, salt: salt, from: from, to: to, sign: sign }, complete: complete }) }, //请求数据 request(method, url, data, success, fail, complete) { fail = typeof (fail) === 'function' ? fail : function () { }; complete = typeof (complete) === 'function' ? complete : function () { }; wx.request({ url: url, data: data, method: method, header: { 'content-type': 'application/json' }, success: success, fail: fail, complete: complete }) }, //模态弹窗 showModal(content, showCancel, success, confirmText, title, cancelText) { title = title ? title : '提示'; showCancel = showCancel ? true : false; confirmText = confirmText ? confirmText : '确定'; cancelText = cancelText ? cancelText : '取消'; success = typeof (success) === 'function' ? success : function (res) { }; wx.showModal({ title: title, content: content, showCancel: showCancel, confirmText: confirmText, cancelText: cancelText, success: success }); }, //拍摄视频或从手机相册中选视频 chooseVideo(success) { success = typeof (success) === 'function' ? success : function (res) { }; wx.chooseVideo({ sourceType: ['album', 'camera'], compressed: true, maxDuration: 60, camera: 'back', success: success, complete: function (res) { } }) }, //从本地相册选择图片或使用相机拍照 chooseImage(success, count) { count = parseInt(count) ? count : 9; success = typeof (success) === 'function' ? 
success : function (res) { }; wx.chooseImage({ sizeType: ['original', 'compressed'], // 可以指定是原图还是压缩图,默认二者都有 sourceType: ['album', 'camera'], // 可以指定来源是相册还是相机,默认二者都有 count: count, success: success, }) }, getAddress(address) { //调用腾讯地图api地址解析为坐标 mapKey.geocoder({ address: address, success: (res) => { //打开地图查看 let data = res.result.location, w = data.lat, j = data.lng; wx.openLocation({ latitude: w, longitude: j, name: address }) }, fail: (res) => { }, complete: (res) => { } }); }, //获取openid getOpenid(callback) { callback = typeof (callback) === 'function' ? callback : function (res) { }; let openid = wx.getStorageSync('openid'); if (openid) { callback(); return; } wx.login({ complete: (res) => { if (res.code) { let code = res.code; wx.request({ url: config.GetSaveUserOpenId, data: { code: code, userType: -1 }, header: { 'content-type': 'application/json' }, method: 'POST', success: (res) => { if (res.data.res) { //保存openid wx.setStorageSync('openid', res.data.openid); //保存用户类型 let userType = res.data.userType && res.data.userType; wx.setStorageSync('userType', res.data.userType); callback(); } } }); } } }) }, //获取并更新用户头像等信息 getUserInfo(userInfo, callback) { callback = typeo
, method: 'POST', success: (res) => { if (res.data.res) { callback(); } } }); }, //学生注册 studentRegister() { wx.request({ url: config.RisStudent, method: 'POST', data: { openId: wx.getStorageSync('openid') }, success: (res) => { // if (res.data.res) { // switch (res.data.rtnType) { // case 1: // //注册成功 // break; // case 2: // //改账号被禁用,无法访问程序, // break; // case 3: // //账户正常 // break; // } // } else { // switch (res.data.errType) { // case 1: // //发生异常 // break; // case 2: // //openId错误 // break; // case 3: // //未知错误 // break; // } // } } }); }, newUnique(array, arr, id, hash) { //数组去重 arr.forEach(function (target) { !hash[target[id]] && (array.push(target)) && (hash[target[id]] = true); }, []); return array; } }
f (callback) === 'function' ? callback : function (res) { }; wx.setStorageSync('userInfo', userInfo); wx.request({ url: config.UpdateAvaUrlNick, data: { openId: wx.getStorageSync('openid'), avaUrl: userInfo.avatarUrl, nickName: userInfo.nickName, gender: userInfo.gender == 1 ? 1 : 0 //1男0女 }, header: { 'content-type': 'application/json' }
identifier_body
common.js
/* 本地存储 userInfo openid userType teacherStatusInfo */ const myHttps = "wj.1-zhao.com"; const host = `https://${myHttps}`; const webStock = `wss://${myHttps}/WebSocketServer.ashx`; const QQMapWX = require('./qqmap-wx-jssdk.min.js'); const mapKey = new QQMapWX({ key: '4WABZ-V2ARX-NLS45-T5Q7T-CETWK-KMB7C' // 必填 }); const MD5 = require('./md5.js'); const phoneReg = /^1[34578]\d{9}$/; // 正则手机号码 const passportReg = /^1[45][0-9]{7}|G[0-9]{8}|P[0-9]{7}|S[0-9]{7,8}|D[0-9]+$/; //正则护照 const emailReg = /^[A-Za-z\d]+([-_.][A-Za-z\d]+)*@([A-Za-z\d]+[-.])+[A-Za-z\d]{2,5}$/; //正则邮箱 const srcImg = `${host}/QualifImgs/`; //图片 const srcUploadImg = `${host}/ImgCatch/`; //上传图片 const srcVideo = `${host}/QuaLifAudios/`; //视频 const srcActivity = `${host}/AtyImages/`; //活动 const srcActivityVideo = `${host}/ActVideos/`; //活动视频 const srcBanner = `${host}/BannerImgs/`; //轮播图 const srcPoster = `${host}/Content/Images/`; //海报 const srcForIdPhoto = `${host}/ForIdPhoto/`;//证件照 const srcShar = `${host}/Content/SharePic/`; //分享图片 const config = { /* 首页 */ //获取学生状态,注册学生 RisStudent: `${host}/LittleProgram/Student/RisStudent`, //获取首页banner图片列表 GetBannerImgs: `${host}/LittleProgram/SystemSetup/GetBannerImgs`, //获取首页最新活动 GetLastestAtyInfo: `${host}/LittleProgram/Activity/GetLastestAtyInfo`, // 获取首页推荐外教 GetRecomForTeas: `${host}/LittleProgram/ForeignTea/GetRecomForTeas`, //获取外教的详细信息 GetForeignTeaInfo: `${host}/LittleProgram/ForeignTea/GetForeignTeaInfo`, //找外教-详情页,获取外交发布课程信息 GetCourInfosByTeaId: `${host}/LittleProgram/Course/GetCourInfosByTeaId`, //找外教-详情页,获取某外教评论内容 GetReviewInfoByTeaId: `${host}/LittleProgram/Review/GetReviewInfoByTeaId`, //课程信息,获取课程信息与外教信息(2018-03-29) GetCourseInfo: `${host}/LittleProgram/Course/GetCourseInfo`, //课程信息,根据课程ID获取课程的上课时间(2018-03-29) GetTimeTableInfos: `${host}/LittleProgram/TimeTable/GetTimeTableInfos`, //购买课程--订单填写页--获取订单信息(2018-03-30) GetOrderInfos: `${host}/LittleProgram/CorOpenGroup/GetOrderInfos`, //学生--提交订单(2018-04-03) PlaceAnOrder: 
`${host}/LittleProgram/CorOpenGroup/PlaceAnOrder`, // 学生--取消支付或者支付失败时调用(2018-04-03) AttendGroupFailed: `${host}/LittleProgram/CorOpenGroup/AttendGroupFailed`, // 学生--支付成功,模版消息发送(2018-04-03) PayMentSuccess: `${host}/LittleProgram/CorOpenGroup/PayMentSuccess`, // 购买成功后--生成海报(2018-04-04) GetPosterInfo: `${host}/LittleProgram/Poster/GetPosterInfo`, // 学生-查看团详情(2018-04-04) LookUpFigroupInfo: `${host}/LittleProgram/CorOpenGroup/LookUpFigroupInfo`, // 学生-删除订单(2018-04-08) DeleteOgoById: `${host}/LittleProgram/OpenGrpOrder/DeleteOgoById`, // 订单页--获取外教上课地址与手机号(2018-04-09) GetTeaAddressPhone: `${host}/LittleProgram/CorOpenGroup/GetTeaAddressPhone`, // 用户--更改用户类型(2018-04-23) ChangeUserType: `${host}/LittleProgram/UserInfo/ChangeUserType`, // 获取分享的图片 GetSharePicName: `${host}/LittleProgram/SystemSetup/GetSharePicName`, /* 找外教 */ //获取找外教中商圈信息 GetTradingAreaInfos: `${host}/LittleProgram/TradingArea/GetTradingAreaInfos`, //找外教搜索页接口 FindForeignTea: `${host}/LittleProgram/ForeignTea/FindForeignTea`, /* 活动 */ //学生--查看活动--活动列表页(2018-04-04) GetAtyInfoList: `${host}/LittleProgram/Activity/GetAtyInfoList`, //学生-查看活动详情(2018-04-04) GetAtyDesInfo: `${host}/LittleProgram/Activity/GetAtyDesInfo`, //学生--活动报名(2018-04-04) AtySignUp: `${host}/LittleProgram/Activity/AtySignUp`, //活动--取消活动付费(2018-04-23) CanCelPay: `${host}/LittleProgram/Activity/CanCelPay`, // 学生--活动支付成功,模版消息发送(2018-04-03) PayMentSuccessActivity: `${host}/LittleProgram/Activity/PayMentSuccess`, /* 我的 */ //获取用户Openid GetSaveUserOpenId: `${host}/LittleProgram/UserInfo/GetSaveUserOpenId`, //更新用户头像与昵称 (2018-05-02) UpdateAvaUrlNick: `${host}/LittleProgram/UserInfo/UpdateAvaUrlNick`, //获取国家信息 GetCountryInfos: `${host}/LittleProgram/Nationality/GetCountryInfos`, //外教提交申请 ApplyForForeEdu: `${host}/LittleProgram/ForeignTea/ApplyForForeEdu`, //获取外教状态信息 是否vip... 
GetForTeaStatus: `${host}/LittleProgram/ForeignTea/GetForTeaStatus`, //外教--我的课程,课程列表(2018-03 - 29) GetMyCourInfos: `${host}/LittleProgram/Course/GetMyCourInfos`, //外教-我的课程-发布新课程 ReleaseCourse: `${host}/LittleProgram/Course/ReleaseCourse`, //我的-获取用户类型 GetUserType: `${host}/LittleProgram/UserInfo/GetUserType`, //外教--我的--获取基本信息(2018-03-29) GetForTeaDetailInfo: `${host}/LittleProgram/ForeignTea/GetForTeaDetailInfo`, //外教,我的--上传文件(2018-03-30) UpLoadForTeaFile: `${host}/LittleProgram/FileOpera/UpLoadForTeaFile`, // 外教--我的--修改基本资料提交(2018-03-30) AlterForTeaBaseInfo: `${host}/LittleProgram/ForeignTea/AlterForTeaBaseInfo`, //外教基本资料修改--删除上传文件 DeleteForTeaFile: `${host}/LittleProgram/FileOpera/DeleteForTeaFile`, //学生--查看课程详情--获取某课程拼团中的团订单(2018-03-30) GetCorGroupInfos: `${host}/LittleProgram/CorOpenGroup/GetCorGroupInfos`, //订单页--获取用户名与手机号(2018-03-30) GetUserNamePhone: `${host}/LittleProgram/Student/GetUserNamePhone`, //外教-删除课程信息 DeleteCourse: `${host}/LittleProgram/Course/DeleteCourse`, //外教-修改课程-获取信息(2018-04-02) AlterCourseGet: `${host}/LittleProgram/Course/AlterCourseGet`, //外教-修改课程信息(2018-04-02) AlterCourse: `${host}/LittleProgram/Course/AlterCourse`, //学生-获取我的订单列表(2018-04-08) GetOrderList: `${host}/LittleProgram/OpenGrpOrder/GetOrderList`, //学生-我的-我报名的活动(2018-04-08) GetMySignUpAtyList: `${host}/LittleProgram/Activity/GetMySignUpAtyList`, //学生--我的--学习需求 GetMyLearnNeeds: `${host}/LittleProgram/LearnNeeds/GetMyLearnNeeds`, //学生-删除我的需求(2018-04-09) DeleteMyLearnNeed: `${host}/LittleProgram/LearnNeeds/DeleteMyLearnNeed`, //学生-发布需求信息(2018-04-09) ReleaseMyLearnNeed: `${host}/LittleProgram/LearnNeeds/ReleaseMyLearnNeed`, //学生-我的-获取某需求信息以供修改(2018-04-09) GetMyLearnNeedInfo: `${host}/LittleProgram/LearnNeeds/GetMyLearnNeedInfo`, //学生--我的--修改需求(2018-04-09) AlterMyLearnNeedInfo: `${host}/LittleProgram/LearnNeeds/AlterMyLearnNeedInfo`, //学生--我的评论,评论列表(2018-04-09) GetMyAllRewInfos: `${host}/LittleProgram/Review/GetMyAllRewInfos`, //学生--发布一条新评论(2018-04-09) GiveTeaAMark: 
`${host}/LittleProgram/Review/GiveTeaAMark`, //学生--删除评论(2018-04-09) DeleteReview: `${host}/LittleProgram/Review/DeleteReview`, //外教--获取某课程拼团成功信息列表(2018-04-09) GetMyCorOrderList: `${host}/LittleProgram/OpenGrpOrder/GetMyCorOrderList`, //外教-拼团详情 GetTeaOrderInfoList: `${host}/LittleProgram/OpenGrpOrder/GetTeaOrderInfoList`, //外教-我的--需求查看(2018-04-10) GetAllLearnNeeds: `${host}/LittleProgram/LearnNeeds/GetAllLearnNeeds`, // 外教-订单查看-获取外教发布课程被购买订单列表(2018-04-10) GetTeaCogInfoList: `${host}/LittleProgram/OpenGrpOrder/GetTeaCogInfoList`, // 外教--点评管理--点评信息获取(2018-04-10) GetAllRewAboutMe: `${host}/LittleProgram/Review/GetAllRewAboutMe`, // 获取与我相关的所有聊天记录(2018-04-12) GetChatMemRecord: `${host}/LittleProgram/ChatRecord/GetChatMemRecord`, // 获取两人聊天记录(2018-04-12) GetChatRecord: `${host}/LittleProgram/ChatRecord/GetChatRecord`, // 获取聊天双方头像(2018-04-12) GetUserInfo: `${host}/LittleProgram/UserInfo/GetUserInfo`, // 外教-获取某外教所有课程所占用的时间段列表(2018-04-17) GetAllTeaTimeTableInfo: `${host}/LittleProgram/TimeTable/GetAllTeaTimeTableInfo`, // 获取未读消息数量(2018-04-19) GetUnReadMsgCount: `${host}/LittleProgram/ChatRecord/GetUnReadMsgCount`, // 外教--个人信息--未读订单数量(2018-04-20) GetNotCheckedOrderCount: `${host}/LittleProgram/CorOpenGroup/GetNotCheckedOrderCount`, // 获取帮助与反馈内容(2018-04-24) GetUserHelp: `${host}/LittleProgram/HelpAndFeedBack/GetUserHelp`, // 发表反馈信息(2018-04-24) PublishFeedBack: `${host}/LittleProgram/HelpAndFeedBack/PublishFeedBack`, // 外教--我的课程--关闭/打开课程(2018-04-25) ChangeCorSwitch: `${host}/LittleProgram/Course/ChangeCorSwitch`, // 获取用户头像与昵称 (2018-05-10) GetMyAvaName: `${host}/LittleProgram/UserInfo/GetMyAvaName`, // 模板消息绑定手机号 PutStuPhoneNum: `${host}/LittleProgram/Student/PutStuPhoneNum`, } module.exports = { webStock: webStock, config: config, passportReg: passportReg, phoneReg: phoneReg, emailReg: emailReg, srcImg: srcImg, srcUploadImg: srcUploadImg, srcVideo: srcVideo, srcActivity: srcActivity, srcBanner: srcBanner, srcPoster: srcPoster, srcForIdPhoto: srcForIdPhoto, srcActivityVideo: 
srcActivityVideo, srcShar: srcShar, //翻译 translate(query, complete) { // let appid = '2015063000000001', //官方示例,次数不限估计 // key = '12345678' let appid = '20180416000146782', //百度翻译appid key = 'i21sgz3p7ZDqfQiTq44D', //秘钥 salt = (new Date).getTime(), //所需随机数 // 要翻译的内容, 多个query可以用\n连接 如 query= 'apple\norange\nbanana\npear' from = 'zh', //源语言 to = 'en', //译文语言 sign = MD5(`${appid}${query}${salt}${key}`); //MD5加密后数据 wx.request({ url: 'https://fanyi-api.baidu.com/api/trans/vip/translate', method: 'GET', header: { 'content-type': 'application/json;charset=utf-8' }, data: { q: query, appid: appid, salt: salt, from: from, to: to, sign: sign }, complete: complete }) }, //请求数据 request(method, url, data, success, fail, complete) { fail = typeof (fail) === 'function' ? fail : function () { }; complete = typeof (complete) === 'function' ? complete : function () { }; wx.request({ url: url, data: data, method: method, header: { 'content-type': 'application/json' }, success: success, fail: fail, complete: complete }) }, //模态弹窗 showModal(content, showCancel, success, confirmText, title, cancelText) { title = title ? title : '提示'; showCancel = showCancel ? true : false; confirmText = confirmText ? confirmText : '确定'; cancelText = cancelText ? cancelText : '取消'; success = typeof (success) === 'function' ? success : function (res) { }; wx.showModal({ title: title, content: content, showCancel: showCancel, confirmText: confirmText, cancelText: cancelText, success: success }); }, //拍摄视频或从手机相册中选视频 chooseVideo(success) { success = typeof (success) === 'function' ? success : function (res) { }; wx.chooseVideo({ sourceType: ['album', 'camera'], compressed: true, maxDuration: 60, camera: 'back', success: success, complete: function (res) { } }) }, //从本地相册选择图片或使用相机拍照 chooseImage(success, count) { count = parseInt(count) ? count : 9; success = typeof (success) === 'function' ? 
success : function (res) { }; wx.chooseImage({ sizeType: ['original', 'compressed'], // 可以指定是原图还是压缩图,默认二者都有 sourceType: ['album', 'camera'], // 可以指定来源是相册还是相机,默认二者都有 count: count, success: success, }) }, getAddress(address) { //调用腾讯地图api地址解析为坐标 mapKey.geocoder({ address: address, success: (res) => { //打开地图查看 let data = res.result.location, w = data.lat, j = data.lng; wx.openLocation({ latitude: w, longitude: j, name: address }) }, fail: (res) => { }, complete: (res) => { } }); }, //获取openid getOpenid(callback) { callback = typeof (callback) === 'function' ? callback : function (res) { }; let openid = wx.getStorageSync('openid'); if (openid) { callback(); return; } wx.login({ complete: (res) => { if (res.code) { let code = res.code; wx.request({ url: config.GetSaveUserOpenId, data: { code: code, userType: -1 }, header: { 'content-type': 'application/json' }, method: 'POST', success: (res) => { if (res.data.res) { //保存openid wx.setStorageSync('openid', res.data.openid);
wx.setStorageSync('userType', res.data.userType); callback(); } } }); } } }) }, //获取并更新用户头像等信息 getUserInfo(userInfo, callback) { callback = typeof (callback) === 'function' ? callback : function (res) { }; wx.setStorageSync('userInfo', userInfo); wx.request({ url: config.UpdateAvaUrlNick, data: { openId: wx.getStorageSync('openid'), avaUrl: userInfo.avatarUrl, nickName: userInfo.nickName, gender: userInfo.gender == 1 ? 1 : 0 //1男0女 }, header: { 'content-type': 'application/json' }, method: 'POST', success: (res) => { if (res.data.res) { callback(); } } }); }, //学生注册 studentRegister() { wx.request({ url: config.RisStudent, method: 'POST', data: { openId: wx.getStorageSync('openid') }, success: (res) => { // if (res.data.res) { // switch (res.data.rtnType) { // case 1: // //注册成功 // break; // case 2: // //改账号被禁用,无法访问程序, // break; // case 3: // //账户正常 // break; // } // } else { // switch (res.data.errType) { // case 1: // //发生异常 // break; // case 2: // //openId错误 // break; // case 3: // //未知错误 // break; // } // } } }); }, newUnique(array, arr, id, hash) { //数组去重 arr.forEach(function (target) { !hash[target[id]] && (array.push(target)) && (hash[target[id]] = true); }, []); return array; } }
//保存用户类型 let userType = res.data.userType && res.data.userType;
random_line_split
common.js
/* 本地存储 userInfo openid userType teacherStatusInfo */ const myHttps = "wj.1-zhao.com"; const host = `https://${myHttps}`; const webStock = `wss://${myHttps}/WebSocketServer.ashx`; const QQMapWX = require('./qqmap-wx-jssdk.min.js'); const mapKey = new QQMapWX({ key: '4WABZ-V2ARX-NLS45-T5Q7T-CETWK-KMB7C' // 必填 }); const MD5 = require('./md5.js'); const phoneReg = /^1[34578]\d{9}$/; // 正则手机号码 const passportReg = /^1[45][0-9]{7}|G[0-9]{8}|P[0-9]{7}|S[0-9]{7,8}|D[0-9]+$/; //正则护照 const emailReg = /^[A-Za-z\d]+([-_.][A-Za-z\d]+)*@([A-Za-z\d]+[-.])+[A-Za-z\d]{2,5}$/; //正则邮箱 const srcImg = `${host}/QualifImgs/`; //图片 const srcUploadImg = `${host}/ImgCatch/`; //上传图片 const srcVideo = `${host}/QuaLifAudios/`; //视频 const srcActivity = `${host}/AtyImages/`; //活动 const srcActivityVideo = `${host}/ActVideos/`; //活动视频 const srcBanner = `${host}/BannerImgs/`; //轮播图 const srcPoster = `${host}/Content/Images/`; //海报 const srcForIdPhoto = `${host}/ForIdPhoto/`;//证件照 const srcShar = `${host}/Content/SharePic/`; //分享图片 const config = { /* 首页 */ //获取学生状态,注册学生 RisStudent: `${host}/LittleProgram/Student/RisStudent`, //获取首页banner图片列表 GetBannerImgs: `${host}/LittleProgram/SystemSetup/GetBannerImgs`, //获取首页最新活动 GetLastestAtyInfo: `${host}/LittleProgram/Activity/GetLastestAtyInfo`, // 获取首页推荐外教 GetRecomForTeas: `${host}/LittleProgram/ForeignTea/GetRecomForTeas`, //获取外教的详细信息 GetForeignTeaInfo: `${host}/LittleProgram/ForeignTea/GetForeignTeaInfo`, //找外教-详情页,获取外交发布课程信息 GetCourInfosByTeaId: `${host}/LittleProgram/Course/GetCourInfosByTeaId`, //找外教-详情页,获取某外教评论内容 GetReviewInfoByTeaId: `${host}/LittleProgram/Review/GetReviewInfoByTeaId`, //课程信息,获取课程信息与外教信息(2018-03-29) GetCourseInfo: `${host}/LittleProgram/Course/GetCourseInfo`, //课程信息,根据课程ID获取课程的上课时间(2018-03-29) GetTimeTableInfos: `${host}/LittleProgram/TimeTable/GetTimeTableInfos`, //购买课程--订单填写页--获取订单信息(2018-03-30) GetOrderInfos: `${host}/LittleProgram/CorOpenGroup/GetOrderInfos`, //学生--提交订单(2018-04-03) PlaceAnOrder: 
`${host}/LittleProgram/CorOpenGroup/PlaceAnOrder`, // 学生--取消支付或者支付失败时调用(2018-04-03) AttendGroupFailed: `${host}/LittleProgram/CorOpenGroup/AttendGroupFailed`, // 学生--支付成功,模版消息发送(2018-04-03) PayMentSuccess: `${host}/LittleProgram/CorOpenGroup/PayMentSuccess`, // 购买成功后--生成海报(2018-04-04) GetPosterInfo: `${host}/LittleProgram/Poster/GetPosterInfo`, // 学生-查看团详情(2018-04-04) LookUpFigroupInfo: `${host}/LittleProgram/CorOpenGroup/LookUpFigroupInfo`, // 学生-删除订单(2018-04-08) DeleteOgoById: `${host}/LittleProgram/OpenGrpOrder/DeleteOgoById`, // 订单页--获取外教上课地址与手机号(2018-04-09) GetTeaAddressPhone: `${host}/LittleProgram/CorOpenGroup/GetTeaAddressPhone`, // 用户--更改用户类型(2018-04-23) ChangeUserType: `${host}/LittleProgram/UserInfo/ChangeUserType`, // 获取分享的图片 GetSharePicName: `${host}/LittleProgram/SystemSetup/GetSharePicName`, /* 找外教 */ //获取找外教中商圈信息 GetTradingAreaInfos: `${host}/LittleProgram/TradingArea/GetTradingAreaInfos`, //找外教搜索页接口 FindForeignTea: `${host}/LittleProgram/ForeignTea/FindForeignTea`, /* 活动 */ //学生--查看活动--活动列表页(2018-04-04) GetAtyInfoList: `${host}/LittleProgram/Activity/GetAtyInfoList`, //学生-查看活动详情(2018-04-04) GetAtyDesInfo: `${host}/LittleProgram/Activity/GetAtyDesInfo`, //学生--活动报名(2018-04-04) AtySignUp: `${host}/LittleProgram/Activity/AtySignUp`, //活动--取消活动付费(2018-04-23) CanCelPay: `${host}/LittleProgram/Activity/CanCelPay`, // 学生--活动支付成功,模版消息发送(2018-04-03) PayMentSuccessActivity: `${host}/LittleProgram/Activity/PayMentSuccess`, /* 我的 */ //获取用户Openid GetSaveUserOpenId: `${host}/LittleProgram/UserInfo/GetSaveUserOpenId`, //更新用户头像与昵称 (2018-05-02) UpdateAvaUrlNick: `${host}/LittleProgram/UserInfo/UpdateAvaUrlNick`, //获取国家信息 GetCountryInfos: `${host}/LittleProgram/Nationality/GetCountryInfos`, //外教提交申请 ApplyForForeEdu: `${host}/LittleProgram/ForeignTea/ApplyForForeEdu`, //获取外教状态信息 是否vip... 
GetForTeaStatus: `${host}/LittleProgram/ForeignTea/GetForTeaStatus`, //外教--我的课程,课程列表(2018-03 - 29) GetMyCourInfos: `${host}/LittleProgram/Course/GetMyCourInfos`, //外教-我的课程-发布新课程 ReleaseCourse: `${host}/LittleProgram/Course/ReleaseCourse`, //我的-获取用户类型 GetUserType: `${host}/LittleProgram/UserInfo/GetUserType`, //外教--我的--获取基本信息(2018-03-29) GetForTeaDetailInfo: `${host}/LittleProgram/ForeignTea/GetForTeaDetailInfo`, //外教,我的--上传文件(2018-03-30) UpLoadForTeaFile: `${host}/LittleProgram/FileOpera/UpLoadForTeaFile`, // 外教--我的--修改基本资料提交(2018-03-30) AlterForTeaBaseInfo: `${host}/LittleProgram/ForeignTea/AlterForTeaBaseInfo`, //外教基本资料修改--删除上传文件 DeleteForTeaFile: `${host}/LittleProgram/FileOpera/DeleteForTeaFile`, //学生--查看课程详情--获取某课程拼团中的团订单(2018-03-30) GetCorGroupInfos: `${host}/LittleProgram/CorOpenGroup/GetCorGroupInfos`, //订单页--获取用户名与手机号(2018-03-30) GetUserNamePhone: `${host}/LittleProgram/Student/GetUserNamePhone`, //外教-删除课程信息 DeleteCourse: `${host}/LittleProgram/Course/DeleteCourse`, //外教-修改课程-获取信息(2018-04-02) AlterCourseGet: `${host}/LittleProgram/Course/AlterCourseGet`, //外教-修改课程信息(2018-04-02) AlterCourse: `${host}/LittleProgram/Course/AlterCourse`, //学生-获取我的订单列表(2018-04-08) GetOrderList: `${host}/LittleProgram/OpenGrpOrder/GetOrderList`, //学生-我的-我报名的活动(2018-04-08) GetMySignUpAtyList: `${host}/LittleProgram/Activity/GetMySignUpAtyList`, //学生--我的--学习需求 GetMyLearnNeeds: `${host}/LittleProgram/LearnNeeds/GetMyLearnNeeds`, //学生-删除我的需求(2018-04-09) DeleteMyLearnNeed: `${host}/LittleProgram/LearnNeeds/DeleteMyLearnNeed`, //学生-发布需求信息(2018-04-09) ReleaseMyLearnNeed: `${host}/LittleProgram/LearnNeeds/ReleaseMyLearnNeed`, //学生-我的-获取某需求信息以供修改(2018-04-09) GetMyLearnNeedInfo: `${host}/LittleProgram/LearnNeeds/GetMyLearnNeedInfo`, //学生--我的--修改需求(2018-04-09) AlterMyLearnNeedInfo: `${host}/LittleProgram/LearnNeeds/AlterMyLearnNeedInfo`, //学生--我的评论,评论列表(2018-04-09) GetMyAllRewInfos: `${host}/LittleProgram/Review/GetMyAllRewInfos`, //学生--发布一条新评论(2018-04-09) GiveTeaAMark: 
`${host}/LittleProgram/Review/GiveTeaAMark`, //学生--删除评论(2018-04-09) DeleteReview: `${host}/LittleProgram/Review/DeleteReview`, //外教--获取某课程拼团成功信息列表(2018-04-09) GetMyCorOrderList: `${host}/LittleProgram/OpenGrpOrder/GetMyCorOrderList`, //外教-拼团详情 GetTeaOrderInfoList: `${host}/LittleProgram/OpenGrpOrder/GetTeaOrderInfoList`, //外教-我的--需求查看(2018-04-10) GetAllLearnNeeds: `${host}/LittleProgram/LearnNeeds/GetAllLearnNeeds`, // 外教-订单查看-获取外教发布课程被购买订单列表(2018-04-10) GetTeaCogInfoList: `${host}/LittleProgram/OpenGrpOrder/GetTeaCogInfoList`, // 外教--点评管理--点评信息获取(2018-04-10) GetAllRewAboutMe: `${host}/LittleProgram/Review/GetAllRewAboutMe`, // 获取与我相关的所有聊天记录(2018-04-12) GetChatMemRecord: `${host}/LittleProgram/ChatRecord/GetChatMemRecord`, // 获取两人聊天记录(2018-04-12) GetChatRecord: `${host}/LittleProgram/ChatRecord/GetChatRecord`, // 获取聊天双方头像(2018-04-12) GetUserInfo: `${host}/LittleProgram/UserInfo/GetUserInfo`, // 外教-获取某外教所有课程所占用的时间段列表(2018-04-17) GetAllTeaTimeTableInfo: `${host}/LittleProgram/TimeTable/GetAllTeaTimeTableInfo`, // 获取未读消息数量(2018-04-19) GetUnReadMsgCount: `${host}/LittleProgram/ChatRecord/GetUnReadMsgCount`, // 外教--个人信息--未读订单数量(2018-04-20) GetNotCheckedOrderCount: `${host}/LittleProgram/CorOpenGroup/GetNotCheckedOrderCount`, // 获取帮助与反馈内容(2018-04-24) GetUserHelp: `${host}/LittleProgram/HelpAndFeedBack/GetUserHelp`, // 发表反馈信息(2018-04-24) PublishFeedBack: `${host}/LittleProgram/HelpAndFeedBack/PublishFeedBack`, // 外教--我的课程--关闭/打开课程(2018-04-25) ChangeCorSwitch: `${host}/LittleProgram/Course/ChangeCorSwitch`, // 获取用户头像与昵称 (2018-05-10) GetMyAvaName: `${host}/LittleProgram/UserInfo/GetMyAvaName`, // 模板消息绑定手机号 PutStuPhoneNum: `${host}/LittleProgram/Student/PutStuPhoneNum`, } module.exports = { webStock: webStock, config: config, passportReg: passportReg, phoneReg: phoneReg, emailReg: emailReg, srcImg: srcImg, srcUploadImg: srcUploadImg, srcVideo: srcVideo, srcActivity: srcActivity, srcBanner: srcBanner, srcPoster: srcPoster, srcForIdPhoto: srcForIdPhoto, srcActivityVideo: 
srcActivityVideo, srcShar: srcShar, //翻译 translate(query, complete) { // let appid = '2015063000000001', //官方示例,次数不限估计 // key = '12345678' let appid = '20180416000146782', //百度翻译appid key = 'i21sgz3p7ZDqfQiTq44D', //秘钥 salt = (new Date).getTime(), //所需随机数 // 要翻译的内容, 多个query可以用\n连接 如 query= 'apple\norange\nbanana\npear' from = 'zh', //源语言 to = 'en', //译文语言 sign = MD5(`${appid}${query}${salt}${key}`); //MD5加密后数据 wx.request({ url: 'https://fanyi-api.baidu.com/api/trans/vip/translate', method: 'GET', header: { 'content-type': 'application/json;charset=utf-8' }, data: { q: query, appid: appid, salt: salt, from: from, to: to, sign: sign }, complete: complete }) }, //请求数据 request(method, url, data, success, fail, complete) { fail = typeof (fail) === 'function' ? fail : function () { }; complete = typeof (complete) === 'function' ? complete : function () { }; wx.request({ url: url, data: data, method: method, header: { 'content-type': 'application/json' }, success: success, fail: fail, complete: complete }) }, //模态弹窗 showModal(content, showCancel, success, confirmText, title, cancelText) { title = title ? title : '提示'; showCancel = showCancel ? true : false; confirmText = confirmText ? confirmText : '确定'; cancelText = cancelText ? cancelText : '取消'; success = typeof (success) === 'function' ? success : function (res) { }; wx.showModal({ title: title, content: content, showCancel: showCancel, confirmText: confirmText, cancelText: cancelText, success: success }); }, //拍摄视频或从手机相册中选视频 chooseVideo(success) { success = typeof (success) === 'function' ? success : function (res) { }; wx.chooseVideo({ sourceType: ['album', 'camera'], compressed: true, maxDuration: 60, camera: 'back', success: success, complete: function (res) { } }) }, //从本地相册选择图片或使用相机拍照 chooseImage(success, count) { count = parseInt(count) ? count : 9; success = typeof (success) === 'function' ? 
success : function (res) { }; wx.chooseImage({ sizeType: ['original', 'compressed'], // 可以指定是原图还是压缩图,默认二者都有 sourceType: ['album', 'camera'], // 可以指定来源是相册还是相机,默认二者都有 count: count, success: success, }) }, getAddress(address) { //调用腾讯地图api地址解析为坐标 mapKey.geocoder({ address: address, success: (res) => { //打开地图查看 let data = res.result.location, w = data.lat, j = data.lng; wx.openLocation({ latitude: w, longitude: j, name: address }) }, fail: (res) => { }, complete: (res) => { } }); }, //获取openid getOpenid(callback) { callback = typeof (callback) === 'function' ? callback : function (res) { }; let openid = wx.getStorageSync('openid'); if (openid) { callback(); return; } wx.login({ complete: (res) => { if (res.code) { let code = res.code; wx.request({ url: config.GetSaveUserOpenId, data: { code: code, userType: -1 }, header: { 'content-type': 'application/json' }, method: 'POST', success: (res) => { if (res.data.res) { //保存openid wx.setStorageSync('openid', res.data.openid); //保存用户类型 let userType = res.data.userType && res.data.userType; wx.setStorageSync('userType', res.data.userType); callback(); } } }); } } }) }, //获取并更新用户头像等信息 getUserInfo(userInfo, callback) { callback = typeof (callback) === 'function' ? callback : function (res) { }; wx.setStorageSync('userInfo', userInfo); wx.request({ url: config.UpdateAvaUrlNick, data: { openId: wx.getStorageSync('openid'), avaUrl: userInfo.avatarUrl, nickName: userInfo.nickName, gender: userInfo.gender == 1 ? 
1 : 0 //1男0女 }, header: { 'content-type': 'application/json' }, method: 'POST', success: (res) => { if (res.data.res) { callback(); } } }); }, //学生注册 studentRegister() { wx.request({ url: config.RisStudent, method: 'POST', data: { openId: wx.getStorageSync('openid') }, success: (res) => { // if (res.data.res) { // switch (res.data.rtnType) { // case 1: // //注册成功 // break; // case 2: // //改账号被禁用,无法访问程序, // break; // case 3: // //账户正常 // break; // } // } else { // switch (res.data.errType) { // case 1: // //发生异常 // break; // case 2: // //openId错误 // break; // case 3: // //未知错误 // break; // } // } } }); }, newUnique(array, arr, id, hash) { //数组去重 arr.forEach(function (target) { !hash[target[id]] && (array.push(target)) && (hash[target[id]] = true); }, []); return array; } }
conditional_block
common.js
/* 本地存储 userInfo openid userType teacherStatusInfo */ const myHttps = "wj.1-zhao.com"; const host = `https://${myHttps}`; const webStock = `wss://${myHttps}/WebSocketServer.ashx`; const QQMapWX = require('./qqmap-wx-jssdk.min.js'); const mapKey = new QQMapWX({ key: '4WABZ-V2ARX-NLS45-T5Q7T-CETWK-KMB7C' // 必填 }); const MD5 = require('./md5.js'); const phoneReg = /^1[34578]\d{9}$/; // 正则手机号码 const passportReg = /^1[45][0-9]{7}|G[0-9]{8}|P[0-9]{7}|S[0-9]{7,8}|D[0-9]+$/; //正则护照 const emailReg = /^[A-Za-z\d]+([-_.][A-Za-z\d]+)*@([A-Za-z\d]+[-.])+[A-Za-z\d]{2,5}$/; //正则邮箱 const srcImg = `${host}/QualifImgs/`; //图片 const srcUploadImg = `${host}/ImgCatch/`; //上传图片 const srcVideo = `${host}/QuaLifAudios/`; //视频 const srcActivity = `${host}/AtyImages/`; //活动 const srcActivityVideo = `${host}/ActVideos/`; //活动视频 const srcBanner = `${host}/BannerImgs/`; //轮播图 const srcPoster = `${host}/Content/Images/`; //海报 const srcForIdPhoto = `${host}/ForIdPhoto/`;//证件照 const srcShar = `${host}/Content/SharePic/`; //分享图片 const config = { /* 首页 */ //获取学生状态,注册学生 RisStudent: `${host}/LittleProgram/Student/RisStudent`, //获取首页banner图片列表 GetBannerImgs: `${host}/LittleProgram/SystemSetup/GetBannerImgs`, //获取首页最新活动 GetLastestAtyInfo: `${host}/LittleProgram/Activity/GetLastestAtyInfo`, // 获取首页推荐外教 GetRecomForTeas: `${host}/LittleProgram/ForeignTea/GetRecomForTeas`, //获取外教的详细信息 GetForeignTeaInfo: `${host}/LittleProgram/ForeignTea/GetForeignTeaInfo`, //找外教-详情页,获取外交发布课程信息 GetCourInfosByTeaId: `${host}/LittleProgram/Course/GetCourInfosByTeaId`, //找外教-详情页,获取某外教评论内容 GetReviewInfoByTeaId: `${host}/LittleProgram/Review/GetReviewInfoByTeaId`, //课程信息,获取课程信息与外教信息(2018-03-29) GetCourseInfo: `${host}/LittleProgram/Course/GetCourseInfo`, //课程信息,根据课程ID获取课程的上课时间(2018-03-29) GetTimeTableInfos: `${host}/LittleProgram/TimeTable/GetTimeTableInfos`, //购买课程--订单填写页--获取订单信息(2018-03-30) GetOrderInfos: `${host}/LittleProgram/CorOpenGroup/GetOrderInfos`, //学生--提交订单(2018-04-03) PlaceAnOrder: 
`${host}/LittleProgram/CorOpenGroup/PlaceAnOrder`, // 学生--取消支付或者支付失败时调用(2018-04-03) AttendGroupFailed: `${host}/LittleProgram/CorOpenGroup/AttendGroupFailed`, // 学生--支付成功,模版消息发送(2018-04-03) PayMentSuccess: `${host}/LittleProgram/CorOpenGroup/PayMentSuccess`, // 购买成功后--生成海报(2018-04-04) GetPosterInfo: `${host}/LittleProgram/Poster/GetPosterInfo`, // 学生-查看团详情(2018-04-04) LookUpFigroupInfo: `${host}/LittleProgram/CorOpenGroup/LookUpFigroupInfo`, // 学生-删除订单(2018-04-08) DeleteOgoById: `${host}/LittleProgram/OpenGrpOrder/DeleteOgoById`, // 订单页--获取外教上课地址与手机号(2018-04-09) GetTeaAddressPhone: `${host}/LittleProgram/CorOpenGroup/GetTeaAddressPhone`, // 用户--更改用户类型(2018-04-23) ChangeUserType: `${host}/LittleProgram/UserInfo/ChangeUserType`, // 获取分享的图片 GetSharePicName: `${host}/LittleProgram/SystemSetup/GetSharePicName`, /* 找外教 */ //获取找外教中商圈信息 GetTradingAreaInfos: `${host}/LittleProgram/TradingArea/GetTradingAreaInfos`, //找外教搜索页接口 FindForeignTea: `${host}/LittleProgram/ForeignTea/FindForeignTea`, /* 活动 */ //学生--查看活动--活动列表页(2018-04-04) GetAtyInfoList: `${host}/LittleProgram/Activity/GetAtyInfoList`, //学生-查看活动详情(2018-04-04) GetAtyDesInfo: `${host}/LittleProgram/Activity/GetAtyDesInfo`, //学生--活动报名(2018-04-04) AtySignUp: `${host}/LittleProgram/Activity/AtySignUp`, //活动--取消活动付费(2018-04-23) CanCelPay: `${host}/LittleProgram/Activity/CanCelPay`, // 学生--活动支付成功,模版消息发送(2018-04-03) PayMentSuccessActivity: `${host}/LittleProgram/Activity/PayMentSuccess`, /* 我的 */ //获取用户Openid GetSaveUserOpenId: `${host}/LittleProgram/UserInfo/GetSaveUserOpenId`, //更新用户头像与昵称 (2018-05-02) UpdateAvaUrlNick: `${host}/LittleProgram/UserInfo/UpdateAvaUrlNick`, //获取国家信息 GetCountryInfos: `${host}/LittleProgram/Nationality/GetCountryInfos`, //外教提交申请 ApplyForForeEdu: `${host}/LittleProgram/ForeignTea/ApplyForForeEdu`, //获取外教状态信息 是否vip... 
GetForTeaStatus: `${host}/LittleProgram/ForeignTea/GetForTeaStatus`, //外教--我的课程,课程列表(2018-03 - 29) GetMyCourInfos: `${host}/LittleProgram/Course/GetMyCourInfos`, //外教-我的课程-发布新课程 ReleaseCourse: `${host}/LittleProgram/Course/ReleaseCourse`, //我的-获取用户类型 GetUserType: `${host}/LittleProgram/UserInfo/GetUserType`, //外教--我的--获取基本信息(2018-03-29) GetForTeaDetailInfo: `${host}/LittleProgram/ForeignTea/GetForTeaDetailInfo`, //外教,我的--上传文件(2018-03-30) UpLoadForTeaFile: `${host}/LittleProgram/FileOpera/UpLoadForTeaFile`, // 外教--我的--修改基本资料提交(2018-03-30) AlterForTeaBaseInfo: `${host}/LittleProgram/ForeignTea/AlterForTeaBaseInfo`, //外教基本资料修改--删除上传文件 DeleteForTeaFile: `${host}/LittleProgram/FileOpera/DeleteForTeaFile`, //学生--查看课程详情--获取某课程拼团中的团订单(2018-03-30) GetCorGroupInfos: `${host}/LittleProgram/CorOpenGroup/GetCorGroupInfos`, //订单页--获取用户名与手机号(2018-03-30) GetUserNamePhone: `${host}/LittleProgram/Student/GetUserNamePhone`, //外教-删除课程信息 DeleteCourse: `${host}/LittleProgram/Course/DeleteCourse`, //外教-修改课程-获取信息(2018-04-02) AlterCourseGet: `${host}/LittleProgram/Course/AlterCourseGet`, //外教-修改课程信息(2018-04-02) AlterCourse: `${host}/LittleProgram/Course/AlterCourse`, //学生-获取我的订单列表(2018-04-08) GetOrderList: `${host}/LittleProgram/OpenGrpOrder/GetOrderList`, //学生-我的-我报名的活动(2018-04-08) GetMySignUpAtyList: `${host}/LittleProgram/Activity/GetMySignUpAtyList`, //学生--我的--学习需求 GetMyLearnNeeds: `${host}/LittleProgram/LearnNeeds/GetMyLearnNeeds`, //学生-删除我的需求(2018-04-09) DeleteMyLearnNeed: `${host}/LittleProgram/LearnNeeds/DeleteMyLearnNeed`, //学生-发布需求信息(2018-04-09) ReleaseMyLearnNeed: `${host}/LittleProgram/LearnNeeds/ReleaseMyLearnNeed`, //学生-我的-获取某需求信息以供修改(2018-04-09) GetMyLearnNeedInfo: `${host}/LittleProgram/LearnNeeds/GetMyLearnNeedInfo`, //学生--我的--修改需求(2018-04-09) AlterMyLearnNeedInfo: `${host}/LittleProgram/LearnNeeds/AlterMyLearnNeedInfo`, //学生--我的评论,评论列表(2018-04-09) GetMyAllRewInfos: `${host}/LittleProgram/Review/GetMyAllRewInfos`, //学生--发布一条新评论(2018-04-09) GiveTeaAMark: 
`${host}/LittleProgram/Review/GiveTeaAMark`, //学生--删除评论(2018-04-09) DeleteReview: `${host}/LittleProgram/Review/DeleteReview`, //外教--获取某课程拼团成功信息列表(2018-04-09) GetMyCorOrderList: `${host}/LittleProgram/OpenGrpOrder/GetMyCorOrderList`, //外教-拼团详情 GetTeaOrderInfoList: `${host}/LittleProgram/OpenGrpOrder/GetTeaOrderInfoList`, //外教-我的--需求查看(2018-04-10) GetAllLearnNeeds: `${host}/LittleProgram/LearnNeeds/GetAllLearnNeeds`, // 外教-订单查看-获取外教发布课程被购买订单列表(2018-04-10) GetTeaCogInfoList: `${host}/LittleProgram/OpenGrpOrder/GetTeaCogInfoList`, // 外教--点评管理--点评信息获取(2018-04-10) GetAllRewAboutMe: `${host}/LittleProgram/Review/GetAllRewAboutMe`, // 获取与我相关的所有聊天记录(2018-04-12) GetChatMemRecord: `${host}/LittleProgram/ChatRecord/GetChatMemRecord`, // 获取两人聊天记录(2018-04-12) GetChatRecord: `${host}/LittleProgram/ChatRecord/GetChatRecord`, // 获取聊天双方头像(2018-04-12) GetUserInfo: `${host}/LittleProgram/UserInfo/GetUserInfo`, // 外教-获取某外教所有课程所占用的时间段列表(2018-04-17) GetAllTeaTimeTableInfo: `${host}/LittleProgram/TimeTable/GetAllTeaTimeTableInfo`, // 获取未读消息数量(2018-04-19) GetUnReadMsgCount: `${host}/LittleProgram/ChatRecord/GetUnReadMsgCount`, // 外教--个人信息--未读订单数量(2018-04-20) GetNotCheckedOrderCount: `${host}/LittleProgram/CorOpenGroup/GetNotCheckedOrderCount`, // 获取帮助与反馈内容(2018-04-24) GetUserHelp: `${host}/LittleProgram/HelpAndFeedBack/GetUserHelp`, // 发表反馈信息(2018-04-24) PublishFeedBack: `${host}/LittleProgram/HelpAndFeedBack/PublishFeedBack`, // 外教--我的课程--关闭/打开课程(2018-04-25) ChangeCorSwitch: `${host}/LittleProgram/Course/ChangeCorSwitch`, // 获取用户头像与昵称 (2018-05-10) GetMyAvaName: `${host}/LittleProgram/UserInfo/GetMyAvaName`, // 模板消息绑定手机号 PutStuPhoneNum: `${host}/LittleProgram/Student/PutStuPhoneNum`, } module.exports = { webStock: webStock, config: config, passportReg: passportReg, phoneReg: phoneReg, emailReg: emailReg, srcImg: srcImg, srcUploadImg: srcUploadImg, srcVideo: srcVideo, srcActivity: srcActivity, srcBanner: srcBanner, srcPoster: srcPoster, srcForIdPhoto: srcForIdPhoto, srcActivityVideo: 
srcActivityVideo, srcShar: srcShar, //翻译 translate(query, complete) { // let appid = '2015063000000001', //官方示例,次数不限估计 // key = '12345678' let appid = '20180416000146782', //百度翻译appid key = 'i21sgz3p7ZDqfQiTq44D', //秘钥 salt = (new Date).getTime(), //所需随机数 // 要翻译的内容, 多个query可以用\n连接 如 query= 'apple\norange\nbanana\npear' from = 'zh', //源语言 to = 'en', //译文语言 sign = MD5(`${appid}${query}${salt}${key}`); //MD5加密后数据 wx.request({ url: 'https://fanyi-api.baidu.com/api/trans/vip/translate', method: 'GET', header: { 'content-type': 'application/json;charset=utf-8' }, data: { q: query, appid: appid, salt: salt, from: from, to: to, sign: sign }, complete: complete }) }, //请求数据 request(method, url, data, success, fail, complete) { fail = typeof (fail) === 'function' ? fail : function () { }; complete = typeof (complete) === 'function' ? complete : function () { }; wx.request({ url: url, data: data, method: method, header: { 'content-type': 'application/json' }, success: success, fail: fail, complete: complete }) }, //模态弹窗 showModal(content, showCancel, success, confirmText, title, cancelText) { title = title ? title : '提示'; showCancel = showCancel ? true : false; confirmText = confirmText ? confirmText : '确定'; cancelText = cancelText ? cancelText : '取消'; success = typeof (success) === 'function' ? success : function (res) { }; wx.showModal({ title: title, content: content, showCancel: showCancel, confirmText: confirmText, cancelText: cancelText, success: success }); }, //拍摄视频或从手机相册中选视频 chooseVideo(success) { success = typeof (success) === 'function' ? success : function (res) { }; wx.chooseVideo({ sourceType: ['album', 'camera'], compressed: true, maxDuration: 60, camera: 'back', success: success, complete: function (res) { } }) }, //从本地相册选择图片或使用相机拍照 chooseImage(success, count) { count = parseInt(count) ? count : 9; success = typeof (success) === 'function' ? 
success : function (res) { }; wx.chooseImage({ sizeType: ['original', 'compressed'], // 可以指定是原图还是压缩图,默认二者都有 sourceType: ['album', 'camera'], // 可以指定来源是相册还是相机,默认二者都有 count: count, success: success, }) }, getAddress(address) { //调用腾讯地图api地址解析为坐标 mapKey.geocoder({ address: address, success: (res) => { //打开地图查看 let data = res.result.location, w = data.lat, j = data.lng; wx.openLocation({ latitude: w, longitude: j, name: address }) }, fail: (res) => { }, complete: (res) => { } }); }, //获取openid getOpenid(callback) { callback = typeof (callback) === 'function' ? callback : function (res) { }; let openid = wx.getStorageSync('openid'); if (openid) { callback(); return; } wx.login({ complete: (res) => { if (res.code) { let code = res.code; wx.request({ url: config.GetSaveUserOpenId, data: { code: code, userType: -1 }, header: { 'content-type': 'application/json' }, method: 'POST', success: (res) => { if (res.data.res) { //保存openid wx.setStorageSync('openid', res.data.openid); //保存用户类型 let userType = res.data.userType && res.data.userType; wx.setStorageSync('userType', res.data.userType); callback(); } } }); } } }) }, //获取并更新用户头像等信息 getUserInfo(userInfo, callback) { callback = typeof (callback) === 'function' ? callback : function (res) { }; wx.setStorageSync('userInfo', userInfo); wx.request({ url: config.UpdateAvaUrlNick, data: { openId: wx.getStorageSync('openid'), avaUrl: userInfo.avatarUrl, nickName: userInfo.nickName, gender: userInfo.gender == 1 ? 
1 : 0 //1男0女 }, header: { 'content-type': 'application/json' }, method: 'POST', success: (res) => { if (res.data.res) { callback(); } } }); }, //学生注册 studentRegister() { wx.request({ url: config.RisStudent, method: 'POST', data: { openId: wx.getStorageSync('openid') }, success: (res) => { // if (res.data.res) { // switch (res.data.rtnType) { // case 1: // //注册成功 // break; // case 2: // //改账号被禁用,无法访问程序, // break; // case 3: // //账户正常 // break; // } // } else { // switch (res.data.errType) { // case 1: // //发生异常 // break; // case 2: // //openId错误 // break; // case 3: // //未知错误 // break; // } // } } }); }, newUnique(array, arr, id, hash) { //数组去重 arr.forEach(function (target) { !hash[target[id]] && (array.push(target)) && (hash[target[id]] = true); }, []); return array; } }
identifier_name
cache.rs
use crate::clean::{self, GetDefId, AttributesExt}; use crate::fold::DocFolder; use rustc::hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId}; use rustc::middle::privacy::AccessLevels; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use std::mem; use std::path::{Path, PathBuf}; use std::collections::BTreeMap; use syntax::source_map::FileName; use syntax::symbol::sym; use serialize::json::{ToJson, Json, as_json}; use super::{ItemType, IndexItem, IndexItemFunctionType, Impl, shorten, plain_summary_line}; use super::{Type, RenderInfo}; /// Indicates where an external crate can be found. pub enum ExternalLocation { /// Remote URL root of the external crate Remote(String), /// This external crate can be found in the local doc/ folder Local, /// The external crate could not be found. Unknown, } /// This cache is used to store information about the `clean::Crate` being /// rendered in order to provide more useful documentation. This contains /// information like all implementors of a trait, all traits a type implements, /// documentation for all known traits, etc. /// /// This structure purposefully does not implement `Clone` because it's intended /// to be a fairly large and expensive structure to clone. Instead this adheres /// to `Send` so it may be stored in a `Arc` instance and shared among the various /// rendering threads. #[derive(Default)] crate struct Cache { /// Maps a type ID to all known implementations for that type. This is only /// recognized for intra-crate `ResolvedPath` types, and is used to print /// out extra documentation on the page of an enum/struct. /// /// The values of the map are a list of implementations and documentation /// found on that implementation. pub impls: FxHashMap<DefId, Vec<Impl>>, /// Maintains a mapping of local crate `NodeId`s to the fully qualified name /// and "short type description" of that node. This is used when generating /// URLs when a type is being linked to. 
External paths are not located in /// this map because the `External` type itself has all the information /// necessary. pub paths: FxHashMap<DefId, (Vec<String>, ItemType)>, /// Similar to `paths`, but only holds external paths. This is only used for /// generating explicit hyperlinks to other crates. pub external_paths: FxHashMap<DefId, (Vec<String>, ItemType)>, /// Maps local `DefId`s of exported types to fully qualified paths. /// Unlike 'paths', this mapping ignores any renames that occur /// due to 'use' statements. /// /// This map is used when writing out the special 'implementors' /// javascript file. By using the exact path that the type /// is declared with, we ensure that each path will be identical /// to the path used if the corresponding type is inlined. By /// doing this, we can detect duplicate impls on a trait page, and only display /// the impl for the inlined type. pub exact_paths: FxHashMap<DefId, Vec<String>>, /// This map contains information about all known traits of this crate. /// Implementations of a crate should inherit the documentation of the /// parent trait if no extra documentation is specified, and default methods /// should show up in documentation about trait implementations. pub traits: FxHashMap<DefId, clean::Trait>, /// When rendering traits, it's often useful to be able to list all /// implementors of the trait, and this mapping is exactly, that: a mapping /// of trait ids to the list of known implementors of the trait pub implementors: FxHashMap<DefId, Vec<Impl>>, /// Cache of where external crate documentation can be found. pub extern_locations: FxHashMap<CrateNum, (String, PathBuf, ExternalLocation)>, /// Cache of where documentation for primitives can be found. pub primitive_locations: FxHashMap<clean::PrimitiveType, DefId>, // Note that external items for which `doc(hidden)` applies to are shown as // non-reachable while local items aren't. This is because we're reusing // the access levels from the privacy check pass. 
pub access_levels: AccessLevels<DefId>, /// The version of the crate being documented, if given from the `--crate-version` flag. pub crate_version: Option<String>, // Private fields only used when initially crawling a crate to build a cache stack: Vec<String>, parent_stack: Vec<DefId>, parent_is_trait_impl: bool, search_index: Vec<IndexItem>, stripped_mod: bool, pub deref_trait_did: Option<DefId>, pub deref_mut_trait_did: Option<DefId>, pub owned_box_did: Option<DefId>, masked_crates: FxHashSet<CrateNum>, // In rare case where a structure is defined in one module but implemented // in another, if the implementing module is parsed before defining module, // then the fully qualified name of the structure isn't presented in `paths` // yet when its implementation methods are being indexed. Caches such methods // and their parent id here and indexes them at the end of crate parsing. orphan_impl_items: Vec<(DefId, clean::Item)>, // Similarly to `orphan_impl_items`, sometimes trait impls are picked up // even though the trait itself is not exported. This can happen if a trait // was defined in function/expression scope, since the impl will be picked // up by `collect-trait-impls` but the trait won't be scraped out in the HIR // crawl. In order to prevent crashes when looking for spotlight traits or // when gathering trait documentation on a type, hold impls here while // folding and add them to the cache later on if we find the trait. orphan_trait_impls: Vec<(DefId, FxHashSet<DefId>, Impl)>, /// Aliases added through `#[doc(alias = "...")]`. Since a few items can have the same alias, /// we need the alias element to have an array of items. 
pub(super) aliases: FxHashMap<String, Vec<IndexItem>>, } impl Cache { pub fn from_krate( renderinfo: RenderInfo, extern_html_root_urls: &BTreeMap<String, String>, dst: &Path, mut krate: clean::Crate, ) -> (clean::Crate, String, Cache) { // Crawl the crate to build various caches used for the output let RenderInfo { inlined: _, external_paths, exact_paths, access_levels, deref_trait_did, deref_mut_trait_did, owned_box_did, } = renderinfo; let external_paths = external_paths.into_iter() .map(|(k, (v, t))| (k, (v, ItemType::from(t)))) .collect(); let mut cache = Cache { impls: Default::default(), external_paths, exact_paths, paths: Default::default(), implementors: Default::default(), stack: Vec::new(), parent_stack: Vec::new(), search_index: Vec::new(), parent_is_trait_impl: false, extern_locations: Default::default(), primitive_locations: Default::default(), stripped_mod: false, access_levels, crate_version: krate.version.take(), orphan_impl_items: Vec::new(), orphan_trait_impls: Vec::new(), traits: krate.external_traits.replace(Default::default()), deref_trait_did, deref_mut_trait_did, owned_box_did, masked_crates: mem::take(&mut krate.masked_crates), aliases: Default::default(), }; // Cache where all our extern crates are located for &(n, ref e) in &krate.externs { let src_root = match e.src { FileName::Real(ref p) => match p.parent() { Some(p) => p.to_path_buf(), None => PathBuf::new(), }, _ => PathBuf::new(), }; let extern_url = extern_html_root_urls.get(&e.name).map(|u| &**u); cache.extern_locations.insert(n, (e.name.clone(), src_root, extern_location(e, extern_url, &dst))); let did = DefId { krate: n, index: CRATE_DEF_INDEX }; cache.external_paths.insert(did, (vec![e.name.to_string()], ItemType::Module)); } // Cache where all known primitives have their documentation located. // // Favor linking to as local extern as possible, so iterate all crates in // reverse topological order. 
for &(_, ref e) in krate.externs.iter().rev() { for &(def_id, prim, _) in &e.primitives { cache.primitive_locations.insert(prim, def_id); } } for &(def_id, prim, _) in &krate.primitives { cache.primitive_locations.insert(prim, def_id); } cache.stack.push(krate.name.clone()); krate = cache.fold_crate(krate); for (trait_did, dids, impl_) in cache.orphan_trait_impls.drain(..) { if cache.traits.contains_key(&trait_did) { for did in dids { cache.impls.entry(did).or_insert(vec![]).push(impl_.clone()); } } } // Build our search index let index = build_index(&krate, &mut cache); (krate, index, cache) } } impl DocFolder for Cache { fn fold_item(&mut self, item: clean::Item) -> Option<clean::Item> { if item.def_id.is_local() { debug!("folding {} \"{:?}\", id {:?}", item.type_(), item.name, item.def_id); } // If this is a stripped module, // we don't want it or its children in the search index. let orig_stripped_mod = match item.inner { clean::StrippedItem(box clean::ModuleItem(..)) => { mem::replace(&mut self.stripped_mod, true) } _ => self.stripped_mod, }; // If the impl is from a masked crate or references something from a // masked crate then remove it completely. if let clean::ImplItem(ref i) = item.inner { if self.masked_crates.contains(&item.def_id.krate) || i.trait_.def_id().map_or(false, |d| self.masked_crates.contains(&d.krate)) || i.for_.def_id().map_or(false, |d| self.masked_crates.contains(&d.krate)) { return None; } } // Propagate a trait method's documentation to all implementors of the // trait. if let clean::TraitItem(ref t) = item.inner { self.traits.entry(item.def_id).or_insert_with(|| t.clone()); } // Collect all the implementors of traits. if let clean::ImplItem(ref i) = item.inner { if let Some(did) = i.trait_.def_id() { if i.blanket_impl.is_none() { self.implementors.entry(did).or_default().push(Impl { impl_item: item.clone(), }); } } } // Index this method for searching later on. 
if let Some(ref s) = item.name { let (parent, is_inherent_impl_item) = match item.inner { clean::StrippedItem(..) => ((None, None), false), clean::AssocConstItem(..) | clean::TypedefItem(_, true) if self.parent_is_trait_impl => { // skip associated items in trait impls ((None, None), false) } clean::AssocTypeItem(..) | clean::TyMethodItem(..) | clean::StructFieldItem(..) | clean::VariantItem(..) => { ((Some(*self.parent_stack.last().unwrap()), Some(&self.stack[..self.stack.len() - 1])), false) } clean::MethodItem(..) | clean::AssocConstItem(..) => { if self.parent_stack.is_empty() { ((None, None), false) } else { let last = self.parent_stack.last().unwrap(); let did = *last; let path = match self.paths.get(&did) { // The current stack not necessarily has correlation // for where the type was defined. On the other // hand, `paths` always has the right // information if present. Some(&(ref fqp, ItemType::Trait)) | Some(&(ref fqp, ItemType::Struct)) | Some(&(ref fqp, ItemType::Union)) | Some(&(ref fqp, ItemType::Enum)) => Some(&fqp[..fqp.len() - 1]), Some(..) => Some(&*self.stack), None => None }; ((Some(*last), path), true) } } _ => ((None, Some(&*self.stack)), false) }; match parent { (parent, Some(path)) if is_inherent_impl_item || (!self.stripped_mod) => { debug_assert!(!item.is_stripped()); // A crate has a module at its root, containing all items, // which should not be indexed. The crate-item itself is // inserted later on when serializing the search-index. if item.def_id.index != CRATE_DEF_INDEX { self.search_index.push(IndexItem { ty: item.type_(), name: s.to_string(), path: path.join("::"), desc: shorten(plain_summary_line(item.doc_value())), parent, parent_idx: None, search_type: get_index_search_type(&item), }); } } (Some(parent), None) if is_inherent_impl_item => { // We have a parent, but we don't know where they're // defined yet. Wait for later to index this item. 
self.orphan_impl_items.push((parent, item.clone())); } _ => {} } } // Keep track of the fully qualified path for this item. let pushed = match item.name { Some(ref n) if !n.is_empty() => { self.stack.push(n.to_string()); true } _ => false, }; match item.inner { clean::StructItem(..) | clean::EnumItem(..) | clean::TypedefItem(..) | clean::TraitItem(..) | clean::FunctionItem(..) | clean::ModuleItem(..) | clean::ForeignFunctionItem(..) | clean::ForeignStaticItem(..) | clean::ConstantItem(..) | clean::StaticItem(..) | clean::UnionItem(..) | clean::ForeignTypeItem | clean::MacroItem(..) | clean::ProcMacroItem(..) if !self.stripped_mod => { // Re-exported items mean that the same id can show up twice // in the rustdoc ast that we're looking at. We know, // however, that a re-exported item doesn't show up in the // `public_items` map, so we can skip inserting into the // paths map if there was already an entry present and we're // not a public item. if !self.paths.contains_key(&item.def_id) || self.access_levels.is_public(item.def_id) { self.paths.insert(item.def_id, (self.stack.clone(), item.type_())); } self.add_aliases(&item); } // Link variants to their parent enum because pages aren't emitted // for each variant. clean::VariantItem(..) if !self.stripped_mod => { let mut stack = self.stack.clone(); stack.pop(); self.paths.insert(item.def_id, (stack, ItemType::Enum)); } clean::PrimitiveItem(..) => { self.add_aliases(&item); self.paths.insert(item.def_id, (self.stack.clone(), item.type_())); } _ => {} } // Maintain the parent stack let orig_parent_is_trait_impl = self.parent_is_trait_impl; let parent_pushed = match item.inner { clean::TraitItem(..) | clean::EnumItem(..) | clean::ForeignTypeItem | clean::StructItem(..) | clean::UnionItem(..) => { self.parent_stack.push(item.def_id); self.parent_is_trait_impl = false; true } clean::ImplItem(ref i) => { self.parent_is_trait_impl = i.trait_.is_some(); match i.for_ { clean::ResolvedPath{ did, .. 
} => { self.parent_stack.push(did); true } ref t => { let prim_did = t.primitive_type().and_then(|t| { self.primitive_locations.get(&t).cloned() }); match prim_did { Some(did) => { self.parent_stack.push(did); true } None => false, } } } } _ => false }; // Once we've recursively found all the generics, hoard off all the // implementations elsewhere. let ret = self.fold_item_recur(item).and_then(|item| { if let clean::Item { inner: clean::ImplItem(_), .. } = item { // Figure out the id of this impl. This may map to a // primitive rather than always to a struct/enum. // Note: matching twice to restrict the lifetime of the `i` borrow. let mut dids = FxHashSet::default(); if let clean::Item { inner: clean::ImplItem(ref i), .. } = item { match i.for_ { clean::ResolvedPath { did, .. } | clean::BorrowedRef { type_: box clean::ResolvedPath { did, .. }, .. } => { dids.insert(did); } ref t => { let did = t.primitive_type().and_then(|t| { self.primitive_locations.get(&t).cloned() }); if let Some(did) = did { dids.insert(did); } } } if let Some(generics) = i.trait_.as_ref().and_then(|t| t.generics()) { for bound in generics { if let Some(did) = bound.def_id() { dids.insert(did); } } } } else { unreachable!() }; let impl_item = Impl { impl_item: item, }; if impl_item.trait_did().map_or(true, |d| self.traits.contains_key(&d)) { for did in dids { self.impls.entry(did).or_insert(vec![]).push(impl_item.clone()); } } else { let trait_did = impl_item.trait_did().unwrap(); self.orphan_trait_impls.push((trait_did, dids, impl_item)); } None } else { Some(item) } }); if pushed { self.stack.pop().unwrap(); } if parent_pushed { self.parent_stack.pop().unwrap(); } self.stripped_mod = orig_stripped_mod; self.parent_is_trait_impl = orig_parent_is_trait_impl; ret } } impl Cache { fn add_aliases(&mut self, item: &clean::Item) { if item.def_id.index == CRATE_DEF_INDEX { return } if let Some(ref item_name) = item.name { let path = self.paths.get(&item.def_id) .map(|p| p.0[..p.0.len() - 
1].join("::")) .unwrap_or("std".to_owned()); for alias in item.attrs.lists(sym::doc) .filter(|a| a.check_name(sym::alias)) .filter_map(|a| a.value_str() .map(|s| s.to_string().replace("\"", ""))) .filter(|v| !v.is_empty()) .collect::<FxHashSet<_>>() .into_iter() { self.aliases.entry(alias) .or_insert(Vec::with_capacity(1)) .push(IndexItem { ty: item.type_(), name: item_name.to_string(), path: path.clone(), desc: shorten(plain_summary_line(item.doc_value())), parent: None, parent_idx: None, search_type: get_index_search_type(&item), }); } } } } /// Attempts to find where an external crate is located, given that we're /// rendering in to the specified source destination. fn extern_location(e: &clean::ExternalCrate, extern_url: Option<&str>, dst: &Path) -> ExternalLocation { use ExternalLocation::*; // See if there's documentation generated into the local directory let local_location = dst.join(&e.name); if local_location.is_dir() { return Local; } if let Some(url) = extern_url { let mut url = url.to_string(); if !url.ends_with("/") { url.push('/'); } return Remote(url); } // Failing that, see if there's an attribute specifying where to find this // external crate e.attrs.lists(sym::doc) .filter(|a| a.check_name(sym::html_root_url)) .filter_map(|a| a.value_str()) .map(|url| { let mut url = url.to_string(); if !url.ends_with("/") { url.push('/') } Remote(url) }).next().unwrap_or(Unknown) // Well, at least we tried. } /// Builds the search index from the collected metadata fn build_index(krate: &clean::Crate, cache: &mut Cache) -> String { let mut nodeid_to_pathid = FxHashMap::default(); let mut crate_items = Vec::with_capacity(cache.search_index.len()); let mut crate_paths = Vec::<Json>::new(); let Cache { ref mut search_index, ref orphan_impl_items, ref paths, .. } = *cache; // Attach all orphan items to the type's definition if the type // has since been learned. 
for &(did, ref item) in orphan_impl_items { if let Some(&(ref fqp, _)) = paths.get(&did) { search_index.push(IndexItem { ty: item.type_(), name: item.name.clone().unwrap(), path: fqp[..fqp.len() - 1].join("::"), desc: shorten(plain_summary_line(item.doc_value())), parent: Some(did), parent_idx: None, search_type: get_index_search_type(&item), }); } } // Reduce `NodeId` in paths into smaller sequential numbers, // and prune the paths that do not appear in the index. let mut lastpath = String::new(); let mut lastpathid = 0usize; for item in search_index { item.parent_idx = item.parent.map(|nodeid| { if nodeid_to_pathid.contains_key(&nodeid) { *nodeid_to_pathid.get(&nodeid).unwrap() } else { let pathid = lastpathid; nodeid_to_pathid.insert(nodeid, pathid); lastpathid += 1; let &(ref fqp, short) = paths.get(&nodeid).unwrap(); crate_paths.push(((short as usize), fqp.last().unwrap().clone()).to_json()); pathid } }); // Omit the parent path if it is same to that of the prior item. if lastpath == item.path { item.path.clear(); } else { lastpath = item.path.clone(); } crate_items.push(item.to_json()); } let crate_doc = krate.module.as_ref().map(|module| { shorten(plain_summary_line(module.doc_value())) }).unwrap_or(String::new()); let mut crate_data = BTreeMap::new(); crate_data.insert("doc".to_owned(), Json::String(crate_doc)); crate_data.insert("i".to_owned(), Json::Array(crate_items)); crate_data.insert("p".to_owned(), Json::Array(crate_paths)); // Collect the index into a string format!("searchIndex[{}] = {};", as_json(&krate.name), Json::Object(crate_data)) } fn get_index_search_type(item: &clean::Item) -> Option<IndexItemFunctionType> { let (all_types, ret_types) = match item.inner { clean::FunctionItem(ref f) => (&f.all_types, &f.ret_types), clean::MethodItem(ref m) => (&m.all_types, &m.ret_types), clean::TyMethodItem(ref m) => (&m.all_types, &m.ret_types), _ => return None, }; let inputs = all_types.iter().map(|arg| { get_index_type(&arg) }).filter(|a| 
a.name.is_some()).collect(); let output = ret_types.iter().map(|arg| { get_index_type(&arg) }).filter(|a| a.name.is_some()).collect::<Vec<_>>(); let output = if output.is_empty() { None } else { Some(output) }; Some(IndexItemFunctionType { inputs, output }) } fn get_index_type(clean_type: &clean::Type) -> Type { let t = Type { name: get_index_type_name(clean_type, true).map(|s| s.to_ascii_lowercase()), generics: get_generics(clean_type), }; t } fn get_index_type_name(clean_type: &clean::Type, accept_generic: bool) -> Option<String>
fn get_generics(clean_type: &clean::Type) -> Option<Vec<String>> { clean_type.generics() .and_then(|types| { let r = types.iter() .filter_map(|t| get_index_type_name(t, false)) .map(|s| s.to_ascii_lowercase()) .collect::<Vec<_>>(); if r.is_empty() { None } else { Some(r) } }) }
{ match *clean_type { clean::ResolvedPath { ref path, .. } => { let segments = &path.segments; let path_segment = segments.into_iter().last().unwrap_or_else(|| panic!( "get_index_type_name(clean_type: {:?}, accept_generic: {:?}) had length zero path", clean_type, accept_generic )); Some(path_segment.name.clone()) } clean::Generic(ref s) if accept_generic => Some(s.clone()), clean::Primitive(ref p) => Some(format!("{:?}", p)), clean::BorrowedRef { ref type_, .. } => get_index_type_name(type_, accept_generic), // FIXME: add all from clean::Type. _ => None } }
identifier_body
cache.rs
use crate::clean::{self, GetDefId, AttributesExt}; use crate::fold::DocFolder; use rustc::hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId}; use rustc::middle::privacy::AccessLevels; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use std::mem; use std::path::{Path, PathBuf}; use std::collections::BTreeMap; use syntax::source_map::FileName; use syntax::symbol::sym; use serialize::json::{ToJson, Json, as_json}; use super::{ItemType, IndexItem, IndexItemFunctionType, Impl, shorten, plain_summary_line}; use super::{Type, RenderInfo}; /// Indicates where an external crate can be found. pub enum ExternalLocation { /// Remote URL root of the external crate Remote(String), /// This external crate can be found in the local doc/ folder Local, /// The external crate could not be found. Unknown, } /// This cache is used to store information about the `clean::Crate` being /// rendered in order to provide more useful documentation. This contains /// information like all implementors of a trait, all traits a type implements, /// documentation for all known traits, etc. /// /// This structure purposefully does not implement `Clone` because it's intended /// to be a fairly large and expensive structure to clone. Instead this adheres /// to `Send` so it may be stored in a `Arc` instance and shared among the various /// rendering threads. #[derive(Default)] crate struct Cache { /// Maps a type ID to all known implementations for that type. This is only /// recognized for intra-crate `ResolvedPath` types, and is used to print /// out extra documentation on the page of an enum/struct. /// /// The values of the map are a list of implementations and documentation /// found on that implementation. pub impls: FxHashMap<DefId, Vec<Impl>>, /// Maintains a mapping of local crate `NodeId`s to the fully qualified name /// and "short type description" of that node. This is used when generating /// URLs when a type is being linked to. 
External paths are not located in /// this map because the `External` type itself has all the information /// necessary. pub paths: FxHashMap<DefId, (Vec<String>, ItemType)>, /// Similar to `paths`, but only holds external paths. This is only used for /// generating explicit hyperlinks to other crates. pub external_paths: FxHashMap<DefId, (Vec<String>, ItemType)>, /// Maps local `DefId`s of exported types to fully qualified paths. /// Unlike 'paths', this mapping ignores any renames that occur /// due to 'use' statements. /// /// This map is used when writing out the special 'implementors' /// javascript file. By using the exact path that the type /// is declared with, we ensure that each path will be identical /// to the path used if the corresponding type is inlined. By /// doing this, we can detect duplicate impls on a trait page, and only display /// the impl for the inlined type. pub exact_paths: FxHashMap<DefId, Vec<String>>, /// This map contains information about all known traits of this crate. /// Implementations of a crate should inherit the documentation of the /// parent trait if no extra documentation is specified, and default methods /// should show up in documentation about trait implementations. pub traits: FxHashMap<DefId, clean::Trait>, /// When rendering traits, it's often useful to be able to list all /// implementors of the trait, and this mapping is exactly, that: a mapping /// of trait ids to the list of known implementors of the trait pub implementors: FxHashMap<DefId, Vec<Impl>>, /// Cache of where external crate documentation can be found. pub extern_locations: FxHashMap<CrateNum, (String, PathBuf, ExternalLocation)>, /// Cache of where documentation for primitives can be found. pub primitive_locations: FxHashMap<clean::PrimitiveType, DefId>, // Note that external items for which `doc(hidden)` applies to are shown as // non-reachable while local items aren't. This is because we're reusing // the access levels from the privacy check pass. 
pub access_levels: AccessLevels<DefId>, /// The version of the crate being documented, if given from the `--crate-version` flag. pub crate_version: Option<String>, // Private fields only used when initially crawling a crate to build a cache stack: Vec<String>, parent_stack: Vec<DefId>, parent_is_trait_impl: bool, search_index: Vec<IndexItem>, stripped_mod: bool, pub deref_trait_did: Option<DefId>, pub deref_mut_trait_did: Option<DefId>, pub owned_box_did: Option<DefId>, masked_crates: FxHashSet<CrateNum>, // In rare case where a structure is defined in one module but implemented // in another, if the implementing module is parsed before defining module, // then the fully qualified name of the structure isn't presented in `paths` // yet when its implementation methods are being indexed. Caches such methods // and their parent id here and indexes them at the end of crate parsing. orphan_impl_items: Vec<(DefId, clean::Item)>, // Similarly to `orphan_impl_items`, sometimes trait impls are picked up // even though the trait itself is not exported. This can happen if a trait // was defined in function/expression scope, since the impl will be picked // up by `collect-trait-impls` but the trait won't be scraped out in the HIR // crawl. In order to prevent crashes when looking for spotlight traits or // when gathering trait documentation on a type, hold impls here while // folding and add them to the cache later on if we find the trait. orphan_trait_impls: Vec<(DefId, FxHashSet<DefId>, Impl)>, /// Aliases added through `#[doc(alias = "...")]`. Since a few items can have the same alias, /// we need the alias element to have an array of items. 
pub(super) aliases: FxHashMap<String, Vec<IndexItem>>, } impl Cache { pub fn from_krate( renderinfo: RenderInfo, extern_html_root_urls: &BTreeMap<String, String>, dst: &Path, mut krate: clean::Crate, ) -> (clean::Crate, String, Cache) { // Crawl the crate to build various caches used for the output let RenderInfo { inlined: _, external_paths, exact_paths, access_levels, deref_trait_did, deref_mut_trait_did, owned_box_did, } = renderinfo; let external_paths = external_paths.into_iter() .map(|(k, (v, t))| (k, (v, ItemType::from(t)))) .collect(); let mut cache = Cache { impls: Default::default(), external_paths, exact_paths, paths: Default::default(), implementors: Default::default(), stack: Vec::new(), parent_stack: Vec::new(), search_index: Vec::new(), parent_is_trait_impl: false, extern_locations: Default::default(), primitive_locations: Default::default(), stripped_mod: false, access_levels, crate_version: krate.version.take(), orphan_impl_items: Vec::new(), orphan_trait_impls: Vec::new(), traits: krate.external_traits.replace(Default::default()), deref_trait_did, deref_mut_trait_did, owned_box_did, masked_crates: mem::take(&mut krate.masked_crates), aliases: Default::default(), }; // Cache where all our extern crates are located for &(n, ref e) in &krate.externs { let src_root = match e.src { FileName::Real(ref p) => match p.parent() { Some(p) => p.to_path_buf(), None => PathBuf::new(), }, _ => PathBuf::new(), };
cache.external_paths.insert(did, (vec![e.name.to_string()], ItemType::Module)); } // Cache where all known primitives have their documentation located. // // Favor linking to as local extern as possible, so iterate all crates in // reverse topological order. for &(_, ref e) in krate.externs.iter().rev() { for &(def_id, prim, _) in &e.primitives { cache.primitive_locations.insert(prim, def_id); } } for &(def_id, prim, _) in &krate.primitives { cache.primitive_locations.insert(prim, def_id); } cache.stack.push(krate.name.clone()); krate = cache.fold_crate(krate); for (trait_did, dids, impl_) in cache.orphan_trait_impls.drain(..) { if cache.traits.contains_key(&trait_did) { for did in dids { cache.impls.entry(did).or_insert(vec![]).push(impl_.clone()); } } } // Build our search index let index = build_index(&krate, &mut cache); (krate, index, cache) } } impl DocFolder for Cache { fn fold_item(&mut self, item: clean::Item) -> Option<clean::Item> { if item.def_id.is_local() { debug!("folding {} \"{:?}\", id {:?}", item.type_(), item.name, item.def_id); } // If this is a stripped module, // we don't want it or its children in the search index. let orig_stripped_mod = match item.inner { clean::StrippedItem(box clean::ModuleItem(..)) => { mem::replace(&mut self.stripped_mod, true) } _ => self.stripped_mod, }; // If the impl is from a masked crate or references something from a // masked crate then remove it completely. if let clean::ImplItem(ref i) = item.inner { if self.masked_crates.contains(&item.def_id.krate) || i.trait_.def_id().map_or(false, |d| self.masked_crates.contains(&d.krate)) || i.for_.def_id().map_or(false, |d| self.masked_crates.contains(&d.krate)) { return None; } } // Propagate a trait method's documentation to all implementors of the // trait. if let clean::TraitItem(ref t) = item.inner { self.traits.entry(item.def_id).or_insert_with(|| t.clone()); } // Collect all the implementors of traits. 
if let clean::ImplItem(ref i) = item.inner { if let Some(did) = i.trait_.def_id() { if i.blanket_impl.is_none() { self.implementors.entry(did).or_default().push(Impl { impl_item: item.clone(), }); } } } // Index this method for searching later on. if let Some(ref s) = item.name { let (parent, is_inherent_impl_item) = match item.inner { clean::StrippedItem(..) => ((None, None), false), clean::AssocConstItem(..) | clean::TypedefItem(_, true) if self.parent_is_trait_impl => { // skip associated items in trait impls ((None, None), false) } clean::AssocTypeItem(..) | clean::TyMethodItem(..) | clean::StructFieldItem(..) | clean::VariantItem(..) => { ((Some(*self.parent_stack.last().unwrap()), Some(&self.stack[..self.stack.len() - 1])), false) } clean::MethodItem(..) | clean::AssocConstItem(..) => { if self.parent_stack.is_empty() { ((None, None), false) } else { let last = self.parent_stack.last().unwrap(); let did = *last; let path = match self.paths.get(&did) { // The current stack not necessarily has correlation // for where the type was defined. On the other // hand, `paths` always has the right // information if present. Some(&(ref fqp, ItemType::Trait)) | Some(&(ref fqp, ItemType::Struct)) | Some(&(ref fqp, ItemType::Union)) | Some(&(ref fqp, ItemType::Enum)) => Some(&fqp[..fqp.len() - 1]), Some(..) => Some(&*self.stack), None => None }; ((Some(*last), path), true) } } _ => ((None, Some(&*self.stack)), false) }; match parent { (parent, Some(path)) if is_inherent_impl_item || (!self.stripped_mod) => { debug_assert!(!item.is_stripped()); // A crate has a module at its root, containing all items, // which should not be indexed. The crate-item itself is // inserted later on when serializing the search-index. 
if item.def_id.index != CRATE_DEF_INDEX { self.search_index.push(IndexItem { ty: item.type_(), name: s.to_string(), path: path.join("::"), desc: shorten(plain_summary_line(item.doc_value())), parent, parent_idx: None, search_type: get_index_search_type(&item), }); } } (Some(parent), None) if is_inherent_impl_item => { // We have a parent, but we don't know where they're // defined yet. Wait for later to index this item. self.orphan_impl_items.push((parent, item.clone())); } _ => {} } } // Keep track of the fully qualified path for this item. let pushed = match item.name { Some(ref n) if !n.is_empty() => { self.stack.push(n.to_string()); true } _ => false, }; match item.inner { clean::StructItem(..) | clean::EnumItem(..) | clean::TypedefItem(..) | clean::TraitItem(..) | clean::FunctionItem(..) | clean::ModuleItem(..) | clean::ForeignFunctionItem(..) | clean::ForeignStaticItem(..) | clean::ConstantItem(..) | clean::StaticItem(..) | clean::UnionItem(..) | clean::ForeignTypeItem | clean::MacroItem(..) | clean::ProcMacroItem(..) if !self.stripped_mod => { // Re-exported items mean that the same id can show up twice // in the rustdoc ast that we're looking at. We know, // however, that a re-exported item doesn't show up in the // `public_items` map, so we can skip inserting into the // paths map if there was already an entry present and we're // not a public item. if !self.paths.contains_key(&item.def_id) || self.access_levels.is_public(item.def_id) { self.paths.insert(item.def_id, (self.stack.clone(), item.type_())); } self.add_aliases(&item); } // Link variants to their parent enum because pages aren't emitted // for each variant. clean::VariantItem(..) if !self.stripped_mod => { let mut stack = self.stack.clone(); stack.pop(); self.paths.insert(item.def_id, (stack, ItemType::Enum)); } clean::PrimitiveItem(..) 
=> { self.add_aliases(&item); self.paths.insert(item.def_id, (self.stack.clone(), item.type_())); } _ => {} } // Maintain the parent stack let orig_parent_is_trait_impl = self.parent_is_trait_impl; let parent_pushed = match item.inner { clean::TraitItem(..) | clean::EnumItem(..) | clean::ForeignTypeItem | clean::StructItem(..) | clean::UnionItem(..) => { self.parent_stack.push(item.def_id); self.parent_is_trait_impl = false; true } clean::ImplItem(ref i) => { self.parent_is_trait_impl = i.trait_.is_some(); match i.for_ { clean::ResolvedPath{ did, .. } => { self.parent_stack.push(did); true } ref t => { let prim_did = t.primitive_type().and_then(|t| { self.primitive_locations.get(&t).cloned() }); match prim_did { Some(did) => { self.parent_stack.push(did); true } None => false, } } } } _ => false }; // Once we've recursively found all the generics, hoard off all the // implementations elsewhere. let ret = self.fold_item_recur(item).and_then(|item| { if let clean::Item { inner: clean::ImplItem(_), .. } = item { // Figure out the id of this impl. This may map to a // primitive rather than always to a struct/enum. // Note: matching twice to restrict the lifetime of the `i` borrow. let mut dids = FxHashSet::default(); if let clean::Item { inner: clean::ImplItem(ref i), .. } = item { match i.for_ { clean::ResolvedPath { did, .. } | clean::BorrowedRef { type_: box clean::ResolvedPath { did, .. }, .. 
} => { dids.insert(did); } ref t => { let did = t.primitive_type().and_then(|t| { self.primitive_locations.get(&t).cloned() }); if let Some(did) = did { dids.insert(did); } } } if let Some(generics) = i.trait_.as_ref().and_then(|t| t.generics()) { for bound in generics { if let Some(did) = bound.def_id() { dids.insert(did); } } } } else { unreachable!() }; let impl_item = Impl { impl_item: item, }; if impl_item.trait_did().map_or(true, |d| self.traits.contains_key(&d)) { for did in dids { self.impls.entry(did).or_insert(vec![]).push(impl_item.clone()); } } else { let trait_did = impl_item.trait_did().unwrap(); self.orphan_trait_impls.push((trait_did, dids, impl_item)); } None } else { Some(item) } }); if pushed { self.stack.pop().unwrap(); } if parent_pushed { self.parent_stack.pop().unwrap(); } self.stripped_mod = orig_stripped_mod; self.parent_is_trait_impl = orig_parent_is_trait_impl; ret } } impl Cache { fn add_aliases(&mut self, item: &clean::Item) { if item.def_id.index == CRATE_DEF_INDEX { return } if let Some(ref item_name) = item.name { let path = self.paths.get(&item.def_id) .map(|p| p.0[..p.0.len() - 1].join("::")) .unwrap_or("std".to_owned()); for alias in item.attrs.lists(sym::doc) .filter(|a| a.check_name(sym::alias)) .filter_map(|a| a.value_str() .map(|s| s.to_string().replace("\"", ""))) .filter(|v| !v.is_empty()) .collect::<FxHashSet<_>>() .into_iter() { self.aliases.entry(alias) .or_insert(Vec::with_capacity(1)) .push(IndexItem { ty: item.type_(), name: item_name.to_string(), path: path.clone(), desc: shorten(plain_summary_line(item.doc_value())), parent: None, parent_idx: None, search_type: get_index_search_type(&item), }); } } } } /// Attempts to find where an external crate is located, given that we're /// rendering in to the specified source destination. 
fn extern_location(e: &clean::ExternalCrate, extern_url: Option<&str>, dst: &Path) -> ExternalLocation { use ExternalLocation::*; // See if there's documentation generated into the local directory let local_location = dst.join(&e.name); if local_location.is_dir() { return Local; } if let Some(url) = extern_url { let mut url = url.to_string(); if !url.ends_with("/") { url.push('/'); } return Remote(url); } // Failing that, see if there's an attribute specifying where to find this // external crate e.attrs.lists(sym::doc) .filter(|a| a.check_name(sym::html_root_url)) .filter_map(|a| a.value_str()) .map(|url| { let mut url = url.to_string(); if !url.ends_with("/") { url.push('/') } Remote(url) }).next().unwrap_or(Unknown) // Well, at least we tried. } /// Builds the search index from the collected metadata fn build_index(krate: &clean::Crate, cache: &mut Cache) -> String { let mut nodeid_to_pathid = FxHashMap::default(); let mut crate_items = Vec::with_capacity(cache.search_index.len()); let mut crate_paths = Vec::<Json>::new(); let Cache { ref mut search_index, ref orphan_impl_items, ref paths, .. } = *cache; // Attach all orphan items to the type's definition if the type // has since been learned. for &(did, ref item) in orphan_impl_items { if let Some(&(ref fqp, _)) = paths.get(&did) { search_index.push(IndexItem { ty: item.type_(), name: item.name.clone().unwrap(), path: fqp[..fqp.len() - 1].join("::"), desc: shorten(plain_summary_line(item.doc_value())), parent: Some(did), parent_idx: None, search_type: get_index_search_type(&item), }); } } // Reduce `NodeId` in paths into smaller sequential numbers, // and prune the paths that do not appear in the index. 
let mut lastpath = String::new(); let mut lastpathid = 0usize; for item in search_index { item.parent_idx = item.parent.map(|nodeid| { if nodeid_to_pathid.contains_key(&nodeid) { *nodeid_to_pathid.get(&nodeid).unwrap() } else { let pathid = lastpathid; nodeid_to_pathid.insert(nodeid, pathid); lastpathid += 1; let &(ref fqp, short) = paths.get(&nodeid).unwrap(); crate_paths.push(((short as usize), fqp.last().unwrap().clone()).to_json()); pathid } }); // Omit the parent path if it is same to that of the prior item. if lastpath == item.path { item.path.clear(); } else { lastpath = item.path.clone(); } crate_items.push(item.to_json()); } let crate_doc = krate.module.as_ref().map(|module| { shorten(plain_summary_line(module.doc_value())) }).unwrap_or(String::new()); let mut crate_data = BTreeMap::new(); crate_data.insert("doc".to_owned(), Json::String(crate_doc)); crate_data.insert("i".to_owned(), Json::Array(crate_items)); crate_data.insert("p".to_owned(), Json::Array(crate_paths)); // Collect the index into a string format!("searchIndex[{}] = {};", as_json(&krate.name), Json::Object(crate_data)) } fn get_index_search_type(item: &clean::Item) -> Option<IndexItemFunctionType> { let (all_types, ret_types) = match item.inner { clean::FunctionItem(ref f) => (&f.all_types, &f.ret_types), clean::MethodItem(ref m) => (&m.all_types, &m.ret_types), clean::TyMethodItem(ref m) => (&m.all_types, &m.ret_types), _ => return None, }; let inputs = all_types.iter().map(|arg| { get_index_type(&arg) }).filter(|a| a.name.is_some()).collect(); let output = ret_types.iter().map(|arg| { get_index_type(&arg) }).filter(|a| a.name.is_some()).collect::<Vec<_>>(); let output = if output.is_empty() { None } else { Some(output) }; Some(IndexItemFunctionType { inputs, output }) } fn get_index_type(clean_type: &clean::Type) -> Type { let t = Type { name: get_index_type_name(clean_type, true).map(|s| s.to_ascii_lowercase()), generics: get_generics(clean_type), }; t } fn get_index_type_name(clean_type: 
&clean::Type, accept_generic: bool) -> Option<String> { match *clean_type { clean::ResolvedPath { ref path, .. } => { let segments = &path.segments; let path_segment = segments.into_iter().last().unwrap_or_else(|| panic!( "get_index_type_name(clean_type: {:?}, accept_generic: {:?}) had length zero path", clean_type, accept_generic )); Some(path_segment.name.clone()) } clean::Generic(ref s) if accept_generic => Some(s.clone()), clean::Primitive(ref p) => Some(format!("{:?}", p)), clean::BorrowedRef { ref type_, .. } => get_index_type_name(type_, accept_generic), // FIXME: add all from clean::Type. _ => None } } fn get_generics(clean_type: &clean::Type) -> Option<Vec<String>> { clean_type.generics() .and_then(|types| { let r = types.iter() .filter_map(|t| get_index_type_name(t, false)) .map(|s| s.to_ascii_lowercase()) .collect::<Vec<_>>(); if r.is_empty() { None } else { Some(r) } }) }
let extern_url = extern_html_root_urls.get(&e.name).map(|u| &**u); cache.extern_locations.insert(n, (e.name.clone(), src_root, extern_location(e, extern_url, &dst))); let did = DefId { krate: n, index: CRATE_DEF_INDEX };
random_line_split
cache.rs
use crate::clean::{self, GetDefId, AttributesExt}; use crate::fold::DocFolder; use rustc::hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId}; use rustc::middle::privacy::AccessLevels; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use std::mem; use std::path::{Path, PathBuf}; use std::collections::BTreeMap; use syntax::source_map::FileName; use syntax::symbol::sym; use serialize::json::{ToJson, Json, as_json}; use super::{ItemType, IndexItem, IndexItemFunctionType, Impl, shorten, plain_summary_line}; use super::{Type, RenderInfo}; /// Indicates where an external crate can be found. pub enum ExternalLocation { /// Remote URL root of the external crate Remote(String), /// This external crate can be found in the local doc/ folder Local, /// The external crate could not be found. Unknown, } /// This cache is used to store information about the `clean::Crate` being /// rendered in order to provide more useful documentation. This contains /// information like all implementors of a trait, all traits a type implements, /// documentation for all known traits, etc. /// /// This structure purposefully does not implement `Clone` because it's intended /// to be a fairly large and expensive structure to clone. Instead this adheres /// to `Send` so it may be stored in a `Arc` instance and shared among the various /// rendering threads. #[derive(Default)] crate struct Cache { /// Maps a type ID to all known implementations for that type. This is only /// recognized for intra-crate `ResolvedPath` types, and is used to print /// out extra documentation on the page of an enum/struct. /// /// The values of the map are a list of implementations and documentation /// found on that implementation. pub impls: FxHashMap<DefId, Vec<Impl>>, /// Maintains a mapping of local crate `NodeId`s to the fully qualified name /// and "short type description" of that node. This is used when generating /// URLs when a type is being linked to. 
External paths are not located in /// this map because the `External` type itself has all the information /// necessary. pub paths: FxHashMap<DefId, (Vec<String>, ItemType)>, /// Similar to `paths`, but only holds external paths. This is only used for /// generating explicit hyperlinks to other crates. pub external_paths: FxHashMap<DefId, (Vec<String>, ItemType)>, /// Maps local `DefId`s of exported types to fully qualified paths. /// Unlike 'paths', this mapping ignores any renames that occur /// due to 'use' statements. /// /// This map is used when writing out the special 'implementors' /// javascript file. By using the exact path that the type /// is declared with, we ensure that each path will be identical /// to the path used if the corresponding type is inlined. By /// doing this, we can detect duplicate impls on a trait page, and only display /// the impl for the inlined type. pub exact_paths: FxHashMap<DefId, Vec<String>>, /// This map contains information about all known traits of this crate. /// Implementations of a crate should inherit the documentation of the /// parent trait if no extra documentation is specified, and default methods /// should show up in documentation about trait implementations. pub traits: FxHashMap<DefId, clean::Trait>, /// When rendering traits, it's often useful to be able to list all /// implementors of the trait, and this mapping is exactly, that: a mapping /// of trait ids to the list of known implementors of the trait pub implementors: FxHashMap<DefId, Vec<Impl>>, /// Cache of where external crate documentation can be found. pub extern_locations: FxHashMap<CrateNum, (String, PathBuf, ExternalLocation)>, /// Cache of where documentation for primitives can be found. pub primitive_locations: FxHashMap<clean::PrimitiveType, DefId>, // Note that external items for which `doc(hidden)` applies to are shown as // non-reachable while local items aren't. This is because we're reusing // the access levels from the privacy check pass. 
pub access_levels: AccessLevels<DefId>, /// The version of the crate being documented, if given from the `--crate-version` flag. pub crate_version: Option<String>, // Private fields only used when initially crawling a crate to build a cache stack: Vec<String>, parent_stack: Vec<DefId>, parent_is_trait_impl: bool, search_index: Vec<IndexItem>, stripped_mod: bool, pub deref_trait_did: Option<DefId>, pub deref_mut_trait_did: Option<DefId>, pub owned_box_did: Option<DefId>, masked_crates: FxHashSet<CrateNum>, // In rare case where a structure is defined in one module but implemented // in another, if the implementing module is parsed before defining module, // then the fully qualified name of the structure isn't presented in `paths` // yet when its implementation methods are being indexed. Caches such methods // and their parent id here and indexes them at the end of crate parsing. orphan_impl_items: Vec<(DefId, clean::Item)>, // Similarly to `orphan_impl_items`, sometimes trait impls are picked up // even though the trait itself is not exported. This can happen if a trait // was defined in function/expression scope, since the impl will be picked // up by `collect-trait-impls` but the trait won't be scraped out in the HIR // crawl. In order to prevent crashes when looking for spotlight traits or // when gathering trait documentation on a type, hold impls here while // folding and add them to the cache later on if we find the trait. orphan_trait_impls: Vec<(DefId, FxHashSet<DefId>, Impl)>, /// Aliases added through `#[doc(alias = "...")]`. Since a few items can have the same alias, /// we need the alias element to have an array of items. 
pub(super) aliases: FxHashMap<String, Vec<IndexItem>>, } impl Cache { pub fn from_krate( renderinfo: RenderInfo, extern_html_root_urls: &BTreeMap<String, String>, dst: &Path, mut krate: clean::Crate, ) -> (clean::Crate, String, Cache) { // Crawl the crate to build various caches used for the output let RenderInfo { inlined: _, external_paths, exact_paths, access_levels, deref_trait_did, deref_mut_trait_did, owned_box_did, } = renderinfo; let external_paths = external_paths.into_iter() .map(|(k, (v, t))| (k, (v, ItemType::from(t)))) .collect(); let mut cache = Cache { impls: Default::default(), external_paths, exact_paths, paths: Default::default(), implementors: Default::default(), stack: Vec::new(), parent_stack: Vec::new(), search_index: Vec::new(), parent_is_trait_impl: false, extern_locations: Default::default(), primitive_locations: Default::default(), stripped_mod: false, access_levels, crate_version: krate.version.take(), orphan_impl_items: Vec::new(), orphan_trait_impls: Vec::new(), traits: krate.external_traits.replace(Default::default()), deref_trait_did, deref_mut_trait_did, owned_box_did, masked_crates: mem::take(&mut krate.masked_crates), aliases: Default::default(), }; // Cache where all our extern crates are located for &(n, ref e) in &krate.externs { let src_root = match e.src { FileName::Real(ref p) => match p.parent() { Some(p) => p.to_path_buf(), None => PathBuf::new(), }, _ => PathBuf::new(), }; let extern_url = extern_html_root_urls.get(&e.name).map(|u| &**u); cache.extern_locations.insert(n, (e.name.clone(), src_root, extern_location(e, extern_url, &dst))); let did = DefId { krate: n, index: CRATE_DEF_INDEX }; cache.external_paths.insert(did, (vec![e.name.to_string()], ItemType::Module)); } // Cache where all known primitives have their documentation located. // // Favor linking to as local extern as possible, so iterate all crates in // reverse topological order. 
for &(_, ref e) in krate.externs.iter().rev() { for &(def_id, prim, _) in &e.primitives { cache.primitive_locations.insert(prim, def_id); } } for &(def_id, prim, _) in &krate.primitives { cache.primitive_locations.insert(prim, def_id); } cache.stack.push(krate.name.clone()); krate = cache.fold_crate(krate); for (trait_did, dids, impl_) in cache.orphan_trait_impls.drain(..) { if cache.traits.contains_key(&trait_did) { for did in dids { cache.impls.entry(did).or_insert(vec![]).push(impl_.clone()); } } } // Build our search index let index = build_index(&krate, &mut cache); (krate, index, cache) } } impl DocFolder for Cache { fn fold_item(&mut self, item: clean::Item) -> Option<clean::Item> { if item.def_id.is_local() { debug!("folding {} \"{:?}\", id {:?}", item.type_(), item.name, item.def_id); } // If this is a stripped module, // we don't want it or its children in the search index. let orig_stripped_mod = match item.inner { clean::StrippedItem(box clean::ModuleItem(..)) => { mem::replace(&mut self.stripped_mod, true) } _ => self.stripped_mod, }; // If the impl is from a masked crate or references something from a // masked crate then remove it completely. if let clean::ImplItem(ref i) = item.inner { if self.masked_crates.contains(&item.def_id.krate) || i.trait_.def_id().map_or(false, |d| self.masked_crates.contains(&d.krate)) || i.for_.def_id().map_or(false, |d| self.masked_crates.contains(&d.krate)) { return None; } } // Propagate a trait method's documentation to all implementors of the // trait. if let clean::TraitItem(ref t) = item.inner { self.traits.entry(item.def_id).or_insert_with(|| t.clone()); } // Collect all the implementors of traits. if let clean::ImplItem(ref i) = item.inner { if let Some(did) = i.trait_.def_id() { if i.blanket_impl.is_none() { self.implementors.entry(did).or_default().push(Impl { impl_item: item.clone(), }); } } } // Index this method for searching later on. 
if let Some(ref s) = item.name { let (parent, is_inherent_impl_item) = match item.inner { clean::StrippedItem(..) => ((None, None), false), clean::AssocConstItem(..) | clean::TypedefItem(_, true) if self.parent_is_trait_impl => { // skip associated items in trait impls ((None, None), false) } clean::AssocTypeItem(..) | clean::TyMethodItem(..) | clean::StructFieldItem(..) | clean::VariantItem(..) => { ((Some(*self.parent_stack.last().unwrap()), Some(&self.stack[..self.stack.len() - 1])), false) } clean::MethodItem(..) | clean::AssocConstItem(..) => { if self.parent_stack.is_empty() { ((None, None), false) } else { let last = self.parent_stack.last().unwrap(); let did = *last; let path = match self.paths.get(&did) { // The current stack not necessarily has correlation // for where the type was defined. On the other // hand, `paths` always has the right // information if present. Some(&(ref fqp, ItemType::Trait)) | Some(&(ref fqp, ItemType::Struct)) | Some(&(ref fqp, ItemType::Union)) | Some(&(ref fqp, ItemType::Enum)) => Some(&fqp[..fqp.len() - 1]), Some(..) => Some(&*self.stack), None => None }; ((Some(*last), path), true) } } _ => ((None, Some(&*self.stack)), false) }; match parent { (parent, Some(path)) if is_inherent_impl_item || (!self.stripped_mod) => { debug_assert!(!item.is_stripped()); // A crate has a module at its root, containing all items, // which should not be indexed. The crate-item itself is // inserted later on when serializing the search-index. if item.def_id.index != CRATE_DEF_INDEX { self.search_index.push(IndexItem { ty: item.type_(), name: s.to_string(), path: path.join("::"), desc: shorten(plain_summary_line(item.doc_value())), parent, parent_idx: None, search_type: get_index_search_type(&item), }); } } (Some(parent), None) if is_inherent_impl_item => { // We have a parent, but we don't know where they're // defined yet. Wait for later to index this item. 
self.orphan_impl_items.push((parent, item.clone())); } _ => {} } } // Keep track of the fully qualified path for this item. let pushed = match item.name { Some(ref n) if !n.is_empty() => { self.stack.push(n.to_string()); true } _ => false, }; match item.inner { clean::StructItem(..) | clean::EnumItem(..) | clean::TypedefItem(..) | clean::TraitItem(..) | clean::FunctionItem(..) | clean::ModuleItem(..) | clean::ForeignFunctionItem(..) | clean::ForeignStaticItem(..) | clean::ConstantItem(..) | clean::StaticItem(..) | clean::UnionItem(..) | clean::ForeignTypeItem | clean::MacroItem(..) | clean::ProcMacroItem(..) if !self.stripped_mod => { // Re-exported items mean that the same id can show up twice // in the rustdoc ast that we're looking at. We know, // however, that a re-exported item doesn't show up in the // `public_items` map, so we can skip inserting into the // paths map if there was already an entry present and we're // not a public item. if !self.paths.contains_key(&item.def_id) || self.access_levels.is_public(item.def_id) { self.paths.insert(item.def_id, (self.stack.clone(), item.type_())); } self.add_aliases(&item); } // Link variants to their parent enum because pages aren't emitted // for each variant. clean::VariantItem(..) if !self.stripped_mod => { let mut stack = self.stack.clone(); stack.pop(); self.paths.insert(item.def_id, (stack, ItemType::Enum)); } clean::PrimitiveItem(..) => { self.add_aliases(&item); self.paths.insert(item.def_id, (self.stack.clone(), item.type_())); } _ => {} } // Maintain the parent stack let orig_parent_is_trait_impl = self.parent_is_trait_impl; let parent_pushed = match item.inner { clean::TraitItem(..) | clean::EnumItem(..) | clean::ForeignTypeItem | clean::StructItem(..) | clean::UnionItem(..) => { self.parent_stack.push(item.def_id); self.parent_is_trait_impl = false; true } clean::ImplItem(ref i) => { self.parent_is_trait_impl = i.trait_.is_some(); match i.for_ { clean::ResolvedPath{ did, .. 
} => { self.parent_stack.push(did); true } ref t => { let prim_did = t.primitive_type().and_then(|t| { self.primitive_locations.get(&t).cloned() }); match prim_did { Some(did) => { self.parent_stack.push(did); true } None => false, } } } } _ => false }; // Once we've recursively found all the generics, hoard off all the // implementations elsewhere. let ret = self.fold_item_recur(item).and_then(|item| { if let clean::Item { inner: clean::ImplItem(_), .. } = item { // Figure out the id of this impl. This may map to a // primitive rather than always to a struct/enum. // Note: matching twice to restrict the lifetime of the `i` borrow. let mut dids = FxHashSet::default(); if let clean::Item { inner: clean::ImplItem(ref i), .. } = item { match i.for_ { clean::ResolvedPath { did, .. } | clean::BorrowedRef { type_: box clean::ResolvedPath { did, .. }, .. } => { dids.insert(did); } ref t => { let did = t.primitive_type().and_then(|t| { self.primitive_locations.get(&t).cloned() }); if let Some(did) = did { dids.insert(did); } } } if let Some(generics) = i.trait_.as_ref().and_then(|t| t.generics()) { for bound in generics { if let Some(did) = bound.def_id() { dids.insert(did); } } } } else { unreachable!() }; let impl_item = Impl { impl_item: item, }; if impl_item.trait_did().map_or(true, |d| self.traits.contains_key(&d)) { for did in dids { self.impls.entry(did).or_insert(vec![]).push(impl_item.clone()); } } else { let trait_did = impl_item.trait_did().unwrap(); self.orphan_trait_impls.push((trait_did, dids, impl_item)); } None } else { Some(item) } }); if pushed { self.stack.pop().unwrap(); } if parent_pushed { self.parent_stack.pop().unwrap(); } self.stripped_mod = orig_stripped_mod; self.parent_is_trait_impl = orig_parent_is_trait_impl; ret } } impl Cache { fn add_aliases(&mut self, item: &clean::Item) { if item.def_id.index == CRATE_DEF_INDEX { return } if let Some(ref item_name) = item.name { let path = self.paths.get(&item.def_id) .map(|p| p.0[..p.0.len() - 
1].join("::")) .unwrap_or("std".to_owned()); for alias in item.attrs.lists(sym::doc) .filter(|a| a.check_name(sym::alias)) .filter_map(|a| a.value_str() .map(|s| s.to_string().replace("\"", ""))) .filter(|v| !v.is_empty()) .collect::<FxHashSet<_>>() .into_iter() { self.aliases.entry(alias) .or_insert(Vec::with_capacity(1)) .push(IndexItem { ty: item.type_(), name: item_name.to_string(), path: path.clone(), desc: shorten(plain_summary_line(item.doc_value())), parent: None, parent_idx: None, search_type: get_index_search_type(&item), }); } } } } /// Attempts to find where an external crate is located, given that we're /// rendering in to the specified source destination. fn extern_location(e: &clean::ExternalCrate, extern_url: Option<&str>, dst: &Path) -> ExternalLocation { use ExternalLocation::*; // See if there's documentation generated into the local directory let local_location = dst.join(&e.name); if local_location.is_dir() { return Local; } if let Some(url) = extern_url { let mut url = url.to_string(); if !url.ends_with("/") { url.push('/'); } return Remote(url); } // Failing that, see if there's an attribute specifying where to find this // external crate e.attrs.lists(sym::doc) .filter(|a| a.check_name(sym::html_root_url)) .filter_map(|a| a.value_str()) .map(|url| { let mut url = url.to_string(); if !url.ends_with("/") { url.push('/') } Remote(url) }).next().unwrap_or(Unknown) // Well, at least we tried. } /// Builds the search index from the collected metadata fn
(krate: &clean::Crate, cache: &mut Cache) -> String { let mut nodeid_to_pathid = FxHashMap::default(); let mut crate_items = Vec::with_capacity(cache.search_index.len()); let mut crate_paths = Vec::<Json>::new(); let Cache { ref mut search_index, ref orphan_impl_items, ref paths, .. } = *cache; // Attach all orphan items to the type's definition if the type // has since been learned. for &(did, ref item) in orphan_impl_items { if let Some(&(ref fqp, _)) = paths.get(&did) { search_index.push(IndexItem { ty: item.type_(), name: item.name.clone().unwrap(), path: fqp[..fqp.len() - 1].join("::"), desc: shorten(plain_summary_line(item.doc_value())), parent: Some(did), parent_idx: None, search_type: get_index_search_type(&item), }); } } // Reduce `NodeId` in paths into smaller sequential numbers, // and prune the paths that do not appear in the index. let mut lastpath = String::new(); let mut lastpathid = 0usize; for item in search_index { item.parent_idx = item.parent.map(|nodeid| { if nodeid_to_pathid.contains_key(&nodeid) { *nodeid_to_pathid.get(&nodeid).unwrap() } else { let pathid = lastpathid; nodeid_to_pathid.insert(nodeid, pathid); lastpathid += 1; let &(ref fqp, short) = paths.get(&nodeid).unwrap(); crate_paths.push(((short as usize), fqp.last().unwrap().clone()).to_json()); pathid } }); // Omit the parent path if it is same to that of the prior item. 
if lastpath == item.path { item.path.clear(); } else { lastpath = item.path.clone(); } crate_items.push(item.to_json()); } let crate_doc = krate.module.as_ref().map(|module| { shorten(plain_summary_line(module.doc_value())) }).unwrap_or(String::new()); let mut crate_data = BTreeMap::new(); crate_data.insert("doc".to_owned(), Json::String(crate_doc)); crate_data.insert("i".to_owned(), Json::Array(crate_items)); crate_data.insert("p".to_owned(), Json::Array(crate_paths)); // Collect the index into a string format!("searchIndex[{}] = {};", as_json(&krate.name), Json::Object(crate_data)) } fn get_index_search_type(item: &clean::Item) -> Option<IndexItemFunctionType> { let (all_types, ret_types) = match item.inner { clean::FunctionItem(ref f) => (&f.all_types, &f.ret_types), clean::MethodItem(ref m) => (&m.all_types, &m.ret_types), clean::TyMethodItem(ref m) => (&m.all_types, &m.ret_types), _ => return None, }; let inputs = all_types.iter().map(|arg| { get_index_type(&arg) }).filter(|a| a.name.is_some()).collect(); let output = ret_types.iter().map(|arg| { get_index_type(&arg) }).filter(|a| a.name.is_some()).collect::<Vec<_>>(); let output = if output.is_empty() { None } else { Some(output) }; Some(IndexItemFunctionType { inputs, output }) } fn get_index_type(clean_type: &clean::Type) -> Type { let t = Type { name: get_index_type_name(clean_type, true).map(|s| s.to_ascii_lowercase()), generics: get_generics(clean_type), }; t } fn get_index_type_name(clean_type: &clean::Type, accept_generic: bool) -> Option<String> { match *clean_type { clean::ResolvedPath { ref path, .. } => { let segments = &path.segments; let path_segment = segments.into_iter().last().unwrap_or_else(|| panic!( "get_index_type_name(clean_type: {:?}, accept_generic: {:?}) had length zero path", clean_type, accept_generic )); Some(path_segment.name.clone()) } clean::Generic(ref s) if accept_generic => Some(s.clone()), clean::Primitive(ref p) => Some(format!("{:?}", p)), clean::BorrowedRef { ref type_, .. 
} => get_index_type_name(type_, accept_generic), // FIXME: add all from clean::Type. _ => None } } fn get_generics(clean_type: &clean::Type) -> Option<Vec<String>> { clean_type.generics() .and_then(|types| { let r = types.iter() .filter_map(|t| get_index_type_name(t, false)) .map(|s| s.to_ascii_lowercase()) .collect::<Vec<_>>(); if r.is_empty() { None } else { Some(r) } }) }
build_index
identifier_name
cache.rs
use crate::clean::{self, GetDefId, AttributesExt}; use crate::fold::DocFolder; use rustc::hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId}; use rustc::middle::privacy::AccessLevels; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use std::mem; use std::path::{Path, PathBuf}; use std::collections::BTreeMap; use syntax::source_map::FileName; use syntax::symbol::sym; use serialize::json::{ToJson, Json, as_json}; use super::{ItemType, IndexItem, IndexItemFunctionType, Impl, shorten, plain_summary_line}; use super::{Type, RenderInfo}; /// Indicates where an external crate can be found. pub enum ExternalLocation { /// Remote URL root of the external crate Remote(String), /// This external crate can be found in the local doc/ folder Local, /// The external crate could not be found. Unknown, } /// This cache is used to store information about the `clean::Crate` being /// rendered in order to provide more useful documentation. This contains /// information like all implementors of a trait, all traits a type implements, /// documentation for all known traits, etc. /// /// This structure purposefully does not implement `Clone` because it's intended /// to be a fairly large and expensive structure to clone. Instead this adheres /// to `Send` so it may be stored in a `Arc` instance and shared among the various /// rendering threads. #[derive(Default)] crate struct Cache { /// Maps a type ID to all known implementations for that type. This is only /// recognized for intra-crate `ResolvedPath` types, and is used to print /// out extra documentation on the page of an enum/struct. /// /// The values of the map are a list of implementations and documentation /// found on that implementation. pub impls: FxHashMap<DefId, Vec<Impl>>, /// Maintains a mapping of local crate `NodeId`s to the fully qualified name /// and "short type description" of that node. This is used when generating /// URLs when a type is being linked to. 
External paths are not located in /// this map because the `External` type itself has all the information /// necessary. pub paths: FxHashMap<DefId, (Vec<String>, ItemType)>, /// Similar to `paths`, but only holds external paths. This is only used for /// generating explicit hyperlinks to other crates. pub external_paths: FxHashMap<DefId, (Vec<String>, ItemType)>, /// Maps local `DefId`s of exported types to fully qualified paths. /// Unlike 'paths', this mapping ignores any renames that occur /// due to 'use' statements. /// /// This map is used when writing out the special 'implementors' /// javascript file. By using the exact path that the type /// is declared with, we ensure that each path will be identical /// to the path used if the corresponding type is inlined. By /// doing this, we can detect duplicate impls on a trait page, and only display /// the impl for the inlined type. pub exact_paths: FxHashMap<DefId, Vec<String>>, /// This map contains information about all known traits of this crate. /// Implementations of a crate should inherit the documentation of the /// parent trait if no extra documentation is specified, and default methods /// should show up in documentation about trait implementations. pub traits: FxHashMap<DefId, clean::Trait>, /// When rendering traits, it's often useful to be able to list all /// implementors of the trait, and this mapping is exactly, that: a mapping /// of trait ids to the list of known implementors of the trait pub implementors: FxHashMap<DefId, Vec<Impl>>, /// Cache of where external crate documentation can be found. pub extern_locations: FxHashMap<CrateNum, (String, PathBuf, ExternalLocation)>, /// Cache of where documentation for primitives can be found. pub primitive_locations: FxHashMap<clean::PrimitiveType, DefId>, // Note that external items for which `doc(hidden)` applies to are shown as // non-reachable while local items aren't. This is because we're reusing // the access levels from the privacy check pass. 
pub access_levels: AccessLevels<DefId>, /// The version of the crate being documented, if given from the `--crate-version` flag. pub crate_version: Option<String>, // Private fields only used when initially crawling a crate to build a cache stack: Vec<String>, parent_stack: Vec<DefId>, parent_is_trait_impl: bool, search_index: Vec<IndexItem>, stripped_mod: bool, pub deref_trait_did: Option<DefId>, pub deref_mut_trait_did: Option<DefId>, pub owned_box_did: Option<DefId>, masked_crates: FxHashSet<CrateNum>, // In rare case where a structure is defined in one module but implemented // in another, if the implementing module is parsed before defining module, // then the fully qualified name of the structure isn't presented in `paths` // yet when its implementation methods are being indexed. Caches such methods // and their parent id here and indexes them at the end of crate parsing. orphan_impl_items: Vec<(DefId, clean::Item)>, // Similarly to `orphan_impl_items`, sometimes trait impls are picked up // even though the trait itself is not exported. This can happen if a trait // was defined in function/expression scope, since the impl will be picked // up by `collect-trait-impls` but the trait won't be scraped out in the HIR // crawl. In order to prevent crashes when looking for spotlight traits or // when gathering trait documentation on a type, hold impls here while // folding and add them to the cache later on if we find the trait. orphan_trait_impls: Vec<(DefId, FxHashSet<DefId>, Impl)>, /// Aliases added through `#[doc(alias = "...")]`. Since a few items can have the same alias, /// we need the alias element to have an array of items. 
pub(super) aliases: FxHashMap<String, Vec<IndexItem>>, } impl Cache { pub fn from_krate( renderinfo: RenderInfo, extern_html_root_urls: &BTreeMap<String, String>, dst: &Path, mut krate: clean::Crate, ) -> (clean::Crate, String, Cache) { // Crawl the crate to build various caches used for the output let RenderInfo { inlined: _, external_paths, exact_paths, access_levels, deref_trait_did, deref_mut_trait_did, owned_box_did, } = renderinfo; let external_paths = external_paths.into_iter() .map(|(k, (v, t))| (k, (v, ItemType::from(t)))) .collect(); let mut cache = Cache { impls: Default::default(), external_paths, exact_paths, paths: Default::default(), implementors: Default::default(), stack: Vec::new(), parent_stack: Vec::new(), search_index: Vec::new(), parent_is_trait_impl: false, extern_locations: Default::default(), primitive_locations: Default::default(), stripped_mod: false, access_levels, crate_version: krate.version.take(), orphan_impl_items: Vec::new(), orphan_trait_impls: Vec::new(), traits: krate.external_traits.replace(Default::default()), deref_trait_did, deref_mut_trait_did, owned_box_did, masked_crates: mem::take(&mut krate.masked_crates), aliases: Default::default(), }; // Cache where all our extern crates are located for &(n, ref e) in &krate.externs { let src_root = match e.src { FileName::Real(ref p) => match p.parent() { Some(p) => p.to_path_buf(), None => PathBuf::new(), }, _ => PathBuf::new(), }; let extern_url = extern_html_root_urls.get(&e.name).map(|u| &**u); cache.extern_locations.insert(n, (e.name.clone(), src_root, extern_location(e, extern_url, &dst))); let did = DefId { krate: n, index: CRATE_DEF_INDEX }; cache.external_paths.insert(did, (vec![e.name.to_string()], ItemType::Module)); } // Cache where all known primitives have their documentation located. // // Favor linking to as local extern as possible, so iterate all crates in // reverse topological order. 
for &(_, ref e) in krate.externs.iter().rev() { for &(def_id, prim, _) in &e.primitives { cache.primitive_locations.insert(prim, def_id); } } for &(def_id, prim, _) in &krate.primitives { cache.primitive_locations.insert(prim, def_id); } cache.stack.push(krate.name.clone()); krate = cache.fold_crate(krate); for (trait_did, dids, impl_) in cache.orphan_trait_impls.drain(..) { if cache.traits.contains_key(&trait_did) { for did in dids { cache.impls.entry(did).or_insert(vec![]).push(impl_.clone()); } } } // Build our search index let index = build_index(&krate, &mut cache); (krate, index, cache) } } impl DocFolder for Cache { fn fold_item(&mut self, item: clean::Item) -> Option<clean::Item> { if item.def_id.is_local() { debug!("folding {} \"{:?}\", id {:?}", item.type_(), item.name, item.def_id); } // If this is a stripped module, // we don't want it or its children in the search index. let orig_stripped_mod = match item.inner { clean::StrippedItem(box clean::ModuleItem(..)) => { mem::replace(&mut self.stripped_mod, true) } _ => self.stripped_mod, }; // If the impl is from a masked crate or references something from a // masked crate then remove it completely. if let clean::ImplItem(ref i) = item.inner { if self.masked_crates.contains(&item.def_id.krate) || i.trait_.def_id().map_or(false, |d| self.masked_crates.contains(&d.krate)) || i.for_.def_id().map_or(false, |d| self.masked_crates.contains(&d.krate)) { return None; } } // Propagate a trait method's documentation to all implementors of the // trait. if let clean::TraitItem(ref t) = item.inner { self.traits.entry(item.def_id).or_insert_with(|| t.clone()); } // Collect all the implementors of traits. if let clean::ImplItem(ref i) = item.inner { if let Some(did) = i.trait_.def_id() { if i.blanket_impl.is_none() { self.implementors.entry(did).or_default().push(Impl { impl_item: item.clone(), }); } } } // Index this method for searching later on. 
if let Some(ref s) = item.name { let (parent, is_inherent_impl_item) = match item.inner { clean::StrippedItem(..) => ((None, None), false), clean::AssocConstItem(..) | clean::TypedefItem(_, true) if self.parent_is_trait_impl => { // skip associated items in trait impls ((None, None), false) } clean::AssocTypeItem(..) | clean::TyMethodItem(..) | clean::StructFieldItem(..) | clean::VariantItem(..) => { ((Some(*self.parent_stack.last().unwrap()), Some(&self.stack[..self.stack.len() - 1])), false) } clean::MethodItem(..) | clean::AssocConstItem(..) => { if self.parent_stack.is_empty() { ((None, None), false) } else { let last = self.parent_stack.last().unwrap(); let did = *last; let path = match self.paths.get(&did) { // The current stack not necessarily has correlation // for where the type was defined. On the other // hand, `paths` always has the right // information if present. Some(&(ref fqp, ItemType::Trait)) | Some(&(ref fqp, ItemType::Struct)) | Some(&(ref fqp, ItemType::Union)) | Some(&(ref fqp, ItemType::Enum)) => Some(&fqp[..fqp.len() - 1]), Some(..) => Some(&*self.stack), None => None }; ((Some(*last), path), true) } } _ => ((None, Some(&*self.stack)), false) }; match parent { (parent, Some(path)) if is_inherent_impl_item || (!self.stripped_mod) => { debug_assert!(!item.is_stripped()); // A crate has a module at its root, containing all items, // which should not be indexed. The crate-item itself is // inserted later on when serializing the search-index. if item.def_id.index != CRATE_DEF_INDEX { self.search_index.push(IndexItem { ty: item.type_(), name: s.to_string(), path: path.join("::"), desc: shorten(plain_summary_line(item.doc_value())), parent, parent_idx: None, search_type: get_index_search_type(&item), }); } } (Some(parent), None) if is_inherent_impl_item => { // We have a parent, but we don't know where they're // defined yet. Wait for later to index this item. 
self.orphan_impl_items.push((parent, item.clone())); } _ => {} } } // Keep track of the fully qualified path for this item. let pushed = match item.name { Some(ref n) if !n.is_empty() => { self.stack.push(n.to_string()); true } _ => false, }; match item.inner { clean::StructItem(..) | clean::EnumItem(..) | clean::TypedefItem(..) | clean::TraitItem(..) | clean::FunctionItem(..) | clean::ModuleItem(..) | clean::ForeignFunctionItem(..) | clean::ForeignStaticItem(..) | clean::ConstantItem(..) | clean::StaticItem(..) | clean::UnionItem(..) | clean::ForeignTypeItem | clean::MacroItem(..) | clean::ProcMacroItem(..) if !self.stripped_mod => { // Re-exported items mean that the same id can show up twice // in the rustdoc ast that we're looking at. We know, // however, that a re-exported item doesn't show up in the // `public_items` map, so we can skip inserting into the // paths map if there was already an entry present and we're // not a public item. if !self.paths.contains_key(&item.def_id) || self.access_levels.is_public(item.def_id) { self.paths.insert(item.def_id, (self.stack.clone(), item.type_())); } self.add_aliases(&item); } // Link variants to their parent enum because pages aren't emitted // for each variant. clean::VariantItem(..) if !self.stripped_mod => { let mut stack = self.stack.clone(); stack.pop(); self.paths.insert(item.def_id, (stack, ItemType::Enum)); } clean::PrimitiveItem(..) => { self.add_aliases(&item); self.paths.insert(item.def_id, (self.stack.clone(), item.type_())); } _ => {} } // Maintain the parent stack let orig_parent_is_trait_impl = self.parent_is_trait_impl; let parent_pushed = match item.inner { clean::TraitItem(..) | clean::EnumItem(..) | clean::ForeignTypeItem | clean::StructItem(..) | clean::UnionItem(..) => { self.parent_stack.push(item.def_id); self.parent_is_trait_impl = false; true } clean::ImplItem(ref i) => { self.parent_is_trait_impl = i.trait_.is_some(); match i.for_ { clean::ResolvedPath{ did, .. 
} => { self.parent_stack.push(did); true } ref t => { let prim_did = t.primitive_type().and_then(|t| { self.primitive_locations.get(&t).cloned() }); match prim_did { Some(did) => { self.parent_stack.push(did); true } None => false, } } } } _ => false }; // Once we've recursively found all the generics, hoard off all the // implementations elsewhere. let ret = self.fold_item_recur(item).and_then(|item| { if let clean::Item { inner: clean::ImplItem(_), .. } = item { // Figure out the id of this impl. This may map to a // primitive rather than always to a struct/enum. // Note: matching twice to restrict the lifetime of the `i` borrow. let mut dids = FxHashSet::default(); if let clean::Item { inner: clean::ImplItem(ref i), .. } = item { match i.for_ { clean::ResolvedPath { did, .. } | clean::BorrowedRef { type_: box clean::ResolvedPath { did, .. }, .. } => { dids.insert(did); } ref t => { let did = t.primitive_type().and_then(|t| { self.primitive_locations.get(&t).cloned() }); if let Some(did) = did { dids.insert(did); } } } if let Some(generics) = i.trait_.as_ref().and_then(|t| t.generics()) { for bound in generics { if let Some(did) = bound.def_id() { dids.insert(did); } } } } else { unreachable!() }; let impl_item = Impl { impl_item: item, }; if impl_item.trait_did().map_or(true, |d| self.traits.contains_key(&d)) { for did in dids { self.impls.entry(did).or_insert(vec![]).push(impl_item.clone()); } } else { let trait_did = impl_item.trait_did().unwrap(); self.orphan_trait_impls.push((trait_did, dids, impl_item)); } None } else { Some(item) } }); if pushed { self.stack.pop().unwrap(); } if parent_pushed { self.parent_stack.pop().unwrap(); } self.stripped_mod = orig_stripped_mod; self.parent_is_trait_impl = orig_parent_is_trait_impl; ret } } impl Cache { fn add_aliases(&mut self, item: &clean::Item) { if item.def_id.index == CRATE_DEF_INDEX { return } if let Some(ref item_name) = item.name { let path = self.paths.get(&item.def_id) .map(|p| p.0[..p.0.len() - 
1].join("::")) .unwrap_or("std".to_owned()); for alias in item.attrs.lists(sym::doc) .filter(|a| a.check_name(sym::alias)) .filter_map(|a| a.value_str() .map(|s| s.to_string().replace("\"", ""))) .filter(|v| !v.is_empty()) .collect::<FxHashSet<_>>() .into_iter() { self.aliases.entry(alias) .or_insert(Vec::with_capacity(1)) .push(IndexItem { ty: item.type_(), name: item_name.to_string(), path: path.clone(), desc: shorten(plain_summary_line(item.doc_value())), parent: None, parent_idx: None, search_type: get_index_search_type(&item), }); } } } } /// Attempts to find where an external crate is located, given that we're /// rendering in to the specified source destination. fn extern_location(e: &clean::ExternalCrate, extern_url: Option<&str>, dst: &Path) -> ExternalLocation { use ExternalLocation::*; // See if there's documentation generated into the local directory let local_location = dst.join(&e.name); if local_location.is_dir() { return Local; } if let Some(url) = extern_url { let mut url = url.to_string(); if !url.ends_with("/") { url.push('/'); } return Remote(url); } // Failing that, see if there's an attribute specifying where to find this // external crate e.attrs.lists(sym::doc) .filter(|a| a.check_name(sym::html_root_url)) .filter_map(|a| a.value_str()) .map(|url| { let mut url = url.to_string(); if !url.ends_with("/")
Remote(url) }).next().unwrap_or(Unknown) // Well, at least we tried. } /// Builds the search index from the collected metadata fn build_index(krate: &clean::Crate, cache: &mut Cache) -> String { let mut nodeid_to_pathid = FxHashMap::default(); let mut crate_items = Vec::with_capacity(cache.search_index.len()); let mut crate_paths = Vec::<Json>::new(); let Cache { ref mut search_index, ref orphan_impl_items, ref paths, .. } = *cache; // Attach all orphan items to the type's definition if the type // has since been learned. for &(did, ref item) in orphan_impl_items { if let Some(&(ref fqp, _)) = paths.get(&did) { search_index.push(IndexItem { ty: item.type_(), name: item.name.clone().unwrap(), path: fqp[..fqp.len() - 1].join("::"), desc: shorten(plain_summary_line(item.doc_value())), parent: Some(did), parent_idx: None, search_type: get_index_search_type(&item), }); } } // Reduce `NodeId` in paths into smaller sequential numbers, // and prune the paths that do not appear in the index. let mut lastpath = String::new(); let mut lastpathid = 0usize; for item in search_index { item.parent_idx = item.parent.map(|nodeid| { if nodeid_to_pathid.contains_key(&nodeid) { *nodeid_to_pathid.get(&nodeid).unwrap() } else { let pathid = lastpathid; nodeid_to_pathid.insert(nodeid, pathid); lastpathid += 1; let &(ref fqp, short) = paths.get(&nodeid).unwrap(); crate_paths.push(((short as usize), fqp.last().unwrap().clone()).to_json()); pathid } }); // Omit the parent path if it is same to that of the prior item. 
if lastpath == item.path { item.path.clear(); } else { lastpath = item.path.clone(); } crate_items.push(item.to_json()); } let crate_doc = krate.module.as_ref().map(|module| { shorten(plain_summary_line(module.doc_value())) }).unwrap_or(String::new()); let mut crate_data = BTreeMap::new(); crate_data.insert("doc".to_owned(), Json::String(crate_doc)); crate_data.insert("i".to_owned(), Json::Array(crate_items)); crate_data.insert("p".to_owned(), Json::Array(crate_paths)); // Collect the index into a string format!("searchIndex[{}] = {};", as_json(&krate.name), Json::Object(crate_data)) } fn get_index_search_type(item: &clean::Item) -> Option<IndexItemFunctionType> { let (all_types, ret_types) = match item.inner { clean::FunctionItem(ref f) => (&f.all_types, &f.ret_types), clean::MethodItem(ref m) => (&m.all_types, &m.ret_types), clean::TyMethodItem(ref m) => (&m.all_types, &m.ret_types), _ => return None, }; let inputs = all_types.iter().map(|arg| { get_index_type(&arg) }).filter(|a| a.name.is_some()).collect(); let output = ret_types.iter().map(|arg| { get_index_type(&arg) }).filter(|a| a.name.is_some()).collect::<Vec<_>>(); let output = if output.is_empty() { None } else { Some(output) }; Some(IndexItemFunctionType { inputs, output }) } fn get_index_type(clean_type: &clean::Type) -> Type { let t = Type { name: get_index_type_name(clean_type, true).map(|s| s.to_ascii_lowercase()), generics: get_generics(clean_type), }; t } fn get_index_type_name(clean_type: &clean::Type, accept_generic: bool) -> Option<String> { match *clean_type { clean::ResolvedPath { ref path, .. } => { let segments = &path.segments; let path_segment = segments.into_iter().last().unwrap_or_else(|| panic!( "get_index_type_name(clean_type: {:?}, accept_generic: {:?}) had length zero path", clean_type, accept_generic )); Some(path_segment.name.clone()) } clean::Generic(ref s) if accept_generic => Some(s.clone()), clean::Primitive(ref p) => Some(format!("{:?}", p)), clean::BorrowedRef { ref type_, .. 
} => get_index_type_name(type_, accept_generic), // FIXME: add all from clean::Type. _ => None } } fn get_generics(clean_type: &clean::Type) -> Option<Vec<String>> { clean_type.generics() .and_then(|types| { let r = types.iter() .filter_map(|t| get_index_type_name(t, false)) .map(|s| s.to_ascii_lowercase()) .collect::<Vec<_>>(); if r.is_empty() { None } else { Some(r) } }) }
{ url.push('/') }
conditional_block
l2fib_evpn_ipmac_info.pb.go
/* Copyright 2019 Cisco Systems Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by protoc-gen-go. DO NOT EDIT. // source: l2fib_evpn_ipmac_info.proto package cisco_ios_xr_l2vpn_oper_l2vpn_forwarding_nodes_node_l2fib_evpn_ip6macs_l2fib_evpn_ip6mac import ( fmt "fmt" proto "github.com/golang/protobuf/proto" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type L2FibEvpnIpmacInfo_KEYS struct { NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` Bdid uint32 `protobuf:"varint,2,opt,name=bdid,proto3" json:"bdid,omitempty"` IpAddress string `protobuf:"bytes,3,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` IsLocal bool `protobuf:"varint,4,opt,name=is_local,json=isLocal,proto3" json:"is_local,omitempty"` MacAddress string `protobuf:"bytes,5,opt,name=mac_address,json=macAddress,proto3" json:"mac_address,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *L2FibEvpnIpmacInfo_KEYS) Reset() { *m = L2FibEvpnIpmacInfo_KEYS{} } func (m *L2FibEvpnIpmacInfo_KEYS) String() string { return proto.CompactTextString(m) } func (*L2FibEvpnIpmacInfo_KEYS) ProtoMessage() {} func (*L2FibEvpnIpmacInfo_KEYS) Descriptor() ([]byte, []int) { return fileDescriptor_359a6e06a2413ca9, []int{0} } func (m *L2FibEvpnIpmacInfo_KEYS) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_L2FibEvpnIpmacInfo_KEYS.Unmarshal(m, b) } func (m *L2FibEvpnIpmacInfo_KEYS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_L2FibEvpnIpmacInfo_KEYS.Marshal(b, m, deterministic) } func (m *L2FibEvpnIpmacInfo_KEYS) XXX_Merge(src proto.Message) { xxx_messageInfo_L2FibEvpnIpmacInfo_KEYS.Merge(m, src) } func (m *L2FibEvpnIpmacInfo_KEYS) XXX_Size() int { return xxx_messageInfo_L2FibEvpnIpmacInfo_KEYS.Size(m) } func (m *L2FibEvpnIpmacInfo_KEYS) XXX_DiscardUnknown()
var xxx_messageInfo_L2FibEvpnIpmacInfo_KEYS proto.InternalMessageInfo func (m *L2FibEvpnIpmacInfo_KEYS) GetNodeId() string { if m != nil { return m.NodeId } return "" } func (m *L2FibEvpnIpmacInfo_KEYS) GetBdid() uint32 { if m != nil { return m.Bdid } return 0 } func (m *L2FibEvpnIpmacInfo_KEYS) GetIpAddress() string { if m != nil { return m.IpAddress } return "" } func (m *L2FibEvpnIpmacInfo_KEYS) GetIsLocal() bool { if m != nil { return m.IsLocal } return false } func (m *L2FibEvpnIpmacInfo_KEYS) GetMacAddress() string { if m != nil { return m.MacAddress } return "" } type L2FibIpAddrT struct { AddrType string `protobuf:"bytes,1,opt,name=addr_type,json=addrType,proto3" json:"addr_type,omitempty"` Ip string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *L2FibIpAddrT) Reset() { *m = L2FibIpAddrT{} } func (m *L2FibIpAddrT) String() string { return proto.CompactTextString(m) } func (*L2FibIpAddrT) ProtoMessage() {} func (*L2FibIpAddrT) Descriptor() ([]byte, []int) { return fileDescriptor_359a6e06a2413ca9, []int{1} } func (m *L2FibIpAddrT) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_L2FibIpAddrT.Unmarshal(m, b) } func (m *L2FibIpAddrT) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_L2FibIpAddrT.Marshal(b, m, deterministic) } func (m *L2FibIpAddrT) XXX_Merge(src proto.Message) { xxx_messageInfo_L2FibIpAddrT.Merge(m, src) } func (m *L2FibIpAddrT) XXX_Size() int { return xxx_messageInfo_L2FibIpAddrT.Size(m) } func (m *L2FibIpAddrT) XXX_DiscardUnknown() { xxx_messageInfo_L2FibIpAddrT.DiscardUnknown(m) } var xxx_messageInfo_L2FibIpAddrT proto.InternalMessageInfo func (m *L2FibIpAddrT) GetAddrType() string { if m != nil { return m.AddrType } return "" } func (m *L2FibIpAddrT) GetIp() string { if m != nil { return m.Ip } return "" } type L2FibEvpnIpmacInfo struct { BdidXr uint32 
`protobuf:"varint,50,opt,name=bdid_xr,json=bdidXr,proto3" json:"bdid_xr,omitempty"` MacAddressXr string `protobuf:"bytes,51,opt,name=mac_address_xr,json=macAddressXr,proto3" json:"mac_address_xr,omitempty"` IpAddressXr *L2FibIpAddrT `protobuf:"bytes,52,opt,name=ip_address_xr,json=ipAddressXr,proto3" json:"ip_address_xr,omitempty"` ArpNdSyncPending bool `protobuf:"varint,53,opt,name=arp_nd_sync_pending,json=arpNdSyncPending,proto3" json:"arp_nd_sync_pending,omitempty"` ArpNdProbePending bool `protobuf:"varint,54,opt,name=arp_nd_probe_pending,json=arpNdProbePending,proto3" json:"arp_nd_probe_pending,omitempty"` ArpNdDeletePending bool `protobuf:"varint,55,opt,name=arp_nd_delete_pending,json=arpNdDeletePending,proto3" json:"arp_nd_delete_pending,omitempty"` IsLocalXr bool `protobuf:"varint,56,opt,name=is_local_xr,json=isLocalXr,proto3" json:"is_local_xr,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *L2FibEvpnIpmacInfo) Reset() { *m = L2FibEvpnIpmacInfo{} } func (m *L2FibEvpnIpmacInfo) String() string { return proto.CompactTextString(m) } func (*L2FibEvpnIpmacInfo) ProtoMessage() {} func (*L2FibEvpnIpmacInfo) Descriptor() ([]byte, []int) { return fileDescriptor_359a6e06a2413ca9, []int{2} } func (m *L2FibEvpnIpmacInfo) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_L2FibEvpnIpmacInfo.Unmarshal(m, b) } func (m *L2FibEvpnIpmacInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_L2FibEvpnIpmacInfo.Marshal(b, m, deterministic) } func (m *L2FibEvpnIpmacInfo) XXX_Merge(src proto.Message) { xxx_messageInfo_L2FibEvpnIpmacInfo.Merge(m, src) } func (m *L2FibEvpnIpmacInfo) XXX_Size() int { return xxx_messageInfo_L2FibEvpnIpmacInfo.Size(m) } func (m *L2FibEvpnIpmacInfo) XXX_DiscardUnknown() { xxx_messageInfo_L2FibEvpnIpmacInfo.DiscardUnknown(m) } var xxx_messageInfo_L2FibEvpnIpmacInfo proto.InternalMessageInfo func (m *L2FibEvpnIpmacInfo) GetBdidXr() uint32 
{ if m != nil { return m.BdidXr } return 0 } func (m *L2FibEvpnIpmacInfo) GetMacAddressXr() string { if m != nil { return m.MacAddressXr } return "" } func (m *L2FibEvpnIpmacInfo) GetIpAddressXr() *L2FibIpAddrT { if m != nil { return m.IpAddressXr } return nil } func (m *L2FibEvpnIpmacInfo) GetArpNdSyncPending() bool { if m != nil { return m.ArpNdSyncPending } return false } func (m *L2FibEvpnIpmacInfo) GetArpNdProbePending() bool { if m != nil { return m.ArpNdProbePending } return false } func (m *L2FibEvpnIpmacInfo) GetArpNdDeletePending() bool { if m != nil { return m.ArpNdDeletePending } return false } func (m *L2FibEvpnIpmacInfo) GetIsLocalXr() bool { if m != nil { return m.IsLocalXr } return false } func init() { proto.RegisterType((*L2FibEvpnIpmacInfo_KEYS)(nil), "cisco_ios_xr_l2vpn_oper.l2vpn_forwarding.nodes.node.l2fib_evpn_ip6macs.l2fib_evpn_ip6mac.l2fib_evpn_ipmac_info_KEYS") proto.RegisterType((*L2FibIpAddrT)(nil), "cisco_ios_xr_l2vpn_oper.l2vpn_forwarding.nodes.node.l2fib_evpn_ip6macs.l2fib_evpn_ip6mac.l2fib_ip_addr_t") proto.RegisterType((*L2FibEvpnIpmacInfo)(nil), "cisco_ios_xr_l2vpn_oper.l2vpn_forwarding.nodes.node.l2fib_evpn_ip6macs.l2fib_evpn_ip6mac.l2fib_evpn_ipmac_info") } func init() { proto.RegisterFile("l2fib_evpn_ipmac_info.proto", fileDescriptor_359a6e06a2413ca9) } var fileDescriptor_359a6e06a2413ca9 = []byte{ // 413 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x52, 0xcb, 0xae, 0xd3, 0x30, 0x10, 0x55, 0x7a, 0x2f, 0x6d, 0x33, 0xa1, 0x05, 0x0c, 0x15, 0x81, 0x0a, 0x88, 0x2a, 0x16, 0xd9, 0x10, 0x44, 0x0a, 0x85, 0x15, 0x12, 0x12, 0x2c, 0x10, 0x08, 0x55, 0x29, 0x8b, 0xb0, 0xb2, 0x1c, 0xdb, 0x45, 0x96, 0x52, 0xdb, 0xb2, 0x2b, 0x68, 0x3e, 0x82, 0x0f, 0x60, 0xcf, 0x87, 0x22, 0x3b, 0x49, 0xcb, 0xa3, 0xdb, 0xbb, 0xb1, 0xe6, 0x71, 0xce, 0xcc, 0x99, 0x19, 0xc3, 0xbc, 0xce, 0xb7, 0xa2, 0xc2, 0xfc, 0x9b, 0x96, 0x58, 0xe8, 0x1d, 0xa1, 0x58, 0xc8, 0xad, 0xca, 0xb4, 0x51, 0x7b, 0x85, 0x4a, 0x2a, 
0x2c, 0x55, 0x58, 0x28, 0x8b, 0x0f, 0x06, 0xd7, 0xb9, 0x03, 0x29, 0xcd, 0x4d, 0xd6, 0x9a, 0x5b, 0x65, 0xbe, 0x13, 0xc3, 0x84, 0xfc, 0x9a, 0x49, 0xc5, 0xb8, 0xf5, 0x6f, 0xf6, 0x57, 0xc1, 0xd5, 0x8e, 0x50, 0xfb, 0x7f, 0x68, 0xf1, 0x2b, 0x80, 0xfb, 0x67, 0x3b, 0xe3, 0x0f, 0xef, 0xbe, 0x6c, 0xd0, 0x5d, 0x18, 0xb9, 0x5a, 0x58, 0xb0, 0x38, 0x48, 0x82, 0x34, 0x2c, 0x86, 0xce, 0x7d, 0xcf, 0x10, 0x82, 0xcb, 0x8a, 0x09, 0x16, 0x0f, 0x92, 0x20, 0x9d, 0x14, 0xde, 0x46, 0x0f, 0x00, 0x84, 0xc6, 0x84, 0x31, 0xc3, 0xad, 0x8d, 0x2f, 0x3c, 0x3e, 0x14, 0xfa, 0x4d, 0x1b, 0x40, 0xf7, 0x60, 0x2c, 0x2c, 0xae, 0x15, 0x25, 0x75, 0x7c, 0x99, 0x04, 0xe9, 0xb8, 0x18, 0x09, 0xfb, 0xd1, 0xb9, 0xe8, 0x11, 0x44, 0xae, 0x6f, 0x4f, 0xbd, 0xe6, 0xa9, 0xb0, 0x23, 0xb4, 0xe3, 0x2e, 0x5e, 0xc3, 0x8d, 0x56, 0x65, 0xd7, 0x00, 0xef, 0xd1, 0x1c, 0xc2, 0xd6, 0x6a, 0x34, 0xef, 0xc4, 0x8d, 0x5d, 0xe0, 0x73, 0xa3, 0x39, 0x9a, 0xc2, 0x40, 0x68, 0x2f, 0x2e, 0x2c, 0x06, 0x42, 0x2f, 0x7e, 0x5e, 0xc0, 0xec, 0xec, 0x98, 0x6e, 0x42, 0x27, 0x1e, 0x1f, 0x4c, 0x9c, 0xfb, 0x59, 0x86, 0xce, 0x2d, 0x0d, 0x7a, 0x0c, 0xd3, 0x3f, 0x34, 0xb9, 0xfc, 0xd2, 0x97, 0xbb, 0x7e, 0x92, 0x55, 0x1a, 0xf4, 0x23, 0x80, 0xc9, 0x69, 0x68, 0x87, 0x7a, 0x9e, 0x04, 0x69, 0x94, 0x8b, 0xec, 0xaa, 0x4e, 0x96, 0xfd, 0xb3, 0x88, 0x22, 0x3a, 0xae, 0xb8, 0x34, 0xe8, 0x09, 0xdc, 0x26, 0x46, 0x63, 0xc9, 0xb0, 0x6d, 0x24, 0xc5, 0x9a, 0x4b, 0xd7, 0x23, 0x7e, 0xe1, 0xf7, 0x7d, 0x93, 0x18, 0xfd, 0x89, 0x6d, 0x1a, 0x49, 0xd7, 0x6d, 0x1c, 0x3d, 0x85, 0x3b, 0x1d, 0x5c, 0x1b, 0x55, 0xf1, 0x23, 0x7e, 0xe5, 0xf1, 0xb7, 0x3c, 0x7e, 0xed, 0x32, 0x3d, 0xe1, 0x19, 0xcc, 0x3a, 0x02, 0xe3, 0x35, 0xdf, 0x9f, 0x18, 0x2f, 0x3d, 0x03, 0x79, 0xc6, 0x5b, 0x9f, 0xea, 0x29, 0x0f, 0x21, 0xea, 0xef, 0xee, 0xf6, 0xf3, 0xca, 0x03, 0xc3, 0xee, 0xf4, 0xa5, 0xa9, 0x86, 0xfe, 0x8f, 0x2f, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x49, 0x87, 0x17, 0x1f, 0x02, 0x03, 0x00, 0x00, }
{ xxx_messageInfo_L2FibEvpnIpmacInfo_KEYS.DiscardUnknown(m) }
identifier_body
l2fib_evpn_ipmac_info.pb.go
/* Copyright 2019 Cisco Systems Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by protoc-gen-go. DO NOT EDIT. // source: l2fib_evpn_ipmac_info.proto package cisco_ios_xr_l2vpn_oper_l2vpn_forwarding_nodes_node_l2fib_evpn_ip6macs_l2fib_evpn_ip6mac import ( fmt "fmt" proto "github.com/golang/protobuf/proto" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type L2FibEvpnIpmacInfo_KEYS struct { NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` Bdid uint32 `protobuf:"varint,2,opt,name=bdid,proto3" json:"bdid,omitempty"` IpAddress string `protobuf:"bytes,3,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` IsLocal bool `protobuf:"varint,4,opt,name=is_local,json=isLocal,proto3" json:"is_local,omitempty"` MacAddress string `protobuf:"bytes,5,opt,name=mac_address,json=macAddress,proto3" json:"mac_address,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *L2FibEvpnIpmacInfo_KEYS) Reset() { *m = L2FibEvpnIpmacInfo_KEYS{} } func (m *L2FibEvpnIpmacInfo_KEYS) String() string { return proto.CompactTextString(m) } func (*L2FibEvpnIpmacInfo_KEYS) ProtoMessage() {} func (*L2FibEvpnIpmacInfo_KEYS) Descriptor() ([]byte, []int) { return fileDescriptor_359a6e06a2413ca9, []int{0} } func (m *L2FibEvpnIpmacInfo_KEYS) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_L2FibEvpnIpmacInfo_KEYS.Unmarshal(m, b) } func (m *L2FibEvpnIpmacInfo_KEYS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_L2FibEvpnIpmacInfo_KEYS.Marshal(b, m, deterministic) } func (m *L2FibEvpnIpmacInfo_KEYS) XXX_Merge(src proto.Message) { xxx_messageInfo_L2FibEvpnIpmacInfo_KEYS.Merge(m, src) } func (m *L2FibEvpnIpmacInfo_KEYS) XXX_Size() int { return xxx_messageInfo_L2FibEvpnIpmacInfo_KEYS.Size(m) } func (m *L2FibEvpnIpmacInfo_KEYS) XXX_DiscardUnknown() { xxx_messageInfo_L2FibEvpnIpmacInfo_KEYS.DiscardUnknown(m) } var xxx_messageInfo_L2FibEvpnIpmacInfo_KEYS proto.InternalMessageInfo func (m *L2FibEvpnIpmacInfo_KEYS) GetNodeId() string { if m != nil { return m.NodeId } return "" } func (m *L2FibEvpnIpmacInfo_KEYS) GetBdid() uint32 { if m != nil { return m.Bdid } return 0 } func (m *L2FibEvpnIpmacInfo_KEYS) 
GetIpAddress() string { if m != nil { return m.IpAddress } return "" } func (m *L2FibEvpnIpmacInfo_KEYS) GetIsLocal() bool { if m != nil { return m.IsLocal } return false } func (m *L2FibEvpnIpmacInfo_KEYS) GetMacAddress() string { if m != nil { return m.MacAddress } return "" } type L2FibIpAddrT struct { AddrType string `protobuf:"bytes,1,opt,name=addr_type,json=addrType,proto3" json:"addr_type,omitempty"` Ip string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *L2FibIpAddrT) Reset() { *m = L2FibIpAddrT{} } func (m *L2FibIpAddrT) String() string { return proto.CompactTextString(m) } func (*L2FibIpAddrT) ProtoMessage() {} func (*L2FibIpAddrT) Descriptor() ([]byte, []int) { return fileDescriptor_359a6e06a2413ca9, []int{1} } func (m *L2FibIpAddrT) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_L2FibIpAddrT.Unmarshal(m, b) } func (m *L2FibIpAddrT) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_L2FibIpAddrT.Marshal(b, m, deterministic) } func (m *L2FibIpAddrT) XXX_Merge(src proto.Message) { xxx_messageInfo_L2FibIpAddrT.Merge(m, src) } func (m *L2FibIpAddrT) XXX_Size() int { return xxx_messageInfo_L2FibIpAddrT.Size(m) } func (m *L2FibIpAddrT) XXX_DiscardUnknown() { xxx_messageInfo_L2FibIpAddrT.DiscardUnknown(m) } var xxx_messageInfo_L2FibIpAddrT proto.InternalMessageInfo func (m *L2FibIpAddrT) GetAddrType() string { if m != nil { return m.AddrType } return "" } func (m *L2FibIpAddrT) GetIp() string { if m != nil { return m.Ip } return "" } type L2FibEvpnIpmacInfo struct { BdidXr uint32 `protobuf:"varint,50,opt,name=bdid_xr,json=bdidXr,proto3" json:"bdid_xr,omitempty"` MacAddressXr string `protobuf:"bytes,51,opt,name=mac_address_xr,json=macAddressXr,proto3" json:"mac_address_xr,omitempty"` IpAddressXr *L2FibIpAddrT `protobuf:"bytes,52,opt,name=ip_address_xr,json=ipAddressXr,proto3" 
json:"ip_address_xr,omitempty"` ArpNdSyncPending bool `protobuf:"varint,53,opt,name=arp_nd_sync_pending,json=arpNdSyncPending,proto3" json:"arp_nd_sync_pending,omitempty"` ArpNdProbePending bool `protobuf:"varint,54,opt,name=arp_nd_probe_pending,json=arpNdProbePending,proto3" json:"arp_nd_probe_pending,omitempty"` ArpNdDeletePending bool `protobuf:"varint,55,opt,name=arp_nd_delete_pending,json=arpNdDeletePending,proto3" json:"arp_nd_delete_pending,omitempty"` IsLocalXr bool `protobuf:"varint,56,opt,name=is_local_xr,json=isLocalXr,proto3" json:"is_local_xr,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *L2FibEvpnIpmacInfo) Reset() { *m = L2FibEvpnIpmacInfo{} } func (m *L2FibEvpnIpmacInfo) String() string { return proto.CompactTextString(m) } func (*L2FibEvpnIpmacInfo) ProtoMessage() {} func (*L2FibEvpnIpmacInfo) Descriptor() ([]byte, []int) { return fileDescriptor_359a6e06a2413ca9, []int{2} } func (m *L2FibEvpnIpmacInfo) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_L2FibEvpnIpmacInfo.Unmarshal(m, b) } func (m *L2FibEvpnIpmacInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_L2FibEvpnIpmacInfo.Marshal(b, m, deterministic) } func (m *L2FibEvpnIpmacInfo) XXX_Merge(src proto.Message) { xxx_messageInfo_L2FibEvpnIpmacInfo.Merge(m, src) } func (m *L2FibEvpnIpmacInfo) XXX_Size() int { return xxx_messageInfo_L2FibEvpnIpmacInfo.Size(m) } func (m *L2FibEvpnIpmacInfo) XXX_DiscardUnknown() { xxx_messageInfo_L2FibEvpnIpmacInfo.DiscardUnknown(m) } var xxx_messageInfo_L2FibEvpnIpmacInfo proto.InternalMessageInfo func (m *L2FibEvpnIpmacInfo) GetBdidXr() uint32 { if m != nil { return m.BdidXr } return 0 } func (m *L2FibEvpnIpmacInfo) GetMacAddressXr() string { if m != nil { return m.MacAddressXr } return "" } func (m *L2FibEvpnIpmacInfo) GetIpAddressXr() *L2FibIpAddrT { if m != nil { return m.IpAddressXr } return nil } func (m *L2FibEvpnIpmacInfo) 
GetArpNdSyncPending() bool {
return false } func (m *L2FibEvpnIpmacInfo) GetArpNdProbePending() bool { if m != nil { return m.ArpNdProbePending } return false } func (m *L2FibEvpnIpmacInfo) GetArpNdDeletePending() bool { if m != nil { return m.ArpNdDeletePending } return false } func (m *L2FibEvpnIpmacInfo) GetIsLocalXr() bool { if m != nil { return m.IsLocalXr } return false } func init() { proto.RegisterType((*L2FibEvpnIpmacInfo_KEYS)(nil), "cisco_ios_xr_l2vpn_oper.l2vpn_forwarding.nodes.node.l2fib_evpn_ip6macs.l2fib_evpn_ip6mac.l2fib_evpn_ipmac_info_KEYS") proto.RegisterType((*L2FibIpAddrT)(nil), "cisco_ios_xr_l2vpn_oper.l2vpn_forwarding.nodes.node.l2fib_evpn_ip6macs.l2fib_evpn_ip6mac.l2fib_ip_addr_t") proto.RegisterType((*L2FibEvpnIpmacInfo)(nil), "cisco_ios_xr_l2vpn_oper.l2vpn_forwarding.nodes.node.l2fib_evpn_ip6macs.l2fib_evpn_ip6mac.l2fib_evpn_ipmac_info") } func init() { proto.RegisterFile("l2fib_evpn_ipmac_info.proto", fileDescriptor_359a6e06a2413ca9) } var fileDescriptor_359a6e06a2413ca9 = []byte{ // 413 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x52, 0xcb, 0xae, 0xd3, 0x30, 0x10, 0x55, 0x7a, 0x2f, 0x6d, 0x33, 0xa1, 0x05, 0x0c, 0x15, 0x81, 0x0a, 0x88, 0x2a, 0x16, 0xd9, 0x10, 0x44, 0x0a, 0x85, 0x15, 0x12, 0x12, 0x2c, 0x10, 0x08, 0x55, 0x29, 0x8b, 0xb0, 0xb2, 0x1c, 0xdb, 0x45, 0x96, 0x52, 0xdb, 0xb2, 0x2b, 0x68, 0x3e, 0x82, 0x0f, 0x60, 0xcf, 0x87, 0x22, 0x3b, 0x49, 0xcb, 0xa3, 0xdb, 0xbb, 0xb1, 0xe6, 0x71, 0xce, 0xcc, 0x99, 0x19, 0xc3, 0xbc, 0xce, 0xb7, 0xa2, 0xc2, 0xfc, 0x9b, 0x96, 0x58, 0xe8, 0x1d, 0xa1, 0x58, 0xc8, 0xad, 0xca, 0xb4, 0x51, 0x7b, 0x85, 0x4a, 0x2a, 0x2c, 0x55, 0x58, 0x28, 0x8b, 0x0f, 0x06, 0xd7, 0xb9, 0x03, 0x29, 0xcd, 0x4d, 0xd6, 0x9a, 0x5b, 0x65, 0xbe, 0x13, 0xc3, 0x84, 0xfc, 0x9a, 0x49, 0xc5, 0xb8, 0xf5, 0x6f, 0xf6, 0x57, 0xc1, 0xd5, 0x8e, 0x50, 0xfb, 0x7f, 0x68, 0xf1, 0x2b, 0x80, 0xfb, 0x67, 0x3b, 0xe3, 0x0f, 0xef, 0xbe, 0x6c, 0xd0, 0x5d, 0x18, 0xb9, 0x5a, 0x58, 0xb0, 0x38, 0x48, 0x82, 0x34, 0x2c, 0x86, 
0xce, 0x7d, 0xcf, 0x10, 0x82, 0xcb, 0x8a, 0x09, 0x16, 0x0f, 0x92, 0x20, 0x9d, 0x14, 0xde, 0x46, 0x0f, 0x00, 0x84, 0xc6, 0x84, 0x31, 0xc3, 0xad, 0x8d, 0x2f, 0x3c, 0x3e, 0x14, 0xfa, 0x4d, 0x1b, 0x40, 0xf7, 0x60, 0x2c, 0x2c, 0xae, 0x15, 0x25, 0x75, 0x7c, 0x99, 0x04, 0xe9, 0xb8, 0x18, 0x09, 0xfb, 0xd1, 0xb9, 0xe8, 0x11, 0x44, 0xae, 0x6f, 0x4f, 0xbd, 0xe6, 0xa9, 0xb0, 0x23, 0xb4, 0xe3, 0x2e, 0x5e, 0xc3, 0x8d, 0x56, 0x65, 0xd7, 0x00, 0xef, 0xd1, 0x1c, 0xc2, 0xd6, 0x6a, 0x34, 0xef, 0xc4, 0x8d, 0x5d, 0xe0, 0x73, 0xa3, 0x39, 0x9a, 0xc2, 0x40, 0x68, 0x2f, 0x2e, 0x2c, 0x06, 0x42, 0x2f, 0x7e, 0x5e, 0xc0, 0xec, 0xec, 0x98, 0x6e, 0x42, 0x27, 0x1e, 0x1f, 0x4c, 0x9c, 0xfb, 0x59, 0x86, 0xce, 0x2d, 0x0d, 0x7a, 0x0c, 0xd3, 0x3f, 0x34, 0xb9, 0xfc, 0xd2, 0x97, 0xbb, 0x7e, 0x92, 0x55, 0x1a, 0xf4, 0x23, 0x80, 0xc9, 0x69, 0x68, 0x87, 0x7a, 0x9e, 0x04, 0x69, 0x94, 0x8b, 0xec, 0xaa, 0x4e, 0x96, 0xfd, 0xb3, 0x88, 0x22, 0x3a, 0xae, 0xb8, 0x34, 0xe8, 0x09, 0xdc, 0x26, 0x46, 0x63, 0xc9, 0xb0, 0x6d, 0x24, 0xc5, 0x9a, 0x4b, 0xd7, 0x23, 0x7e, 0xe1, 0xf7, 0x7d, 0x93, 0x18, 0xfd, 0x89, 0x6d, 0x1a, 0x49, 0xd7, 0x6d, 0x1c, 0x3d, 0x85, 0x3b, 0x1d, 0x5c, 0x1b, 0x55, 0xf1, 0x23, 0x7e, 0xe5, 0xf1, 0xb7, 0x3c, 0x7e, 0xed, 0x32, 0x3d, 0xe1, 0x19, 0xcc, 0x3a, 0x02, 0xe3, 0x35, 0xdf, 0x9f, 0x18, 0x2f, 0x3d, 0x03, 0x79, 0xc6, 0x5b, 0x9f, 0xea, 0x29, 0x0f, 0x21, 0xea, 0xef, 0xee, 0xf6, 0xf3, 0xca, 0x03, 0xc3, 0xee, 0xf4, 0xa5, 0xa9, 0x86, 0xfe, 0x8f, 0x2f, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x49, 0x87, 0x17, 0x1f, 0x02, 0x03, 0x00, 0x00, }
if m != nil { return m.ArpNdSyncPending }
random_line_split
l2fib_evpn_ipmac_info.pb.go
/* Copyright 2019 Cisco Systems Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by protoc-gen-go. DO NOT EDIT. // source: l2fib_evpn_ipmac_info.proto package cisco_ios_xr_l2vpn_oper_l2vpn_forwarding_nodes_node_l2fib_evpn_ip6macs_l2fib_evpn_ip6mac import ( fmt "fmt" proto "github.com/golang/protobuf/proto" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type L2FibEvpnIpmacInfo_KEYS struct { NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` Bdid uint32 `protobuf:"varint,2,opt,name=bdid,proto3" json:"bdid,omitempty"` IpAddress string `protobuf:"bytes,3,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` IsLocal bool `protobuf:"varint,4,opt,name=is_local,json=isLocal,proto3" json:"is_local,omitempty"` MacAddress string `protobuf:"bytes,5,opt,name=mac_address,json=macAddress,proto3" json:"mac_address,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *L2FibEvpnIpmacInfo_KEYS) Reset() { *m = L2FibEvpnIpmacInfo_KEYS{} } func (m *L2FibEvpnIpmacInfo_KEYS) String() string { return proto.CompactTextString(m) } func (*L2FibEvpnIpmacInfo_KEYS) ProtoMessage() {} func (*L2FibEvpnIpmacInfo_KEYS) Descriptor() ([]byte, []int) { return fileDescriptor_359a6e06a2413ca9, []int{0} } func (m *L2FibEvpnIpmacInfo_KEYS) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_L2FibEvpnIpmacInfo_KEYS.Unmarshal(m, b) } func (m *L2FibEvpnIpmacInfo_KEYS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_L2FibEvpnIpmacInfo_KEYS.Marshal(b, m, deterministic) } func (m *L2FibEvpnIpmacInfo_KEYS) XXX_Merge(src proto.Message) { xxx_messageInfo_L2FibEvpnIpmacInfo_KEYS.Merge(m, src) } func (m *L2FibEvpnIpmacInfo_KEYS) XXX_Size() int { return xxx_messageInfo_L2FibEvpnIpmacInfo_KEYS.Size(m) } func (m *L2FibEvpnIpmacInfo_KEYS) XXX_DiscardUnknown() { xxx_messageInfo_L2FibEvpnIpmacInfo_KEYS.DiscardUnknown(m) } var xxx_messageInfo_L2FibEvpnIpmacInfo_KEYS proto.InternalMessageInfo func (m *L2FibEvpnIpmacInfo_KEYS) GetNodeId() string { if m != nil { return m.NodeId } return "" } func (m *L2FibEvpnIpmacInfo_KEYS) GetBdid() uint32 { if m != nil { return m.Bdid } return 0 } func (m *L2FibEvpnIpmacInfo_KEYS) 
GetIpAddress() string { if m != nil
return "" } func (m *L2FibEvpnIpmacInfo_KEYS) GetIsLocal() bool { if m != nil { return m.IsLocal } return false } func (m *L2FibEvpnIpmacInfo_KEYS) GetMacAddress() string { if m != nil { return m.MacAddress } return "" } type L2FibIpAddrT struct { AddrType string `protobuf:"bytes,1,opt,name=addr_type,json=addrType,proto3" json:"addr_type,omitempty"` Ip string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *L2FibIpAddrT) Reset() { *m = L2FibIpAddrT{} } func (m *L2FibIpAddrT) String() string { return proto.CompactTextString(m) } func (*L2FibIpAddrT) ProtoMessage() {} func (*L2FibIpAddrT) Descriptor() ([]byte, []int) { return fileDescriptor_359a6e06a2413ca9, []int{1} } func (m *L2FibIpAddrT) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_L2FibIpAddrT.Unmarshal(m, b) } func (m *L2FibIpAddrT) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_L2FibIpAddrT.Marshal(b, m, deterministic) } func (m *L2FibIpAddrT) XXX_Merge(src proto.Message) { xxx_messageInfo_L2FibIpAddrT.Merge(m, src) } func (m *L2FibIpAddrT) XXX_Size() int { return xxx_messageInfo_L2FibIpAddrT.Size(m) } func (m *L2FibIpAddrT) XXX_DiscardUnknown() { xxx_messageInfo_L2FibIpAddrT.DiscardUnknown(m) } var xxx_messageInfo_L2FibIpAddrT proto.InternalMessageInfo func (m *L2FibIpAddrT) GetAddrType() string { if m != nil { return m.AddrType } return "" } func (m *L2FibIpAddrT) GetIp() string { if m != nil { return m.Ip } return "" } type L2FibEvpnIpmacInfo struct { BdidXr uint32 `protobuf:"varint,50,opt,name=bdid_xr,json=bdidXr,proto3" json:"bdid_xr,omitempty"` MacAddressXr string `protobuf:"bytes,51,opt,name=mac_address_xr,json=macAddressXr,proto3" json:"mac_address_xr,omitempty"` IpAddressXr *L2FibIpAddrT `protobuf:"bytes,52,opt,name=ip_address_xr,json=ipAddressXr,proto3" json:"ip_address_xr,omitempty"` ArpNdSyncPending bool 
`protobuf:"varint,53,opt,name=arp_nd_sync_pending,json=arpNdSyncPending,proto3" json:"arp_nd_sync_pending,omitempty"` ArpNdProbePending bool `protobuf:"varint,54,opt,name=arp_nd_probe_pending,json=arpNdProbePending,proto3" json:"arp_nd_probe_pending,omitempty"` ArpNdDeletePending bool `protobuf:"varint,55,opt,name=arp_nd_delete_pending,json=arpNdDeletePending,proto3" json:"arp_nd_delete_pending,omitempty"` IsLocalXr bool `protobuf:"varint,56,opt,name=is_local_xr,json=isLocalXr,proto3" json:"is_local_xr,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *L2FibEvpnIpmacInfo) Reset() { *m = L2FibEvpnIpmacInfo{} } func (m *L2FibEvpnIpmacInfo) String() string { return proto.CompactTextString(m) } func (*L2FibEvpnIpmacInfo) ProtoMessage() {} func (*L2FibEvpnIpmacInfo) Descriptor() ([]byte, []int) { return fileDescriptor_359a6e06a2413ca9, []int{2} } func (m *L2FibEvpnIpmacInfo) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_L2FibEvpnIpmacInfo.Unmarshal(m, b) } func (m *L2FibEvpnIpmacInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_L2FibEvpnIpmacInfo.Marshal(b, m, deterministic) } func (m *L2FibEvpnIpmacInfo) XXX_Merge(src proto.Message) { xxx_messageInfo_L2FibEvpnIpmacInfo.Merge(m, src) } func (m *L2FibEvpnIpmacInfo) XXX_Size() int { return xxx_messageInfo_L2FibEvpnIpmacInfo.Size(m) } func (m *L2FibEvpnIpmacInfo) XXX_DiscardUnknown() { xxx_messageInfo_L2FibEvpnIpmacInfo.DiscardUnknown(m) } var xxx_messageInfo_L2FibEvpnIpmacInfo proto.InternalMessageInfo func (m *L2FibEvpnIpmacInfo) GetBdidXr() uint32 { if m != nil { return m.BdidXr } return 0 } func (m *L2FibEvpnIpmacInfo) GetMacAddressXr() string { if m != nil { return m.MacAddressXr } return "" } func (m *L2FibEvpnIpmacInfo) GetIpAddressXr() *L2FibIpAddrT { if m != nil { return m.IpAddressXr } return nil } func (m *L2FibEvpnIpmacInfo) GetArpNdSyncPending() bool { if m != nil { return 
m.ArpNdSyncPending } return false } func (m *L2FibEvpnIpmacInfo) GetArpNdProbePending() bool { if m != nil { return m.ArpNdProbePending } return false } func (m *L2FibEvpnIpmacInfo) GetArpNdDeletePending() bool { if m != nil { return m.ArpNdDeletePending } return false } func (m *L2FibEvpnIpmacInfo) GetIsLocalXr() bool { if m != nil { return m.IsLocalXr } return false } func init() { proto.RegisterType((*L2FibEvpnIpmacInfo_KEYS)(nil), "cisco_ios_xr_l2vpn_oper.l2vpn_forwarding.nodes.node.l2fib_evpn_ip6macs.l2fib_evpn_ip6mac.l2fib_evpn_ipmac_info_KEYS") proto.RegisterType((*L2FibIpAddrT)(nil), "cisco_ios_xr_l2vpn_oper.l2vpn_forwarding.nodes.node.l2fib_evpn_ip6macs.l2fib_evpn_ip6mac.l2fib_ip_addr_t") proto.RegisterType((*L2FibEvpnIpmacInfo)(nil), "cisco_ios_xr_l2vpn_oper.l2vpn_forwarding.nodes.node.l2fib_evpn_ip6macs.l2fib_evpn_ip6mac.l2fib_evpn_ipmac_info") } func init() { proto.RegisterFile("l2fib_evpn_ipmac_info.proto", fileDescriptor_359a6e06a2413ca9) } var fileDescriptor_359a6e06a2413ca9 = []byte{ // 413 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x52, 0xcb, 0xae, 0xd3, 0x30, 0x10, 0x55, 0x7a, 0x2f, 0x6d, 0x33, 0xa1, 0x05, 0x0c, 0x15, 0x81, 0x0a, 0x88, 0x2a, 0x16, 0xd9, 0x10, 0x44, 0x0a, 0x85, 0x15, 0x12, 0x12, 0x2c, 0x10, 0x08, 0x55, 0x29, 0x8b, 0xb0, 0xb2, 0x1c, 0xdb, 0x45, 0x96, 0x52, 0xdb, 0xb2, 0x2b, 0x68, 0x3e, 0x82, 0x0f, 0x60, 0xcf, 0x87, 0x22, 0x3b, 0x49, 0xcb, 0xa3, 0xdb, 0xbb, 0xb1, 0xe6, 0x71, 0xce, 0xcc, 0x99, 0x19, 0xc3, 0xbc, 0xce, 0xb7, 0xa2, 0xc2, 0xfc, 0x9b, 0x96, 0x58, 0xe8, 0x1d, 0xa1, 0x58, 0xc8, 0xad, 0xca, 0xb4, 0x51, 0x7b, 0x85, 0x4a, 0x2a, 0x2c, 0x55, 0x58, 0x28, 0x8b, 0x0f, 0x06, 0xd7, 0xb9, 0x03, 0x29, 0xcd, 0x4d, 0xd6, 0x9a, 0x5b, 0x65, 0xbe, 0x13, 0xc3, 0x84, 0xfc, 0x9a, 0x49, 0xc5, 0xb8, 0xf5, 0x6f, 0xf6, 0x57, 0xc1, 0xd5, 0x8e, 0x50, 0xfb, 0x7f, 0x68, 0xf1, 0x2b, 0x80, 0xfb, 0x67, 0x3b, 0xe3, 0x0f, 0xef, 0xbe, 0x6c, 0xd0, 0x5d, 0x18, 0xb9, 0x5a, 0x58, 0xb0, 0x38, 0x48, 
0x82, 0x34, 0x2c, 0x86, 0xce, 0x7d, 0xcf, 0x10, 0x82, 0xcb, 0x8a, 0x09, 0x16, 0x0f, 0x92, 0x20, 0x9d, 0x14, 0xde, 0x46, 0x0f, 0x00, 0x84, 0xc6, 0x84, 0x31, 0xc3, 0xad, 0x8d, 0x2f, 0x3c, 0x3e, 0x14, 0xfa, 0x4d, 0x1b, 0x40, 0xf7, 0x60, 0x2c, 0x2c, 0xae, 0x15, 0x25, 0x75, 0x7c, 0x99, 0x04, 0xe9, 0xb8, 0x18, 0x09, 0xfb, 0xd1, 0xb9, 0xe8, 0x11, 0x44, 0xae, 0x6f, 0x4f, 0xbd, 0xe6, 0xa9, 0xb0, 0x23, 0xb4, 0xe3, 0x2e, 0x5e, 0xc3, 0x8d, 0x56, 0x65, 0xd7, 0x00, 0xef, 0xd1, 0x1c, 0xc2, 0xd6, 0x6a, 0x34, 0xef, 0xc4, 0x8d, 0x5d, 0xe0, 0x73, 0xa3, 0x39, 0x9a, 0xc2, 0x40, 0x68, 0x2f, 0x2e, 0x2c, 0x06, 0x42, 0x2f, 0x7e, 0x5e, 0xc0, 0xec, 0xec, 0x98, 0x6e, 0x42, 0x27, 0x1e, 0x1f, 0x4c, 0x9c, 0xfb, 0x59, 0x86, 0xce, 0x2d, 0x0d, 0x7a, 0x0c, 0xd3, 0x3f, 0x34, 0xb9, 0xfc, 0xd2, 0x97, 0xbb, 0x7e, 0x92, 0x55, 0x1a, 0xf4, 0x23, 0x80, 0xc9, 0x69, 0x68, 0x87, 0x7a, 0x9e, 0x04, 0x69, 0x94, 0x8b, 0xec, 0xaa, 0x4e, 0x96, 0xfd, 0xb3, 0x88, 0x22, 0x3a, 0xae, 0xb8, 0x34, 0xe8, 0x09, 0xdc, 0x26, 0x46, 0x63, 0xc9, 0xb0, 0x6d, 0x24, 0xc5, 0x9a, 0x4b, 0xd7, 0x23, 0x7e, 0xe1, 0xf7, 0x7d, 0x93, 0x18, 0xfd, 0x89, 0x6d, 0x1a, 0x49, 0xd7, 0x6d, 0x1c, 0x3d, 0x85, 0x3b, 0x1d, 0x5c, 0x1b, 0x55, 0xf1, 0x23, 0x7e, 0xe5, 0xf1, 0xb7, 0x3c, 0x7e, 0xed, 0x32, 0x3d, 0xe1, 0x19, 0xcc, 0x3a, 0x02, 0xe3, 0x35, 0xdf, 0x9f, 0x18, 0x2f, 0x3d, 0x03, 0x79, 0xc6, 0x5b, 0x9f, 0xea, 0x29, 0x0f, 0x21, 0xea, 0xef, 0xee, 0xf6, 0xf3, 0xca, 0x03, 0xc3, 0xee, 0xf4, 0xa5, 0xa9, 0x86, 0xfe, 0x8f, 0x2f, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x49, 0x87, 0x17, 0x1f, 0x02, 0x03, 0x00, 0x00, }
{ return m.IpAddress }
conditional_block
l2fib_evpn_ipmac_info.pb.go
/* Copyright 2019 Cisco Systems Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by protoc-gen-go. DO NOT EDIT. // source: l2fib_evpn_ipmac_info.proto package cisco_ios_xr_l2vpn_oper_l2vpn_forwarding_nodes_node_l2fib_evpn_ip6macs_l2fib_evpn_ip6mac import ( fmt "fmt" proto "github.com/golang/protobuf/proto" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type L2FibEvpnIpmacInfo_KEYS struct { NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` Bdid uint32 `protobuf:"varint,2,opt,name=bdid,proto3" json:"bdid,omitempty"` IpAddress string `protobuf:"bytes,3,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` IsLocal bool `protobuf:"varint,4,opt,name=is_local,json=isLocal,proto3" json:"is_local,omitempty"` MacAddress string `protobuf:"bytes,5,opt,name=mac_address,json=macAddress,proto3" json:"mac_address,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *L2FibEvpnIpmacInfo_KEYS) Reset() { *m = L2FibEvpnIpmacInfo_KEYS{} } func (m *L2FibEvpnIpmacInfo_KEYS) String() string { return proto.CompactTextString(m) } func (*L2FibEvpnIpmacInfo_KEYS) ProtoMessage() {} func (*L2FibEvpnIpmacInfo_KEYS) Descriptor() ([]byte, []int) { return fileDescriptor_359a6e06a2413ca9, []int{0} } func (m *L2FibEvpnIpmacInfo_KEYS) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_L2FibEvpnIpmacInfo_KEYS.Unmarshal(m, b) } func (m *L2FibEvpnIpmacInfo_KEYS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_L2FibEvpnIpmacInfo_KEYS.Marshal(b, m, deterministic) } func (m *L2FibEvpnIpmacInfo_KEYS)
(src proto.Message) { xxx_messageInfo_L2FibEvpnIpmacInfo_KEYS.Merge(m, src) } func (m *L2FibEvpnIpmacInfo_KEYS) XXX_Size() int { return xxx_messageInfo_L2FibEvpnIpmacInfo_KEYS.Size(m) } func (m *L2FibEvpnIpmacInfo_KEYS) XXX_DiscardUnknown() { xxx_messageInfo_L2FibEvpnIpmacInfo_KEYS.DiscardUnknown(m) } var xxx_messageInfo_L2FibEvpnIpmacInfo_KEYS proto.InternalMessageInfo func (m *L2FibEvpnIpmacInfo_KEYS) GetNodeId() string { if m != nil { return m.NodeId } return "" } func (m *L2FibEvpnIpmacInfo_KEYS) GetBdid() uint32 { if m != nil { return m.Bdid } return 0 } func (m *L2FibEvpnIpmacInfo_KEYS) GetIpAddress() string { if m != nil { return m.IpAddress } return "" } func (m *L2FibEvpnIpmacInfo_KEYS) GetIsLocal() bool { if m != nil { return m.IsLocal } return false } func (m *L2FibEvpnIpmacInfo_KEYS) GetMacAddress() string { if m != nil { return m.MacAddress } return "" } type L2FibIpAddrT struct { AddrType string `protobuf:"bytes,1,opt,name=addr_type,json=addrType,proto3" json:"addr_type,omitempty"` Ip string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *L2FibIpAddrT) Reset() { *m = L2FibIpAddrT{} } func (m *L2FibIpAddrT) String() string { return proto.CompactTextString(m) } func (*L2FibIpAddrT) ProtoMessage() {} func (*L2FibIpAddrT) Descriptor() ([]byte, []int) { return fileDescriptor_359a6e06a2413ca9, []int{1} } func (m *L2FibIpAddrT) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_L2FibIpAddrT.Unmarshal(m, b) } func (m *L2FibIpAddrT) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_L2FibIpAddrT.Marshal(b, m, deterministic) } func (m *L2FibIpAddrT) XXX_Merge(src proto.Message) { xxx_messageInfo_L2FibIpAddrT.Merge(m, src) } func (m *L2FibIpAddrT) XXX_Size() int { return xxx_messageInfo_L2FibIpAddrT.Size(m) } func (m *L2FibIpAddrT) XXX_DiscardUnknown() { 
xxx_messageInfo_L2FibIpAddrT.DiscardUnknown(m) } var xxx_messageInfo_L2FibIpAddrT proto.InternalMessageInfo func (m *L2FibIpAddrT) GetAddrType() string { if m != nil { return m.AddrType } return "" } func (m *L2FibIpAddrT) GetIp() string { if m != nil { return m.Ip } return "" } type L2FibEvpnIpmacInfo struct { BdidXr uint32 `protobuf:"varint,50,opt,name=bdid_xr,json=bdidXr,proto3" json:"bdid_xr,omitempty"` MacAddressXr string `protobuf:"bytes,51,opt,name=mac_address_xr,json=macAddressXr,proto3" json:"mac_address_xr,omitempty"` IpAddressXr *L2FibIpAddrT `protobuf:"bytes,52,opt,name=ip_address_xr,json=ipAddressXr,proto3" json:"ip_address_xr,omitempty"` ArpNdSyncPending bool `protobuf:"varint,53,opt,name=arp_nd_sync_pending,json=arpNdSyncPending,proto3" json:"arp_nd_sync_pending,omitempty"` ArpNdProbePending bool `protobuf:"varint,54,opt,name=arp_nd_probe_pending,json=arpNdProbePending,proto3" json:"arp_nd_probe_pending,omitempty"` ArpNdDeletePending bool `protobuf:"varint,55,opt,name=arp_nd_delete_pending,json=arpNdDeletePending,proto3" json:"arp_nd_delete_pending,omitempty"` IsLocalXr bool `protobuf:"varint,56,opt,name=is_local_xr,json=isLocalXr,proto3" json:"is_local_xr,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *L2FibEvpnIpmacInfo) Reset() { *m = L2FibEvpnIpmacInfo{} } func (m *L2FibEvpnIpmacInfo) String() string { return proto.CompactTextString(m) } func (*L2FibEvpnIpmacInfo) ProtoMessage() {} func (*L2FibEvpnIpmacInfo) Descriptor() ([]byte, []int) { return fileDescriptor_359a6e06a2413ca9, []int{2} } func (m *L2FibEvpnIpmacInfo) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_L2FibEvpnIpmacInfo.Unmarshal(m, b) } func (m *L2FibEvpnIpmacInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_L2FibEvpnIpmacInfo.Marshal(b, m, deterministic) } func (m *L2FibEvpnIpmacInfo) XXX_Merge(src proto.Message) { 
xxx_messageInfo_L2FibEvpnIpmacInfo.Merge(m, src) } func (m *L2FibEvpnIpmacInfo) XXX_Size() int { return xxx_messageInfo_L2FibEvpnIpmacInfo.Size(m) } func (m *L2FibEvpnIpmacInfo) XXX_DiscardUnknown() { xxx_messageInfo_L2FibEvpnIpmacInfo.DiscardUnknown(m) } var xxx_messageInfo_L2FibEvpnIpmacInfo proto.InternalMessageInfo func (m *L2FibEvpnIpmacInfo) GetBdidXr() uint32 { if m != nil { return m.BdidXr } return 0 } func (m *L2FibEvpnIpmacInfo) GetMacAddressXr() string { if m != nil { return m.MacAddressXr } return "" } func (m *L2FibEvpnIpmacInfo) GetIpAddressXr() *L2FibIpAddrT { if m != nil { return m.IpAddressXr } return nil } func (m *L2FibEvpnIpmacInfo) GetArpNdSyncPending() bool { if m != nil { return m.ArpNdSyncPending } return false } func (m *L2FibEvpnIpmacInfo) GetArpNdProbePending() bool { if m != nil { return m.ArpNdProbePending } return false } func (m *L2FibEvpnIpmacInfo) GetArpNdDeletePending() bool { if m != nil { return m.ArpNdDeletePending } return false } func (m *L2FibEvpnIpmacInfo) GetIsLocalXr() bool { if m != nil { return m.IsLocalXr } return false } func init() { proto.RegisterType((*L2FibEvpnIpmacInfo_KEYS)(nil), "cisco_ios_xr_l2vpn_oper.l2vpn_forwarding.nodes.node.l2fib_evpn_ip6macs.l2fib_evpn_ip6mac.l2fib_evpn_ipmac_info_KEYS") proto.RegisterType((*L2FibIpAddrT)(nil), "cisco_ios_xr_l2vpn_oper.l2vpn_forwarding.nodes.node.l2fib_evpn_ip6macs.l2fib_evpn_ip6mac.l2fib_ip_addr_t") proto.RegisterType((*L2FibEvpnIpmacInfo)(nil), "cisco_ios_xr_l2vpn_oper.l2vpn_forwarding.nodes.node.l2fib_evpn_ip6macs.l2fib_evpn_ip6mac.l2fib_evpn_ipmac_info") } func init() { proto.RegisterFile("l2fib_evpn_ipmac_info.proto", fileDescriptor_359a6e06a2413ca9) } var fileDescriptor_359a6e06a2413ca9 = []byte{ // 413 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x52, 0xcb, 0xae, 0xd3, 0x30, 0x10, 0x55, 0x7a, 0x2f, 0x6d, 0x33, 0xa1, 0x05, 0x0c, 0x15, 0x81, 0x0a, 0x88, 0x2a, 0x16, 0xd9, 0x10, 0x44, 0x0a, 0x85, 0x15, 0x12, 
0x12, 0x2c, 0x10, 0x08, 0x55, 0x29, 0x8b, 0xb0, 0xb2, 0x1c, 0xdb, 0x45, 0x96, 0x52, 0xdb, 0xb2, 0x2b, 0x68, 0x3e, 0x82, 0x0f, 0x60, 0xcf, 0x87, 0x22, 0x3b, 0x49, 0xcb, 0xa3, 0xdb, 0xbb, 0xb1, 0xe6, 0x71, 0xce, 0xcc, 0x99, 0x19, 0xc3, 0xbc, 0xce, 0xb7, 0xa2, 0xc2, 0xfc, 0x9b, 0x96, 0x58, 0xe8, 0x1d, 0xa1, 0x58, 0xc8, 0xad, 0xca, 0xb4, 0x51, 0x7b, 0x85, 0x4a, 0x2a, 0x2c, 0x55, 0x58, 0x28, 0x8b, 0x0f, 0x06, 0xd7, 0xb9, 0x03, 0x29, 0xcd, 0x4d, 0xd6, 0x9a, 0x5b, 0x65, 0xbe, 0x13, 0xc3, 0x84, 0xfc, 0x9a, 0x49, 0xc5, 0xb8, 0xf5, 0x6f, 0xf6, 0x57, 0xc1, 0xd5, 0x8e, 0x50, 0xfb, 0x7f, 0x68, 0xf1, 0x2b, 0x80, 0xfb, 0x67, 0x3b, 0xe3, 0x0f, 0xef, 0xbe, 0x6c, 0xd0, 0x5d, 0x18, 0xb9, 0x5a, 0x58, 0xb0, 0x38, 0x48, 0x82, 0x34, 0x2c, 0x86, 0xce, 0x7d, 0xcf, 0x10, 0x82, 0xcb, 0x8a, 0x09, 0x16, 0x0f, 0x92, 0x20, 0x9d, 0x14, 0xde, 0x46, 0x0f, 0x00, 0x84, 0xc6, 0x84, 0x31, 0xc3, 0xad, 0x8d, 0x2f, 0x3c, 0x3e, 0x14, 0xfa, 0x4d, 0x1b, 0x40, 0xf7, 0x60, 0x2c, 0x2c, 0xae, 0x15, 0x25, 0x75, 0x7c, 0x99, 0x04, 0xe9, 0xb8, 0x18, 0x09, 0xfb, 0xd1, 0xb9, 0xe8, 0x11, 0x44, 0xae, 0x6f, 0x4f, 0xbd, 0xe6, 0xa9, 0xb0, 0x23, 0xb4, 0xe3, 0x2e, 0x5e, 0xc3, 0x8d, 0x56, 0x65, 0xd7, 0x00, 0xef, 0xd1, 0x1c, 0xc2, 0xd6, 0x6a, 0x34, 0xef, 0xc4, 0x8d, 0x5d, 0xe0, 0x73, 0xa3, 0x39, 0x9a, 0xc2, 0x40, 0x68, 0x2f, 0x2e, 0x2c, 0x06, 0x42, 0x2f, 0x7e, 0x5e, 0xc0, 0xec, 0xec, 0x98, 0x6e, 0x42, 0x27, 0x1e, 0x1f, 0x4c, 0x9c, 0xfb, 0x59, 0x86, 0xce, 0x2d, 0x0d, 0x7a, 0x0c, 0xd3, 0x3f, 0x34, 0xb9, 0xfc, 0xd2, 0x97, 0xbb, 0x7e, 0x92, 0x55, 0x1a, 0xf4, 0x23, 0x80, 0xc9, 0x69, 0x68, 0x87, 0x7a, 0x9e, 0x04, 0x69, 0x94, 0x8b, 0xec, 0xaa, 0x4e, 0x96, 0xfd, 0xb3, 0x88, 0x22, 0x3a, 0xae, 0xb8, 0x34, 0xe8, 0x09, 0xdc, 0x26, 0x46, 0x63, 0xc9, 0xb0, 0x6d, 0x24, 0xc5, 0x9a, 0x4b, 0xd7, 0x23, 0x7e, 0xe1, 0xf7, 0x7d, 0x93, 0x18, 0xfd, 0x89, 0x6d, 0x1a, 0x49, 0xd7, 0x6d, 0x1c, 0x3d, 0x85, 0x3b, 0x1d, 0x5c, 0x1b, 0x55, 0xf1, 0x23, 0x7e, 0xe5, 0xf1, 0xb7, 0x3c, 0x7e, 0xed, 0x32, 0x3d, 0xe1, 0x19, 0xcc, 0x3a, 0x02, 0xe3, 0x35, 0xdf, 0x9f, 
0x18, 0x2f, 0x3d, 0x03, 0x79, 0xc6, 0x5b, 0x9f, 0xea, 0x29, 0x0f, 0x21, 0xea, 0xef, 0xee, 0xf6, 0xf3, 0xca, 0x03, 0xc3, 0xee, 0xf4, 0xa5, 0xa9, 0x86, 0xfe, 0x8f, 0x2f, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x49, 0x87, 0x17, 0x1f, 0x02, 0x03, 0x00, 0x00, }
XXX_Merge
identifier_name
constants.rs
use lazy_static::lazy_static; use crate::options::ConfigColours; // Default widget ID pub const DEFAULT_WIDGET_ID: u64 = 56709; // How long to store data. pub const STALE_MAX_MILLISECONDS: u64 = 600 * 1000; // Keep 10 minutes of data. // How much data is SHOWN pub const DEFAULT_TIME_MILLISECONDS: u64 = 60 * 1000; // Defaults to 1 min. pub const STALE_MIN_MILLISECONDS: u64 = 30 * 1000; // Lowest is 30 seconds pub const TIME_CHANGE_MILLISECONDS: u64 = 15 * 1000; // How much to increment each time pub const AUTOHIDE_TIMEOUT_MILLISECONDS: u64 = 5000; // 5 seconds to autohide pub const TICK_RATE_IN_MILLISECONDS: u64 = 200; // How fast the screen refreshes pub const DEFAULT_REFRESH_RATE_IN_MILLISECONDS: u64 = 1000; pub const MAX_KEY_TIMEOUT_IN_MILLISECONDS: u64 = 1000; // Number of colours to generate for the CPU chart/table pub const NUM_COLOURS: usize = 256; // Limits for when we should stop showing table gaps/labels (anything less means not shown) pub const TABLE_GAP_HEIGHT_LIMIT: u16 = 7; pub const TIME_LABEL_HEIGHT_LIMIT: u16 = 7; // Side borders lazy_static! { pub static ref SIDE_BORDERS: tui::widgets::Borders = tui::widgets::Borders::from_bits_truncate(20); pub static ref TOP_LEFT_RIGHT: tui::widgets::Borders = tui::widgets::Borders::from_bits_truncate(22); pub static ref BOTTOM_LEFT_RIGHT: tui::widgets::Borders = tui::widgets::Borders::from_bits_truncate(28); pub static ref DEFAULT_TEXT_STYLE: tui::style::Style = tui::style::Style::default().fg(tui::style::Color::Gray); pub static ref DEFAULT_HEADER_STYLE: tui::style::Style = tui::style::Style::default().fg(tui::style::Color::LightBlue); } // Colour profiles lazy_static! 
{ pub static ref DEFAULT_LIGHT_MODE_COLOUR_PALETTE: ConfigColours = ConfigColours { text_color: Some("black".to_string()), border_color: Some("black".to_string()), table_header_color: Some("black".to_string()), widget_title_color: Some("black".to_string()), selected_text_color: Some("white".to_string()), graph_color: Some("black".to_string()), disabled_text_color: Some("gray".to_string()), ..ConfigColours::default() }; pub static ref GRUVBOX_COLOUR_PALETTE: ConfigColours = ConfigColours { table_header_color: Some("#ebdbb2".to_string()), all_cpu_color: Some("#cc241d".to_string()), avg_cpu_color: Some("#98971a".to_string()), cpu_core_colors: Some(vec![ "#d79921".to_string(), "#458588".to_string(), "#b16286".to_string(), "#689d6a".to_string(), "#fb4934".to_string(), "#b8bb26".to_string(), "#fe8019".to_string(), "#fabd2f".to_string(), "#83a598".to_string(), "#d3869b".to_string(), "#8ec07c".to_string(), "#d65d0e".to_string(), "#fbf1c7".to_string(), "#ebdbb2".to_string(), "#d5c4a1".to_string(), "#bdae93".to_string(), "#a89984".to_string(), ]), ram_color: Some("#458588".to_string()), swap_color: Some("#fabd2f".to_string()), rx_color: Some("#458588".to_string()), tx_color: Some("#fabd2f".to_string()), rx_total_color: Some("#83a598".to_string()), tx_total_color: Some("#d79921".to_string()), border_color: Some("#ebdbb2".to_string()), highlighted_border_color: Some("#fe8019".to_string()), disabled_text_color: Some("#665c54".to_string()), text_color: Some("#ebdbb2".to_string()), selected_text_color: Some("#1d2021".to_string()), selected_bg_color: Some("#ebdbb2".to_string()), widget_title_color: Some("#ebdbb2".to_string()), graph_color: Some("#ebdbb2".to_string()), high_battery_color: Some("#98971a".to_string()), medium_battery_color: Some("#fabd2f".to_string()), low_battery_color: Some("#fb4934".to_string()) }; pub static ref GRUVBOX_LIGHT_COLOUR_PALETTE: ConfigColours = ConfigColours { table_header_color: Some("#3c3836".to_string()), all_cpu_color: 
Some("#cc241d".to_string()), avg_cpu_color: Some("#98971a".to_string()), cpu_core_colors: Some(vec![ "#d79921".to_string(), "#458588".to_string(), "#b16286".to_string(), "#689d6a".to_string(), "#fb4934".to_string(), "#b8bb26".to_string(), "#fe8019".to_string(), "#fabd2f".to_string(), "#83a598".to_string(), "#d3869b".to_string(), "#8ec07c".to_string(), "#d65d0e".to_string(), "#928374".to_string(), "#665c54".to_string(), "#504945".to_string(), "#3c3836".to_string(), "#282828".to_string(), ]), ram_color: Some("#458588".to_string()), swap_color: Some("#cc241d".to_string()), rx_color: Some("#458588".to_string()), tx_color: Some("#cc241d".to_string()), rx_total_color: Some("#83a598".to_string()), tx_total_color: Some("#9d0006".to_string()), border_color: Some("#3c3836".to_string()), highlighted_border_color: Some("#fe8019".to_string()), disabled_text_color: Some("#665c54".to_string()), text_color: Some("#3c3836".to_string()), selected_text_color: Some("#f9f5d7".to_string()), selected_bg_color: Some("#665c54".to_string()), widget_title_color: Some("#3c3836".to_string()), graph_color: Some("#3c3836".to_string()), high_battery_color: Some("#98971a".to_string()), medium_battery_color: Some("#fabd2f".to_string()), low_battery_color: Some("#fb4934".to_string()) }; // pub static ref NORD_COLOUR_PALETTE: ConfigColours = ConfigColours { // table_header_color: None,
// rx_color: None, // tx_color: None, // rx_total_color: None, // tx_total_color: None, // border_color: None, // highlighted_border_color: None, // text_color: None, // selected_text_color: None, // selected_bg_color: None, // widget_title_color: None, // graph_color: None, // high_battery_color: None, // medium_battery_color: None, // low_battery_color: None, // disabled_text_color: None, // }; } // FIXME: [HELP] I wanna update this before release... it's missing mouse too. // Help text pub const HELP_CONTENTS_TEXT: [&str; 8] = [ "Press the corresponding numbers to jump to the section, or scroll:", "1 - General", "2 - CPU widget", "3 - Process widget", "4 - Process search widget", "5 - Process sort widget", "6 - Battery widget", "7 - Basic memory widget", ]; pub const GENERAL_HELP_TEXT: [&str; 29] = [ "1 - General", "q, Ctrl-c Quit", "Esc Close dialog windows, search, widgets, or exit expanded mode", "Ctrl-r Reset display and any collected data", "f Freeze/unfreeze updating with new data", "Ctrl-Left, ", "Shift-Left, Move widget selection left", "H, A ", "Ctrl-Right, ", "Shift-Right, Move widget selection right", "L, D ", "Ctrl-Up, ", "Shift-Up, Move widget selection up", "K, W ", "Ctrl-Down, ", "Shift-Down, Move widget selection down", "J, S ", "Left, h Move left within widget", "Down, j Move down within widget", "Up, k Move up within widget", "Right, l Move right within widget", "? Open help menu", "gg Jump to the first entry", "G Jump to the last entry", "e Toggle expanding the currently selected widget", "+ Zoom in on chart (decrease time range)", "- Zoom out on chart (increase time range)", "= Reset zoom", "Mouse scroll Scroll through the tables or zoom in/out of charts by scrolling up/down", ]; pub const CPU_HELP_TEXT: [&str; 2] = [ "2 - CPU widget\n", "Mouse scroll Scrolling over an CPU core/average shows only that entry on the chart", ]; // TODO [Help]: Search in help? // TODO [Help]: Move to using tables for easier formatting? 
pub const PROCESS_HELP_TEXT: [&str; 13] = [ "3 - Process widget", "dd Kill the selected process", "c Sort by CPU usage, press again to reverse sorting order", "m Sort by memory usage, press again to reverse sorting order", "p Sort by PID name, press again to reverse sorting order", "n Sort by process name, press again to reverse sorting order", "Tab Group/un-group processes with the same name", "Ctrl-f, / Open process search widget", "P Toggle between showing the full command or just the process name", "s, F6 Open process sort widget", "I Invert current sort", "% Toggle between values and percentages for memory usage", "t, F5 Toggle tree mode", ]; pub const SEARCH_HELP_TEXT: [&str; 46] = [ "4 - Process search widget", "Tab Toggle between searching for PID and name", "Esc Close the search widget (retains the filter)", "Ctrl-a Skip to the start of the search query", "Ctrl-e Skip to the end of the search query", "Ctrl-u Clear the current search query", "Backspace Delete the character behind the cursor", "Delete Delete the character at the cursor", "Alt-c, F1 Toggle matching case", "Alt-w, F2 Toggle matching the entire word", "Alt-r, F3 Toggle using regex", "Left, Alt-h Move cursor left", "Right, Alt-l Move cursor right", "", "Supported search types:", "<by name/cmd> ex: btm", "pid ex: pid 825", "cpu, cpu% ex: cpu > 4.2", "mem, mem% ex: mem < 4.2", "memb ex: memb < 100 kb", "read, r/s ex: read >= 1 b", "write, w/s ex: write <= 1 tb", "tread, t.read ex: tread = 1", "twrite, t.write ex: twrite = 1", "state ex: state = running", "", "Comparison operators:", "= ex: cpu = 1", "> ex: cpu > 1", "< ex: cpu < 1", ">= ex: cpu >= 1", "<= ex: cpu <= 1", "", "Logical operators:", "and, &&, <Space> ex: btm and cpu > 1 and mem > 1", "or, || ex: btm or firefox", "", "Supported units:", "B ex: read > 1 b", "KB ex: read > 1 kb", "MB ex: read > 1 mb", "TB ex: read > 1 tb", "KiB ex: read > 1 kib", "MiB ex: read > 1 mib", "GiB ex: read > 1 gib", "TiB ex: read > 1 tib", ]; pub const 
SORT_HELP_TEXT: [&str; 6] = [ "5 - Sort widget\n", "Down, 'j' Scroll down in list", "Up, 'k' Scroll up in list", "Mouse scroll Scroll through sort widget", "Esc Close the sort widget", "Enter Sort by current selected column", ]; pub const BATTERY_HELP_TEXT: [&str; 3] = [ "6 - Battery widget", "Left Go to previous battery", "Right Go to next battery", ]; pub const BASIC_MEM_HELP_TEXT: [&str; 2] = [ "7 - Basic memory widget", "% Toggle between values and percentages for memory usage", ]; lazy_static! { pub static ref HELP_TEXT: Vec<Vec<&'static str>> = vec![ HELP_CONTENTS_TEXT.to_vec(), GENERAL_HELP_TEXT.to_vec(), CPU_HELP_TEXT.to_vec(), PROCESS_HELP_TEXT.to_vec(), SEARCH_HELP_TEXT.to_vec(), SORT_HELP_TEXT.to_vec(), BATTERY_HELP_TEXT.to_vec(), BASIC_MEM_HELP_TEXT.to_vec(), ]; } // Default layouts pub const DEFAULT_LAYOUT: &str = r##" [[row]] ratio=30 [[row.child]] type="cpu" [[row]] ratio=40 [[row.child]] ratio=4 type="mem" [[row.child]] ratio=3 [[row.child.child]] type="temp" [[row.child.child]] type="disk" [[row]] ratio=30 [[row.child]] type="net" [[row.child]] type="proc" default=true "##; pub const DEFAULT_BATTERY_LAYOUT: &str = r##" [[row]] ratio=30 [[row.child]] ratio=2 type="cpu" [[row.child]] ratio=1 type="battery" [[row]] ratio=40 [[row.child]] ratio=4 type="mem" [[row.child]] ratio=3 [[row.child.child]] type="temp" [[row.child.child]] type="disk" [[row]] ratio=30 [[row.child]] type="net" [[row.child]] type="proc" default=true "##; // Config and flags pub const DEFAULT_CONFIG_FILE_PATH: &str = "bottom/bottom.toml"; pub const CONFIG_TOP_HEAD: &str = r##"# This is bottom's config file. Values in this config file will change when changed in the # interface. You can also manually change these values. Be aware that contents of this file will be overwritten if something is # changed in the application; you can disable writing via the --no_write flag or no_write config option. 
"##; pub const CONFIG_DISPLAY_OPTIONS_HEAD: &str = r##" # These options represent settings that affect how bottom functions. # If a setting here corresponds to command-line flag, then the flag will temporarily override # the setting. "##; pub const CONFIG_COLOUR_HEAD: &str = r##" # These options represent colour values for various parts of bottom. Note that colour support # will ultimately depend on the terminal - for example, the Terminal for macOS does NOT like # custom colours and it may glitch out. "##; pub const CONFIG_LAYOUT_HEAD: &str = r##" # These options represent how bottom will lay out its widgets. Layouts follow a pattern like this: # [[row]] represents a row in the application. # [[row.child]] represents either a widget or a column. # [[row.child.child]] represents a widget. # # All widgets must have the valid type value set to one of ["cpu", "mem", "proc", "net", "temp", "disk", "empty"]. # All layout components have a ratio value - if this is not set, then it defaults to 1. "##; pub const CONFIG_FILTER_HEAD: &str = r##" # These options represent disabled entries for the temperature and disk widgets. "##;
// all_cpu_color: None, // avg_cpu_color: None, // cpu_core_colors: None, // ram_color: None, // swap_color: None,
random_line_split
etcd.go
// // Go Config // Copyright (c) 2014 Brian W. Wolter, All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * Neither the names of Brian W. Wolter nor the names of the contributors may // be used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. // IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE // OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED // OF THE POSSIBILITY OF SUCH DAMAGE. 
// package conf import ( "io" "fmt" "log" "time" "sync" "strings" "net/url" "net/http" "io/ioutil" "encoding/json" ) const CONTENT_TYPE_FORM_ENCODED = "application/x-www-form-urlencoded" var httpClient = &http.Client{} /** * An etcd node */ type etcdNode struct { Created uint64 `json:"createdIndex"` Modified uint64 `json:"modifiedIndex"` Key string `json:"key"` Value string `json:"value"` } /** * An etcd response */ type etcdResponse struct { Action string `json:"action"` Node *etcdNode `json:"node"` Previous *etcdNode `json:"prevNode"` } type etcdObserver func(string, interface{}) /** * Cache */ type etcdCacheEntry struct { sync.RWMutex key string response *etcdResponse watching bool observers []etcdObserver finalize chan struct{} } /** * Create a cache entry */ func newEtcdCacheEntry(key string, rsp *etcdResponse) *etcdCacheEntry
/** * Obtain the response */ func (e *etcdCacheEntry) Response() *etcdResponse { e.RLock() defer e.RUnlock() return e.response } /** * Set the response */ func (e *etcdCacheEntry) SetResponse(rsp *etcdResponse) { e.Lock() defer e.Unlock() e.response = rsp } /** * Add an observer for this entry and begin watching if we aren't already */ func (e *etcdCacheEntry) AddObserver(c *EtcdConfig, observer etcdObserver) { e.Lock() defer e.Unlock() e.observers = append(e.observers, observer) e.startWatching(c) } /** * Remove all observers for this entry */ func (e *etcdCacheEntry) RemoveAllObservers() { e.Lock() defer e.Unlock() e.observers = make([]etcdObserver, 0) } /** * Are we watching this entry */ func (e *etcdCacheEntry) IsWatching() bool { e.RLock() defer e.RUnlock() return e.watching } /** * Start watching this entry for updates if we aren't already */ func (e *etcdCacheEntry) Watch(c *EtcdConfig) { e.Lock() defer e.Unlock() e.startWatching(c) } /** * Start watching this entry for updates if we aren't already */ func (e *etcdCacheEntry) startWatching(c *EtcdConfig) { // no locking; this must only be called by another method that handles synchronization if !e.watching { if e.finalize == nil { e.finalize = make(chan struct{}) } e.watching = true go e.watch(c) } } /** * Watch a property */ func (e *etcdCacheEntry) watch(c *EtcdConfig) { errcount := 0 backoff := time.Second maxboff := time.Second * 15 for { var err error e.RLock() key := e.key rsp := e.response e.RUnlock() rsp, err = c.get(key, true, rsp) if err == io.EOF || err == io.ErrUnexpectedEOF { errcount = 0 continue }else if err != nil { errcount++ delay := backoff * time.Duration(errcount * errcount) if delay > maxboff { delay = maxboff } log.Printf("[%s] could not watch (backing off %v) %v", key, delay, err) <- time.After(delay) continue } errcount = 0 e.Lock() e.response = rsp var observers []etcdObserver if c := len(e.observers); c > 0 { observers = make([]etcdObserver, c) copy(observers, e.observers) } 
e.Unlock() if observers != nil { for _, o := range observers { go o(key, rsp.Node.Value) } } } } /** * Stop watching this entry for updates */ func (e *etcdCacheEntry) Cancel() { e.Lock() defer e.Unlock() if e.watching { e.finalize <- struct{}{} e.watching = false } } /** * Cache */ type etcdCache struct { sync.RWMutex config *EtcdConfig props map[string]*etcdCacheEntry } /** * Create a cache */ func newEtcdCache(config *EtcdConfig) *etcdCache { return &etcdCache{config: config, props: make(map[string]*etcdCacheEntry)} } /** * Obtain a response from the cache */ func (c *etcdCache) Get(key string) (*etcdResponse, bool) { c.RLock() defer c.RUnlock() e, ok := c.props[key] if ok { return e.Response(), true }else{ return nil, false } } /** * Get or create a cache entry. Returns (entry, created or not); (no sync) */ func (c *etcdCache) getOrCreate(key string) (*etcdCacheEntry, bool) { e, ok := c.props[key] if ok { return e, false }else{ e = newEtcdCacheEntry(key, nil) c.props[key] = e return e, true } } /** * Set a response from the cache */ func (c *etcdCache) Set(key string, rsp *etcdResponse) { c.Lock() defer c.Unlock() c.set(key, rsp) } /** * Set a response from the cache (no sync) */ func (c *etcdCache) set(key string, rsp *etcdResponse) *etcdCacheEntry { e, ok := c.props[key] if ok { e.SetResponse(rsp) }else{ e = newEtcdCacheEntry(key, rsp) c.props[key] = e } return e } /** * Set and start watching a key */ func (c *etcdCache) SetAndWatch(key string, rsp *etcdResponse) { c.Lock() defer c.Unlock() e := c.set(key, rsp) e.Watch(c.config) } /** * Add an observer and begin watching if necessary */ func (c *etcdCache) AddObserver(key string, observer etcdObserver) { c.Lock() defer c.Unlock() e, _ := c.getOrCreate(key) e.AddObserver(c.config, observer) } /** * Delete a response from the cache */ func (c *etcdCache) Delete(key string) { c.Lock() defer c.Unlock() delete(c.props, key) } /** * An etcd backed configuration */ type EtcdConfig struct { endpoint *url.URL cache 
*etcdCache } /** * Create an etcd-backed configuration */ func NewEtcdConfig(endpoint string) (*EtcdConfig, error) { u, err := url.Parse(endpoint) if err != nil { return nil, err } etcd := &EtcdConfig{} etcd.endpoint = u etcd.cache = newEtcdCache(etcd) return etcd, nil } /** * Obtain a configuration node */ func (e *EtcdConfig) get(key string, wait bool, prev *etcdResponse) (*etcdResponse, error) { var u string path := keyToEtcdPath(key) if !wait { u = fmt.Sprintf("/v2/keys/%s", path) }else if prev != nil { u = fmt.Sprintf("/v2/keys/%s?wait=true&waitIndex=%d", path, prev.Node.Modified + 1) }else{ u = fmt.Sprintf("/v2/keys/%s?wait=true", path) } rel, err := url.Parse(u) if err != nil { return nil, err } abs := e.endpoint.ResolveReference(rel) log.Printf("[%s] GET %s", key, abs.String()) rsp, err := httpClient.Get(abs.String()) if rsp != nil { defer rsp.Body.Close() // always close Body } if err != nil { return nil, err } switch rsp.StatusCode { case http.StatusOK: // ok case http.StatusNotFound: return nil, NoSuchKeyError case http.StatusBadRequest: return nil, ClientError default: return nil, ServiceError } data, err := ioutil.ReadAll(rsp.Body) if err != nil { return nil, err } etc := &etcdResponse{} if err := json.Unmarshal(data, etc); err != nil { return nil, err } return etc, nil } /** * Obtain a configuration value. This method will block until it either succeeds or fails. */ func (e *EtcdConfig) Get(key string) (interface{}, error) { rsp, ok := e.cache.Get(key) if !ok || rsp == nil { var err error rsp, err = e.get(key, false, nil) if err != nil { return nil, err }else if rsp.Node == nil { return nil, NoSuchKeyError } } e.cache.SetAndWatch(key, rsp) return rsp.Node.Value, nil } /** * Watch a configuration value for changes. 
*/ func (e *EtcdConfig) Watch(key string, observer etcdObserver) { e.cache.AddObserver(key, observer) } /** * Set a configuration value */ func (e *EtcdConfig) set(key string, value interface{}) (*etcdResponse, error) { rel, err := url.Parse(fmt.Sprintf("/v2/keys/%s", keyToEtcdPath(key))) if err != nil { return nil, err } vals := url.Values{} switch v := value.(type) { case string: vals.Set("value", v) default: vals.Set("value", fmt.Sprintf("%v", v)) } abs := e.endpoint.ResolveReference(rel) req, err := http.NewRequest("PUT", abs.String(), strings.NewReader(vals.Encode())) if err != nil { return nil, err } req.Header.Add("Content-Type", CONTENT_TYPE_FORM_ENCODED) log.Printf("[%s] PUT %s", key, abs.String()) rsp, err := httpClient.Do(req) if rsp != nil { defer rsp.Body.Close() // always close Body } if err != nil { return nil, err } switch rsp.StatusCode { case http.StatusOK, http.StatusCreated: // ok case http.StatusBadRequest: return nil, ClientError default: return nil, ServiceError } data, err := ioutil.ReadAll(rsp.Body) if err != nil { return nil, err } etc := &etcdResponse{} if err := json.Unmarshal(data, etc); err != nil { return nil, err } return etc, nil } /** * Set a configuration value. This method will block until it either succeeds or fails. 
*/ func (e *EtcdConfig) Set(key string, value interface{}) (interface{}, error) { rsp, err := e.set(key, value) if err != nil { return nil, err }else if rsp.Node == nil { return nil, NoSuchKeyError } e.cache.SetAndWatch(key, rsp) return rsp.Node.Value, nil } /** * Delete a configuration node */ func (e *EtcdConfig) delete(key string) (*etcdResponse, error) { rel, err := url.Parse(fmt.Sprintf("/v2/keys/%s", keyToEtcdPath(key))) if err != nil { return nil, err } abs := e.endpoint.ResolveReference(rel) req, err := http.NewRequest("DELETE", abs.String(), nil) if err != nil { return nil, err } log.Printf("[%s] DELETE %s", key, abs.String()) rsp, err := httpClient.Do(req) if rsp != nil { defer rsp.Body.Close() // always close Body } if err != nil { return nil, err } switch rsp.StatusCode { case http.StatusOK: // ok case http.StatusNotFound: return nil, NoSuchKeyError case http.StatusBadRequest: return nil, ClientError default: return nil, ServiceError } data, err := ioutil.ReadAll(rsp.Body) if err != nil { return nil, err } etc := &etcdResponse{} if err := json.Unmarshal(data, etc); err != nil { return nil, err } return etc, nil } /** * Delete a configuration key/value. This method will block until it either succeeds or fails. */ func (e *EtcdConfig) Delete(key string) error { rsp, err := e.delete(key) if err != nil { return err } e.cache.Set(key, rsp) return nil } /** * Translate a key to a path. Keys are specified as "a.b.c" and paths are specified as "a/b/c" */ func keyToEtcdPath(key string) string { var path string // do it the easy way for now parts := strings.Split(key, ".") for i, p := range parts { if i > 0 { path += "/" } path += url.QueryEscape(p) } return path }
{ return &etcdCacheEntry{key: key, response:rsp, observers: make([]etcdObserver, 0)} }
identifier_body
etcd.go
// // Go Config // Copyright (c) 2014 Brian W. Wolter, All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * Neither the names of Brian W. Wolter nor the names of the contributors may // be used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. // IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE // OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED // OF THE POSSIBILITY OF SUCH DAMAGE. 
// package conf import ( "io" "fmt" "log" "time" "sync" "strings" "net/url" "net/http" "io/ioutil" "encoding/json" ) const CONTENT_TYPE_FORM_ENCODED = "application/x-www-form-urlencoded" var httpClient = &http.Client{} /** * An etcd node */ type etcdNode struct { Created uint64 `json:"createdIndex"` Modified uint64 `json:"modifiedIndex"` Key string `json:"key"` Value string `json:"value"` } /** * An etcd response */ type etcdResponse struct { Action string `json:"action"` Node *etcdNode `json:"node"` Previous *etcdNode `json:"prevNode"` } type etcdObserver func(string, interface{}) /** * Cache */ type etcdCacheEntry struct { sync.RWMutex key string response *etcdResponse watching bool observers []etcdObserver finalize chan struct{} } /** * Create a cache entry */ func newEtcdCacheEntry(key string, rsp *etcdResponse) *etcdCacheEntry { return &etcdCacheEntry{key: key, response:rsp, observers: make([]etcdObserver, 0)} } /** * Obtain the response */ func (e *etcdCacheEntry) Response() *etcdResponse { e.RLock() defer e.RUnlock() return e.response } /** * Set the response */ func (e *etcdCacheEntry) SetResponse(rsp *etcdResponse) { e.Lock() defer e.Unlock() e.response = rsp } /** * Add an observer for this entry and begin watching if we aren't already */ func (e *etcdCacheEntry) AddObserver(c *EtcdConfig, observer etcdObserver) { e.Lock() defer e.Unlock() e.observers = append(e.observers, observer) e.startWatching(c) } /** * Remove all observers for this entry */ func (e *etcdCacheEntry)
() { e.Lock() defer e.Unlock() e.observers = make([]etcdObserver, 0) } /** * Are we watching this entry */ func (e *etcdCacheEntry) IsWatching() bool { e.RLock() defer e.RUnlock() return e.watching } /** * Start watching this entry for updates if we aren't already */ func (e *etcdCacheEntry) Watch(c *EtcdConfig) { e.Lock() defer e.Unlock() e.startWatching(c) } /** * Start watching this entry for updates if we aren't already */ func (e *etcdCacheEntry) startWatching(c *EtcdConfig) { // no locking; this must only be called by another method that handles synchronization if !e.watching { if e.finalize == nil { e.finalize = make(chan struct{}) } e.watching = true go e.watch(c) } } /** * Watch a property */ func (e *etcdCacheEntry) watch(c *EtcdConfig) { errcount := 0 backoff := time.Second maxboff := time.Second * 15 for { var err error e.RLock() key := e.key rsp := e.response e.RUnlock() rsp, err = c.get(key, true, rsp) if err == io.EOF || err == io.ErrUnexpectedEOF { errcount = 0 continue }else if err != nil { errcount++ delay := backoff * time.Duration(errcount * errcount) if delay > maxboff { delay = maxboff } log.Printf("[%s] could not watch (backing off %v) %v", key, delay, err) <- time.After(delay) continue } errcount = 0 e.Lock() e.response = rsp var observers []etcdObserver if c := len(e.observers); c > 0 { observers = make([]etcdObserver, c) copy(observers, e.observers) } e.Unlock() if observers != nil { for _, o := range observers { go o(key, rsp.Node.Value) } } } } /** * Stop watching this entry for updates */ func (e *etcdCacheEntry) Cancel() { e.Lock() defer e.Unlock() if e.watching { e.finalize <- struct{}{} e.watching = false } } /** * Cache */ type etcdCache struct { sync.RWMutex config *EtcdConfig props map[string]*etcdCacheEntry } /** * Create a cache */ func newEtcdCache(config *EtcdConfig) *etcdCache { return &etcdCache{config: config, props: make(map[string]*etcdCacheEntry)} } /** * Obtain a response from the cache */ func (c *etcdCache) Get(key 
string) (*etcdResponse, bool) { c.RLock() defer c.RUnlock() e, ok := c.props[key] if ok { return e.Response(), true }else{ return nil, false } } /** * Get or create a cache entry. Returns (entry, created or not); (no sync) */ func (c *etcdCache) getOrCreate(key string) (*etcdCacheEntry, bool) { e, ok := c.props[key] if ok { return e, false }else{ e = newEtcdCacheEntry(key, nil) c.props[key] = e return e, true } } /** * Set a response from the cache */ func (c *etcdCache) Set(key string, rsp *etcdResponse) { c.Lock() defer c.Unlock() c.set(key, rsp) } /** * Set a response from the cache (no sync) */ func (c *etcdCache) set(key string, rsp *etcdResponse) *etcdCacheEntry { e, ok := c.props[key] if ok { e.SetResponse(rsp) }else{ e = newEtcdCacheEntry(key, rsp) c.props[key] = e } return e } /** * Set and start watching a key */ func (c *etcdCache) SetAndWatch(key string, rsp *etcdResponse) { c.Lock() defer c.Unlock() e := c.set(key, rsp) e.Watch(c.config) } /** * Add an observer and begin watching if necessary */ func (c *etcdCache) AddObserver(key string, observer etcdObserver) { c.Lock() defer c.Unlock() e, _ := c.getOrCreate(key) e.AddObserver(c.config, observer) } /** * Delete a response from the cache */ func (c *etcdCache) Delete(key string) { c.Lock() defer c.Unlock() delete(c.props, key) } /** * An etcd backed configuration */ type EtcdConfig struct { endpoint *url.URL cache *etcdCache } /** * Create an etcd-backed configuration */ func NewEtcdConfig(endpoint string) (*EtcdConfig, error) { u, err := url.Parse(endpoint) if err != nil { return nil, err } etcd := &EtcdConfig{} etcd.endpoint = u etcd.cache = newEtcdCache(etcd) return etcd, nil } /** * Obtain a configuration node */ func (e *EtcdConfig) get(key string, wait bool, prev *etcdResponse) (*etcdResponse, error) { var u string path := keyToEtcdPath(key) if !wait { u = fmt.Sprintf("/v2/keys/%s", path) }else if prev != nil { u = fmt.Sprintf("/v2/keys/%s?wait=true&waitIndex=%d", path, prev.Node.Modified + 1) 
}else{ u = fmt.Sprintf("/v2/keys/%s?wait=true", path) } rel, err := url.Parse(u) if err != nil { return nil, err } abs := e.endpoint.ResolveReference(rel) log.Printf("[%s] GET %s", key, abs.String()) rsp, err := httpClient.Get(abs.String()) if rsp != nil { defer rsp.Body.Close() // always close Body } if err != nil { return nil, err } switch rsp.StatusCode { case http.StatusOK: // ok case http.StatusNotFound: return nil, NoSuchKeyError case http.StatusBadRequest: return nil, ClientError default: return nil, ServiceError } data, err := ioutil.ReadAll(rsp.Body) if err != nil { return nil, err } etc := &etcdResponse{} if err := json.Unmarshal(data, etc); err != nil { return nil, err } return etc, nil } /** * Obtain a configuration value. This method will block until it either succeeds or fails. */ func (e *EtcdConfig) Get(key string) (interface{}, error) { rsp, ok := e.cache.Get(key) if !ok || rsp == nil { var err error rsp, err = e.get(key, false, nil) if err != nil { return nil, err }else if rsp.Node == nil { return nil, NoSuchKeyError } } e.cache.SetAndWatch(key, rsp) return rsp.Node.Value, nil } /** * Watch a configuration value for changes. 
*/ func (e *EtcdConfig) Watch(key string, observer etcdObserver) { e.cache.AddObserver(key, observer) } /** * Set a configuration value */ func (e *EtcdConfig) set(key string, value interface{}) (*etcdResponse, error) { rel, err := url.Parse(fmt.Sprintf("/v2/keys/%s", keyToEtcdPath(key))) if err != nil { return nil, err } vals := url.Values{} switch v := value.(type) { case string: vals.Set("value", v) default: vals.Set("value", fmt.Sprintf("%v", v)) } abs := e.endpoint.ResolveReference(rel) req, err := http.NewRequest("PUT", abs.String(), strings.NewReader(vals.Encode())) if err != nil { return nil, err } req.Header.Add("Content-Type", CONTENT_TYPE_FORM_ENCODED) log.Printf("[%s] PUT %s", key, abs.String()) rsp, err := httpClient.Do(req) if rsp != nil { defer rsp.Body.Close() // always close Body } if err != nil { return nil, err } switch rsp.StatusCode { case http.StatusOK, http.StatusCreated: // ok case http.StatusBadRequest: return nil, ClientError default: return nil, ServiceError } data, err := ioutil.ReadAll(rsp.Body) if err != nil { return nil, err } etc := &etcdResponse{} if err := json.Unmarshal(data, etc); err != nil { return nil, err } return etc, nil } /** * Set a configuration value. This method will block until it either succeeds or fails. 
*/ func (e *EtcdConfig) Set(key string, value interface{}) (interface{}, error) { rsp, err := e.set(key, value) if err != nil { return nil, err }else if rsp.Node == nil { return nil, NoSuchKeyError } e.cache.SetAndWatch(key, rsp) return rsp.Node.Value, nil } /** * Delete a configuration node */ func (e *EtcdConfig) delete(key string) (*etcdResponse, error) { rel, err := url.Parse(fmt.Sprintf("/v2/keys/%s", keyToEtcdPath(key))) if err != nil { return nil, err } abs := e.endpoint.ResolveReference(rel) req, err := http.NewRequest("DELETE", abs.String(), nil) if err != nil { return nil, err } log.Printf("[%s] DELETE %s", key, abs.String()) rsp, err := httpClient.Do(req) if rsp != nil { defer rsp.Body.Close() // always close Body } if err != nil { return nil, err } switch rsp.StatusCode { case http.StatusOK: // ok case http.StatusNotFound: return nil, NoSuchKeyError case http.StatusBadRequest: return nil, ClientError default: return nil, ServiceError } data, err := ioutil.ReadAll(rsp.Body) if err != nil { return nil, err } etc := &etcdResponse{} if err := json.Unmarshal(data, etc); err != nil { return nil, err } return etc, nil } /** * Delete a configuration key/value. This method will block until it either succeeds or fails. */ func (e *EtcdConfig) Delete(key string) error { rsp, err := e.delete(key) if err != nil { return err } e.cache.Set(key, rsp) return nil } /** * Translate a key to a path. Keys are specified as "a.b.c" and paths are specified as "a/b/c" */ func keyToEtcdPath(key string) string { var path string // do it the easy way for now parts := strings.Split(key, ".") for i, p := range parts { if i > 0 { path += "/" } path += url.QueryEscape(p) } return path }
RemoveAllObservers
identifier_name
etcd.go
// // Go Config // Copyright (c) 2014 Brian W. Wolter, All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * Neither the names of Brian W. Wolter nor the names of the contributors may // be used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. // IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE // OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED // OF THE POSSIBILITY OF SUCH DAMAGE. 
// package conf import ( "io" "fmt" "log" "time" "sync" "strings" "net/url" "net/http" "io/ioutil" "encoding/json" ) const CONTENT_TYPE_FORM_ENCODED = "application/x-www-form-urlencoded" var httpClient = &http.Client{} /** * An etcd node */ type etcdNode struct { Created uint64 `json:"createdIndex"` Modified uint64 `json:"modifiedIndex"` Key string `json:"key"` Value string `json:"value"` } /** * An etcd response */ type etcdResponse struct { Action string `json:"action"` Node *etcdNode `json:"node"` Previous *etcdNode `json:"prevNode"` } type etcdObserver func(string, interface{}) /** * Cache */ type etcdCacheEntry struct { sync.RWMutex key string response *etcdResponse watching bool observers []etcdObserver finalize chan struct{} } /** * Create a cache entry */ func newEtcdCacheEntry(key string, rsp *etcdResponse) *etcdCacheEntry { return &etcdCacheEntry{key: key, response:rsp, observers: make([]etcdObserver, 0)} } /** * Obtain the response */ func (e *etcdCacheEntry) Response() *etcdResponse { e.RLock() defer e.RUnlock() return e.response } /** * Set the response */ func (e *etcdCacheEntry) SetResponse(rsp *etcdResponse) { e.Lock() defer e.Unlock() e.response = rsp } /** * Add an observer for this entry and begin watching if we aren't already */ func (e *etcdCacheEntry) AddObserver(c *EtcdConfig, observer etcdObserver) { e.Lock() defer e.Unlock() e.observers = append(e.observers, observer) e.startWatching(c) } /** * Remove all observers for this entry */ func (e *etcdCacheEntry) RemoveAllObservers() { e.Lock() defer e.Unlock() e.observers = make([]etcdObserver, 0) } /** * Are we watching this entry */ func (e *etcdCacheEntry) IsWatching() bool { e.RLock() defer e.RUnlock() return e.watching } /** * Start watching this entry for updates if we aren't already */ func (e *etcdCacheEntry) Watch(c *EtcdConfig) { e.Lock() defer e.Unlock() e.startWatching(c) } /** * Start watching this entry for updates if we aren't already */ func (e *etcdCacheEntry) startWatching(c 
*EtcdConfig) { // no locking; this must only be called by another method that handles synchronization if !e.watching { if e.finalize == nil { e.finalize = make(chan struct{}) } e.watching = true go e.watch(c) } } /** * Watch a property */ func (e *etcdCacheEntry) watch(c *EtcdConfig) { errcount := 0 backoff := time.Second maxboff := time.Second * 15 for { var err error
e.RLock() key := e.key rsp := e.response e.RUnlock() rsp, err = c.get(key, true, rsp) if err == io.EOF || err == io.ErrUnexpectedEOF { errcount = 0 continue }else if err != nil { errcount++ delay := backoff * time.Duration(errcount * errcount) if delay > maxboff { delay = maxboff } log.Printf("[%s] could not watch (backing off %v) %v", key, delay, err) <- time.After(delay) continue } errcount = 0 e.Lock() e.response = rsp var observers []etcdObserver if c := len(e.observers); c > 0 { observers = make([]etcdObserver, c) copy(observers, e.observers) } e.Unlock() if observers != nil { for _, o := range observers { go o(key, rsp.Node.Value) } } } } /** * Stop watching this entry for updates */ func (e *etcdCacheEntry) Cancel() { e.Lock() defer e.Unlock() if e.watching { e.finalize <- struct{}{} e.watching = false } } /** * Cache */ type etcdCache struct { sync.RWMutex config *EtcdConfig props map[string]*etcdCacheEntry } /** * Create a cache */ func newEtcdCache(config *EtcdConfig) *etcdCache { return &etcdCache{config: config, props: make(map[string]*etcdCacheEntry)} } /** * Obtain a response from the cache */ func (c *etcdCache) Get(key string) (*etcdResponse, bool) { c.RLock() defer c.RUnlock() e, ok := c.props[key] if ok { return e.Response(), true }else{ return nil, false } } /** * Get or create a cache entry. 
Returns (entry, created or not); (no sync) */ func (c *etcdCache) getOrCreate(key string) (*etcdCacheEntry, bool) { e, ok := c.props[key] if ok { return e, false }else{ e = newEtcdCacheEntry(key, nil) c.props[key] = e return e, true } } /** * Set a response from the cache */ func (c *etcdCache) Set(key string, rsp *etcdResponse) { c.Lock() defer c.Unlock() c.set(key, rsp) } /** * Set a response from the cache (no sync) */ func (c *etcdCache) set(key string, rsp *etcdResponse) *etcdCacheEntry { e, ok := c.props[key] if ok { e.SetResponse(rsp) }else{ e = newEtcdCacheEntry(key, rsp) c.props[key] = e } return e } /** * Set and start watching a key */ func (c *etcdCache) SetAndWatch(key string, rsp *etcdResponse) { c.Lock() defer c.Unlock() e := c.set(key, rsp) e.Watch(c.config) } /** * Add an observer and begin watching if necessary */ func (c *etcdCache) AddObserver(key string, observer etcdObserver) { c.Lock() defer c.Unlock() e, _ := c.getOrCreate(key) e.AddObserver(c.config, observer) } /** * Delete a response from the cache */ func (c *etcdCache) Delete(key string) { c.Lock() defer c.Unlock() delete(c.props, key) } /** * An etcd backed configuration */ type EtcdConfig struct { endpoint *url.URL cache *etcdCache } /** * Create an etcd-backed configuration */ func NewEtcdConfig(endpoint string) (*EtcdConfig, error) { u, err := url.Parse(endpoint) if err != nil { return nil, err } etcd := &EtcdConfig{} etcd.endpoint = u etcd.cache = newEtcdCache(etcd) return etcd, nil } /** * Obtain a configuration node */ func (e *EtcdConfig) get(key string, wait bool, prev *etcdResponse) (*etcdResponse, error) { var u string path := keyToEtcdPath(key) if !wait { u = fmt.Sprintf("/v2/keys/%s", path) }else if prev != nil { u = fmt.Sprintf("/v2/keys/%s?wait=true&waitIndex=%d", path, prev.Node.Modified + 1) }else{ u = fmt.Sprintf("/v2/keys/%s?wait=true", path) } rel, err := url.Parse(u) if err != nil { return nil, err } abs := e.endpoint.ResolveReference(rel) log.Printf("[%s] GET %s", 
key, abs.String()) rsp, err := httpClient.Get(abs.String()) if rsp != nil { defer rsp.Body.Close() // always close Body } if err != nil { return nil, err } switch rsp.StatusCode { case http.StatusOK: // ok case http.StatusNotFound: return nil, NoSuchKeyError case http.StatusBadRequest: return nil, ClientError default: return nil, ServiceError } data, err := ioutil.ReadAll(rsp.Body) if err != nil { return nil, err } etc := &etcdResponse{} if err := json.Unmarshal(data, etc); err != nil { return nil, err } return etc, nil } /** * Obtain a configuration value. This method will block until it either succeeds or fails. */ func (e *EtcdConfig) Get(key string) (interface{}, error) { rsp, ok := e.cache.Get(key) if !ok || rsp == nil { var err error rsp, err = e.get(key, false, nil) if err != nil { return nil, err }else if rsp.Node == nil { return nil, NoSuchKeyError } } e.cache.SetAndWatch(key, rsp) return rsp.Node.Value, nil } /** * Watch a configuration value for changes. */ func (e *EtcdConfig) Watch(key string, observer etcdObserver) { e.cache.AddObserver(key, observer) } /** * Set a configuration value */ func (e *EtcdConfig) set(key string, value interface{}) (*etcdResponse, error) { rel, err := url.Parse(fmt.Sprintf("/v2/keys/%s", keyToEtcdPath(key))) if err != nil { return nil, err } vals := url.Values{} switch v := value.(type) { case string: vals.Set("value", v) default: vals.Set("value", fmt.Sprintf("%v", v)) } abs := e.endpoint.ResolveReference(rel) req, err := http.NewRequest("PUT", abs.String(), strings.NewReader(vals.Encode())) if err != nil { return nil, err } req.Header.Add("Content-Type", CONTENT_TYPE_FORM_ENCODED) log.Printf("[%s] PUT %s", key, abs.String()) rsp, err := httpClient.Do(req) if rsp != nil { defer rsp.Body.Close() // always close Body } if err != nil { return nil, err } switch rsp.StatusCode { case http.StatusOK, http.StatusCreated: // ok case http.StatusBadRequest: return nil, ClientError default: return nil, ServiceError } data, err := 
ioutil.ReadAll(rsp.Body) if err != nil { return nil, err } etc := &etcdResponse{} if err := json.Unmarshal(data, etc); err != nil { return nil, err } return etc, nil } /** * Set a configuration value. This method will block until it either succeeds or fails. */ func (e *EtcdConfig) Set(key string, value interface{}) (interface{}, error) { rsp, err := e.set(key, value) if err != nil { return nil, err }else if rsp.Node == nil { return nil, NoSuchKeyError } e.cache.SetAndWatch(key, rsp) return rsp.Node.Value, nil } /** * Delete a configuration node */ func (e *EtcdConfig) delete(key string) (*etcdResponse, error) { rel, err := url.Parse(fmt.Sprintf("/v2/keys/%s", keyToEtcdPath(key))) if err != nil { return nil, err } abs := e.endpoint.ResolveReference(rel) req, err := http.NewRequest("DELETE", abs.String(), nil) if err != nil { return nil, err } log.Printf("[%s] DELETE %s", key, abs.String()) rsp, err := httpClient.Do(req) if rsp != nil { defer rsp.Body.Close() // always close Body } if err != nil { return nil, err } switch rsp.StatusCode { case http.StatusOK: // ok case http.StatusNotFound: return nil, NoSuchKeyError case http.StatusBadRequest: return nil, ClientError default: return nil, ServiceError } data, err := ioutil.ReadAll(rsp.Body) if err != nil { return nil, err } etc := &etcdResponse{} if err := json.Unmarshal(data, etc); err != nil { return nil, err } return etc, nil } /** * Delete a configuration key/value. This method will block until it either succeeds or fails. */ func (e *EtcdConfig) Delete(key string) error { rsp, err := e.delete(key) if err != nil { return err } e.cache.Set(key, rsp) return nil } /** * Translate a key to a path. Keys are specified as "a.b.c" and paths are specified as "a/b/c" */ func keyToEtcdPath(key string) string { var path string // do it the easy way for now parts := strings.Split(key, ".") for i, p := range parts { if i > 0 { path += "/" } path += url.QueryEscape(p) } return path }
random_line_split
etcd.go
// // Go Config // Copyright (c) 2014 Brian W. Wolter, All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * Neither the names of Brian W. Wolter nor the names of the contributors may // be used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. // IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE // OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED // OF THE POSSIBILITY OF SUCH DAMAGE. 
// package conf import ( "io" "fmt" "log" "time" "sync" "strings" "net/url" "net/http" "io/ioutil" "encoding/json" ) const CONTENT_TYPE_FORM_ENCODED = "application/x-www-form-urlencoded" var httpClient = &http.Client{} /** * An etcd node */ type etcdNode struct { Created uint64 `json:"createdIndex"` Modified uint64 `json:"modifiedIndex"` Key string `json:"key"` Value string `json:"value"` } /** * An etcd response */ type etcdResponse struct { Action string `json:"action"` Node *etcdNode `json:"node"` Previous *etcdNode `json:"prevNode"` } type etcdObserver func(string, interface{}) /** * Cache */ type etcdCacheEntry struct { sync.RWMutex key string response *etcdResponse watching bool observers []etcdObserver finalize chan struct{} } /** * Create a cache entry */ func newEtcdCacheEntry(key string, rsp *etcdResponse) *etcdCacheEntry { return &etcdCacheEntry{key: key, response:rsp, observers: make([]etcdObserver, 0)} } /** * Obtain the response */ func (e *etcdCacheEntry) Response() *etcdResponse { e.RLock() defer e.RUnlock() return e.response } /** * Set the response */ func (e *etcdCacheEntry) SetResponse(rsp *etcdResponse) { e.Lock() defer e.Unlock() e.response = rsp } /** * Add an observer for this entry and begin watching if we aren't already */ func (e *etcdCacheEntry) AddObserver(c *EtcdConfig, observer etcdObserver) { e.Lock() defer e.Unlock() e.observers = append(e.observers, observer) e.startWatching(c) } /** * Remove all observers for this entry */ func (e *etcdCacheEntry) RemoveAllObservers() { e.Lock() defer e.Unlock() e.observers = make([]etcdObserver, 0) } /** * Are we watching this entry */ func (e *etcdCacheEntry) IsWatching() bool { e.RLock() defer e.RUnlock() return e.watching } /** * Start watching this entry for updates if we aren't already */ func (e *etcdCacheEntry) Watch(c *EtcdConfig) { e.Lock() defer e.Unlock() e.startWatching(c) } /** * Start watching this entry for updates if we aren't already */ func (e *etcdCacheEntry) startWatching(c 
*EtcdConfig) { // no locking; this must only be called by another method that handles synchronization if !e.watching { if e.finalize == nil { e.finalize = make(chan struct{}) } e.watching = true go e.watch(c) } } /** * Watch a property */ func (e *etcdCacheEntry) watch(c *EtcdConfig) { errcount := 0 backoff := time.Second maxboff := time.Second * 15 for { var err error e.RLock() key := e.key rsp := e.response e.RUnlock() rsp, err = c.get(key, true, rsp) if err == io.EOF || err == io.ErrUnexpectedEOF { errcount = 0 continue }else if err != nil { errcount++ delay := backoff * time.Duration(errcount * errcount) if delay > maxboff { delay = maxboff } log.Printf("[%s] could not watch (backing off %v) %v", key, delay, err) <- time.After(delay) continue } errcount = 0 e.Lock() e.response = rsp var observers []etcdObserver if c := len(e.observers); c > 0 { observers = make([]etcdObserver, c) copy(observers, e.observers) } e.Unlock() if observers != nil { for _, o := range observers { go o(key, rsp.Node.Value) } } } } /** * Stop watching this entry for updates */ func (e *etcdCacheEntry) Cancel() { e.Lock() defer e.Unlock() if e.watching { e.finalize <- struct{}{} e.watching = false } } /** * Cache */ type etcdCache struct { sync.RWMutex config *EtcdConfig props map[string]*etcdCacheEntry } /** * Create a cache */ func newEtcdCache(config *EtcdConfig) *etcdCache { return &etcdCache{config: config, props: make(map[string]*etcdCacheEntry)} } /** * Obtain a response from the cache */ func (c *etcdCache) Get(key string) (*etcdResponse, bool) { c.RLock() defer c.RUnlock() e, ok := c.props[key] if ok { return e.Response(), true }else{ return nil, false } } /** * Get or create a cache entry. 
Returns (entry, created or not); (no sync) */ func (c *etcdCache) getOrCreate(key string) (*etcdCacheEntry, bool) { e, ok := c.props[key] if ok { return e, false }else{ e = newEtcdCacheEntry(key, nil) c.props[key] = e return e, true } } /** * Set a response from the cache */ func (c *etcdCache) Set(key string, rsp *etcdResponse) { c.Lock() defer c.Unlock() c.set(key, rsp) } /** * Set a response from the cache (no sync) */ func (c *etcdCache) set(key string, rsp *etcdResponse) *etcdCacheEntry { e, ok := c.props[key] if ok { e.SetResponse(rsp) }else{ e = newEtcdCacheEntry(key, rsp) c.props[key] = e } return e } /** * Set and start watching a key */ func (c *etcdCache) SetAndWatch(key string, rsp *etcdResponse) { c.Lock() defer c.Unlock() e := c.set(key, rsp) e.Watch(c.config) } /** * Add an observer and begin watching if necessary */ func (c *etcdCache) AddObserver(key string, observer etcdObserver) { c.Lock() defer c.Unlock() e, _ := c.getOrCreate(key) e.AddObserver(c.config, observer) } /** * Delete a response from the cache */ func (c *etcdCache) Delete(key string) { c.Lock() defer c.Unlock() delete(c.props, key) } /** * An etcd backed configuration */ type EtcdConfig struct { endpoint *url.URL cache *etcdCache } /** * Create an etcd-backed configuration */ func NewEtcdConfig(endpoint string) (*EtcdConfig, error) { u, err := url.Parse(endpoint) if err != nil { return nil, err } etcd := &EtcdConfig{} etcd.endpoint = u etcd.cache = newEtcdCache(etcd) return etcd, nil } /** * Obtain a configuration node */ func (e *EtcdConfig) get(key string, wait bool, prev *etcdResponse) (*etcdResponse, error) { var u string path := keyToEtcdPath(key) if !wait { u = fmt.Sprintf("/v2/keys/%s", path) }else if prev != nil { u = fmt.Sprintf("/v2/keys/%s?wait=true&waitIndex=%d", path, prev.Node.Modified + 1) }else{ u = fmt.Sprintf("/v2/keys/%s?wait=true", path) } rel, err := url.Parse(u) if err != nil { return nil, err } abs := e.endpoint.ResolveReference(rel) log.Printf("[%s] GET %s", 
key, abs.String()) rsp, err := httpClient.Get(abs.String()) if rsp != nil { defer rsp.Body.Close() // always close Body } if err != nil { return nil, err } switch rsp.StatusCode { case http.StatusOK: // ok case http.StatusNotFound: return nil, NoSuchKeyError case http.StatusBadRequest: return nil, ClientError default: return nil, ServiceError } data, err := ioutil.ReadAll(rsp.Body) if err != nil { return nil, err } etc := &etcdResponse{} if err := json.Unmarshal(data, etc); err != nil { return nil, err } return etc, nil } /** * Obtain a configuration value. This method will block until it either succeeds or fails. */ func (e *EtcdConfig) Get(key string) (interface{}, error) { rsp, ok := e.cache.Get(key) if !ok || rsp == nil { var err error rsp, err = e.get(key, false, nil) if err != nil { return nil, err }else if rsp.Node == nil { return nil, NoSuchKeyError } } e.cache.SetAndWatch(key, rsp) return rsp.Node.Value, nil } /** * Watch a configuration value for changes. */ func (e *EtcdConfig) Watch(key string, observer etcdObserver) { e.cache.AddObserver(key, observer) } /** * Set a configuration value */ func (e *EtcdConfig) set(key string, value interface{}) (*etcdResponse, error) { rel, err := url.Parse(fmt.Sprintf("/v2/keys/%s", keyToEtcdPath(key))) if err != nil { return nil, err } vals := url.Values{} switch v := value.(type) { case string: vals.Set("value", v) default: vals.Set("value", fmt.Sprintf("%v", v)) } abs := e.endpoint.ResolveReference(rel) req, err := http.NewRequest("PUT", abs.String(), strings.NewReader(vals.Encode())) if err != nil { return nil, err } req.Header.Add("Content-Type", CONTENT_TYPE_FORM_ENCODED) log.Printf("[%s] PUT %s", key, abs.String()) rsp, err := httpClient.Do(req) if rsp != nil { defer rsp.Body.Close() // always close Body } if err != nil { return nil, err } switch rsp.StatusCode { case http.StatusOK, http.StatusCreated: // ok case http.StatusBadRequest: return nil, ClientError default: return nil, ServiceError } data, err := 
ioutil.ReadAll(rsp.Body) if err != nil { return nil, err } etc := &etcdResponse{} if err := json.Unmarshal(data, etc); err != nil { return nil, err } return etc, nil } /** * Set a configuration value. This method will block until it either succeeds or fails. */ func (e *EtcdConfig) Set(key string, value interface{}) (interface{}, error) { rsp, err := e.set(key, value) if err != nil { return nil, err }else if rsp.Node == nil { return nil, NoSuchKeyError } e.cache.SetAndWatch(key, rsp) return rsp.Node.Value, nil } /** * Delete a configuration node */ func (e *EtcdConfig) delete(key string) (*etcdResponse, error) { rel, err := url.Parse(fmt.Sprintf("/v2/keys/%s", keyToEtcdPath(key))) if err != nil { return nil, err } abs := e.endpoint.ResolveReference(rel) req, err := http.NewRequest("DELETE", abs.String(), nil) if err != nil { return nil, err } log.Printf("[%s] DELETE %s", key, abs.String()) rsp, err := httpClient.Do(req) if rsp != nil { defer rsp.Body.Close() // always close Body } if err != nil { return nil, err } switch rsp.StatusCode { case http.StatusOK: // ok case http.StatusNotFound: return nil, NoSuchKeyError case http.StatusBadRequest: return nil, ClientError default: return nil, ServiceError } data, err := ioutil.ReadAll(rsp.Body) if err != nil { return nil, err } etc := &etcdResponse{} if err := json.Unmarshal(data, etc); err != nil { return nil, err } return etc, nil } /** * Delete a configuration key/value. This method will block until it either succeeds or fails. */ func (e *EtcdConfig) Delete(key string) error { rsp, err := e.delete(key) if err != nil { return err } e.cache.Set(key, rsp) return nil } /** * Translate a key to a path. Keys are specified as "a.b.c" and paths are specified as "a/b/c" */ func keyToEtcdPath(key string) string { var path string // do it the easy way for now parts := strings.Split(key, ".") for i, p := range parts
return path }
{ if i > 0 { path += "/" } path += url.QueryEscape(p) }
conditional_block
shell.rs
use crate::error_pages::ErrorPageData; use crate::errors::*; use crate::path_prefix::get_path_prefix_client; use crate::serve::PageData; use crate::template::Template; use crate::ClientTranslationsManager; use crate::ErrorPages; use fmterr::fmt_err; use js_sys::Reflect; use std::cell::RefCell; use std::collections::HashMap; use std::rc::Rc; use sycamore::prelude::*; use wasm_bindgen::prelude::*; use wasm_bindgen::JsCast; use wasm_bindgen_futures::JsFuture; use web_sys::{Element, Request, RequestInit, RequestMode, Response}; /// Fetches the given resource. This should NOT be used by end users, but it's required by the CLI. #[doc(hidden)] pub async fn fetch(url: &str) -> Result<Option<String>, ClientError> { let js_err_handler = |err: JsValue| ClientError::Js(format!("{:?}", err)); let mut opts = RequestInit::new(); opts.method("GET").mode(RequestMode::Cors); let request = Request::new_with_str_and_init(url, &opts).map_err(js_err_handler)?; let window = web_sys::window().unwrap(); // Get the response as a future and await it let res_value = JsFuture::from(window.fetch_with_request(&request)) .await .map_err(js_err_handler)?; // Turn that into a proper response object let res: Response = res_value.dyn_into().unwrap(); // If the status is 404, we should return that the request worked but no file existed if res.status() == 404 { return Ok(None); } // Get the body thereof let body_promise = res.text().map_err(js_err_handler)?; let body = JsFuture::from(body_promise).await.map_err(js_err_handler)?; // Convert that into a string (this will be `None` if it wasn't a string in the JS) let body_str = body.as_string(); let body_str = match body_str { Some(body_str) => body_str, None => { return Err(FetchError::NotString { url: url.to_string(), } .into()) } }; // Handle non-200 error codes if res.status() == 200 { Ok(Some(body_str)) } else { Err(FetchError::NotOk { url: url.to_string(), status: res.status(), err: body_str, } .into()) } } /// Gets the render configuration from 
the JS global variable `__PERSEUS_RENDER_CFG`, which should be inlined by the server. This will /// return `None` on any error (not found, serialization failed, etc.), which should reasonably lead to a `panic!` in the caller. pub fn get_render_cfg() -> Option<HashMap<String, String>>
/// Gets the initial state injected by the server, if there was any. This is used to differentiate initial loads from subsequent ones, /// which have different log chains to prevent double-trips (a common SPA problem). pub fn get_initial_state() -> InitialState { let val_opt = web_sys::window().unwrap().get("__PERSEUS_INITIAL_STATE"); let js_obj = match val_opt { Some(js_obj) => js_obj, None => return InitialState::NotPresent, }; // The object should only actually contain the string value that was injected let state_str = match js_obj.as_string() { Some(state_str) => state_str, None => return InitialState::NotPresent, }; // On the server-side, we encode a `None` value directly (otherwise it will be some convoluted stringified JSON) if state_str == "None" { InitialState::Present(None) } else if state_str.starts_with("error-") { // We strip the prefix and escape any tab/newline control characters (inserted by `fmterr`) // Any others are user-inserted, and this is documented let err_page_data_str = state_str .strip_prefix("error-") .unwrap() .replace("\n", "\\n") .replace("\t", "\\t"); // There will be error page data encoded after `error-` let err_page_data = match serde_json::from_str::<ErrorPageData>(&err_page_data_str) { Ok(render_cfg) => render_cfg, // If there's a serialization error, we'll create a whole new error (500) Err(err) => ErrorPageData { url: "[current]".to_string(), status: 500, err: format!( "couldn't serialize error from server: '{}'", err.to_string() ), }, }; InitialState::Error(err_page_data) } else { InitialState::Present(Some(state_str)) } } /// Marks a checkpoint in the code and alerts any tests that it's been reached by creating an element that represents it. The preferred /// solution would be emitting a DOM event, but the WebDriver specification currently doesn't support waiting on those (go figure). This /// will only create a custom element if the `__PERSEUS_TESTING` JS global variable is set to `true`. 
/// /// This adds a `<div id="__perseus_checkpoint-<event-name>" />` to the `<div id="__perseus_checkpoints"></div>` element, creating the /// latter if it doesn't exist. Each checkpoint must have a unique name, and if the same checkpoint is executed twice, it'll be added /// with a `-<number>` after it, starting from `0`. In this way, we have a functional checkpoints queue for signalling to test code! /// Note that the checkpoint queue is NOT cleared on subsequent loads. /// /// Note: this is not just for internal usage, it's highly recommended that you use this for your own checkpoints as well! Just make /// sure your tests don't conflict with any internal Perseus checkpoint names (preferably prefix yours with `custom-` or the like, as /// Perseus' checkpoints may change at any time, but won't ever use that namespace). /// /// WARNING: your checkpoint names must not include hyphens! This will result in a `panic!`. pub fn checkpoint(name: &str) { if name.contains('-') { panic!("checkpoint must not contain hyphens, use underscores instead (hyphens are used as an internal delimiter)"); } let val_opt = web_sys::window().unwrap().get("__PERSEUS_TESTING"); let js_obj = match val_opt { Some(js_obj) => js_obj, None => return, }; // The object should only actually contain the string value that was injected let is_testing = match js_obj.as_bool() { Some(cfg_str) => cfg_str, None => return, }; if !is_testing { return; } // If we're here, we're testing // We dispatch a console warning to reduce the likelihood of literal 'testing in prod' crate::web_log!("Perseus is in testing mode. 
If you're an end-user and seeing this message, please report this as a bug to the website owners!"); // Create a custom element that can be waited for by the WebDriver // This will be removed by the next checkpoint let document = web_sys::window().unwrap().document().unwrap(); let container_opt = document.query_selector("#__perseus_checkpoints").unwrap(); let container: Element; if let Some(container_i) = container_opt { container = container_i; } else { // If the container doesn't exist yet, create it container = document.create_element("div").unwrap(); container.set_id("__perseus_checkpoints"); document .query_selector("body") .unwrap() .unwrap() .append_with_node_1(&container) .unwrap(); } // Get the number of checkpoints that already exist with the same ID // We prevent having to worry about checkpoints whose names are subsets of others by using the hyphen as a delimiter let num_checkpoints = document .query_selector_all(&format!("[id^=__perseus_checkpoint-{}-]", name)) .unwrap() .length(); // Append the new checkpoint let checkpoint = document.create_element("div").unwrap(); checkpoint.set_id(&format!( "__perseus_checkpoint-{}-{}", name, num_checkpoints )); container.append_with_node_1(&checkpoint).unwrap(); } /// A representation of whether or not the initial state was present. If it was, it could be `None` (some templates take no state), and /// if not, then this isn't an initial load, and we need to request the page from the server. It could also be an error that the server /// has rendered. pub enum InitialState { /// A non-error initial state has been injected. Present(Option<String>), /// An initial state ahs been injected that indicates an error. Error(ErrorPageData), /// No initial state has been injected (or if it has, it's been deliberately unset). NotPresent, } /// Fetches the information for the given page and renders it. This should be provided the actual path of the page to render (not just the /// broader template). 
Asynchronous Wasm is handled here, because only a few cases need it. // TODO handle exceptions higher up pub async fn app_shell( path: String, (template, was_incremental_match): (Template<DomNode>, bool), locale: String, translations_manager: Rc<RefCell<ClientTranslationsManager>>, error_pages: Rc<ErrorPages<DomNode>>, (initial_container, container_rx_elem): (Element, Element), // The container that the server put initial load content into and the reactive container tht we'll actually use ) { checkpoint("app_shell_entry"); // Check if this was an initial load and we already have the state let initial_state = get_initial_state(); match initial_state { // If we do have an initial state, then we have everything we need for immediate hydration (no double trips) // The state is here, and the HTML has already been injected for us (including head metadata) InitialState::Present(state) => { checkpoint("initial_state_present"); // Unset the initial state variable so we perform subsequent renders correctly // This monstrosity is needed until `web-sys` adds a `.set()` method on `Window` Reflect::set( &JsValue::from(web_sys::window().unwrap()), &JsValue::from("__PERSEUS_INITIAL_STATE"), &JsValue::undefined(), ) .unwrap(); // We need to move the server-rendered content from its current container to the reactive container (otherwise Sycamore can't work with it properly) let initial_html = initial_container.inner_html(); container_rx_elem.set_inner_html(&initial_html); initial_container.set_inner_html(""); // Make the initial container invisible initial_container .set_attribute("style", "display: none;") .unwrap(); checkpoint("page_visible"); // Now that the user can see something, we can get the translator let mut translations_manager_mut = translations_manager.borrow_mut(); // This gets an `Rc<Translator>` that references the translations manager, meaning no cloning of translations let translator = translations_manager_mut .get_translator_for_locale(&locale) .await; let 
translator = match translator { Ok(translator) => translator, Err(err) => { // Directly eliminate the HTML sent in from the server before we render an error page container_rx_elem.set_inner_html(""); match &err { // These errors happen because we couldn't get a translator, so they certainly don't get one ClientError::FetchError(FetchError::NotOk { url, status, .. }) => return error_pages.render_page(url, status, &fmt_err(&err), None, &container_rx_elem), ClientError::FetchError(FetchError::SerFailed { url, .. }) => return error_pages.render_page(url, &500, &fmt_err(&err), None, &container_rx_elem), ClientError::LocaleNotSupported { .. } => return error_pages.render_page(&format!("/{}/...", locale), &404, &fmt_err(&err), None, &container_rx_elem), // No other errors should be returned _ => panic!("expected 'AssetNotOk'/'AssetSerFailed'/'LocaleNotSupported' error, found other unacceptable error") } } }; // Hydrate that static code using the acquired state // BUG (Sycamore): this will double-render if the component is just text (no nodes) sycamore::hydrate_to( // This function provides translator context as needed || template.render_for_template(state, Rc::clone(&translator), false), &container_rx_elem, ); checkpoint("page_interactive"); } // If we have no initial state, we should proceed as usual, fetching the content and state from the server InitialState::NotPresent => { checkpoint("initial_state_not_present"); // If we're getting data about the index page, explicitly set it to that // This can be handled by the Perseus server (and is), but not by static exporting let path = match path.is_empty() { true => "index".to_string(), false => path, }; // Get the static page data let asset_url = format!( "{}/.perseus/page/{}/{}.json?template_name={}&was_incremental_match={}", get_path_prefix_client(), locale, path.to_string(), template.get_path(), was_incremental_match ); // If this doesn't exist, then it's a 404 (we went here by explicit navigation, but it may be an 
unservable ISR page or the like) let page_data_str = fetch(&asset_url).await; match page_data_str { Ok(page_data_str) => match page_data_str { Some(page_data_str) => { // All good, deserialize the page data let page_data = serde_json::from_str::<PageData>(&page_data_str); match page_data { Ok(page_data) => { // We have the page data ready, render everything // Interpolate the HTML directly into the document (we'll hydrate it later) container_rx_elem.set_inner_html(&page_data.content); // Interpolate the metadata directly into the document's `<head>` // Get the current head let head_elem = web_sys::window() .unwrap() .document() .unwrap() .query_selector("head") .unwrap() .unwrap(); let head_html = head_elem.inner_html(); // We'll assume that there's already previously interpolated head in addition to the hardcoded stuff, but it will be separated by the server-injected delimiter comment // Thus, we replace the stuff after that delimiter comment with the new head let head_parts: Vec<&str> = head_html .split("<!--PERSEUS_INTERPOLATED_HEAD_BEGINS-->") .collect(); let new_head = format!( "{}\n<!--PERSEUS_INTERPOLATED_HEAD_BEGINS-->\n{}", head_parts[0], &page_data.head ); head_elem.set_inner_html(&new_head); checkpoint("page_visible"); // Now that the user can see something, we can get the translator let mut translations_manager_mut = translations_manager.borrow_mut(); // This gets an `Rc<Translator>` that references the translations manager, meaning no cloning of translations let translator = translations_manager_mut .get_translator_for_locale(&locale) .await; let translator = match translator { Ok(translator) => translator, Err(err) => match &err { // These errors happen because we couldn't get a translator, so they certainly don't get one ClientError::FetchError(FetchError::NotOk { url, status, .. }) => return error_pages.render_page(url, status, &fmt_err(&err), None, &container_rx_elem), ClientError::FetchError(FetchError::SerFailed { url, .. 
}) => return error_pages.render_page(url, &500, &fmt_err(&err), None, &container_rx_elem), ClientError::LocaleNotSupported { locale } => return error_pages.render_page(&format!("/{}/...", locale), &404, &fmt_err(&err), None, &container_rx_elem), // No other errors should be returned _ => panic!("expected 'AssetNotOk'/'AssetSerFailed'/'LocaleNotSupported' error, found other unacceptable error") } }; // Hydrate that static code using the acquired state // BUG (Sycamore): this will double-render if the component is just text (no nodes) sycamore::hydrate_to( // This function provides translator context as needed || { template.render_for_template( page_data.state, Rc::clone(&translator), false, ) }, &container_rx_elem, ); checkpoint("page_interactive"); } // If the page failed to serialize, an exception has occurred Err(err) => panic!("page data couldn't be serialized: '{}'", err), }; } // No translators ready yet None => error_pages.render_page( &asset_url, &404, "page not found", None, &container_rx_elem, ), }, Err(err) => match &err { // No translators ready yet ClientError::FetchError(FetchError::NotOk { url, status, .. 
}) => error_pages .render_page(url, status, &fmt_err(&err), None, &container_rx_elem), // No other errors should be returned _ => panic!("expected 'AssetNotOk' error, found other unacceptable error"), }, }; } // Nothing should be done if an error was sent down InitialState::Error(ErrorPageData { url, status, err }) => { checkpoint("initial_state_error"); // We need to move the server-rendered content from its current container to the reactive container (otherwise Sycamore can't work with it properly) let initial_html = initial_container.inner_html(); container_rx_elem.set_inner_html(&initial_html); initial_container.set_inner_html(""); // Make the initial container invisible initial_container .set_attribute("style", "display: none;") .unwrap(); // Hydrate the currently static error page // Right now, we don't provide translators to any error pages that have come from the server error_pages.hydrate_page(&url, &status, &err, None, &container_rx_elem); } }; }
{ let val_opt = web_sys::window().unwrap().get("__PERSEUS_RENDER_CFG"); let js_obj = match val_opt { Some(js_obj) => js_obj, None => return None, }; // The object should only actually contain the string value that was injected let cfg_str = match js_obj.as_string() { Some(cfg_str) => cfg_str, None => return None, }; let render_cfg = match serde_json::from_str::<HashMap<String, String>>(&cfg_str) { Ok(render_cfg) => render_cfg, Err(_) => return None, }; Some(render_cfg) }
identifier_body
shell.rs
use crate::error_pages::ErrorPageData; use crate::errors::*; use crate::path_prefix::get_path_prefix_client; use crate::serve::PageData; use crate::template::Template; use crate::ClientTranslationsManager; use crate::ErrorPages; use fmterr::fmt_err; use js_sys::Reflect; use std::cell::RefCell; use std::collections::HashMap; use std::rc::Rc; use sycamore::prelude::*; use wasm_bindgen::prelude::*; use wasm_bindgen::JsCast; use wasm_bindgen_futures::JsFuture; use web_sys::{Element, Request, RequestInit, RequestMode, Response}; /// Fetches the given resource. This should NOT be used by end users, but it's required by the CLI. #[doc(hidden)] pub async fn fetch(url: &str) -> Result<Option<String>, ClientError> { let js_err_handler = |err: JsValue| ClientError::Js(format!("{:?}", err)); let mut opts = RequestInit::new(); opts.method("GET").mode(RequestMode::Cors); let request = Request::new_with_str_and_init(url, &opts).map_err(js_err_handler)?; let window = web_sys::window().unwrap(); // Get the response as a future and await it let res_value = JsFuture::from(window.fetch_with_request(&request)) .await .map_err(js_err_handler)?; // Turn that into a proper response object let res: Response = res_value.dyn_into().unwrap(); // If the status is 404, we should return that the request worked but no file existed if res.status() == 404 { return Ok(None); } // Get the body thereof let body_promise = res.text().map_err(js_err_handler)?; let body = JsFuture::from(body_promise).await.map_err(js_err_handler)?; // Convert that into a string (this will be `None` if it wasn't a string in the JS) let body_str = body.as_string(); let body_str = match body_str { Some(body_str) => body_str, None => { return Err(FetchError::NotString { url: url.to_string(), } .into()) } }; // Handle non-200 error codes if res.status() == 200 { Ok(Some(body_str)) } else { Err(FetchError::NotOk { url: url.to_string(), status: res.status(), err: body_str, } .into()) } } /// Gets the render configuration from 
the JS global variable `__PERSEUS_RENDER_CFG`, which should be inlined by the server. This will /// return `None` on any error (not found, serialization failed, etc.), which should reasonably lead to a `panic!` in the caller. pub fn get_render_cfg() -> Option<HashMap<String, String>> { let val_opt = web_sys::window().unwrap().get("__PERSEUS_RENDER_CFG"); let js_obj = match val_opt { Some(js_obj) => js_obj, None => return None, }; // The object should only actually contain the string value that was injected let cfg_str = match js_obj.as_string() { Some(cfg_str) => cfg_str, None => return None, }; let render_cfg = match serde_json::from_str::<HashMap<String, String>>(&cfg_str) { Ok(render_cfg) => render_cfg, Err(_) => return None, }; Some(render_cfg) } /// Gets the initial state injected by the server, if there was any. This is used to differentiate initial loads from subsequent ones, /// which have different log chains to prevent double-trips (a common SPA problem). pub fn
() -> InitialState { let val_opt = web_sys::window().unwrap().get("__PERSEUS_INITIAL_STATE"); let js_obj = match val_opt { Some(js_obj) => js_obj, None => return InitialState::NotPresent, }; // The object should only actually contain the string value that was injected let state_str = match js_obj.as_string() { Some(state_str) => state_str, None => return InitialState::NotPresent, }; // On the server-side, we encode a `None` value directly (otherwise it will be some convoluted stringified JSON) if state_str == "None" { InitialState::Present(None) } else if state_str.starts_with("error-") { // We strip the prefix and escape any tab/newline control characters (inserted by `fmterr`) // Any others are user-inserted, and this is documented let err_page_data_str = state_str .strip_prefix("error-") .unwrap() .replace("\n", "\\n") .replace("\t", "\\t"); // There will be error page data encoded after `error-` let err_page_data = match serde_json::from_str::<ErrorPageData>(&err_page_data_str) { Ok(render_cfg) => render_cfg, // If there's a serialization error, we'll create a whole new error (500) Err(err) => ErrorPageData { url: "[current]".to_string(), status: 500, err: format!( "couldn't serialize error from server: '{}'", err.to_string() ), }, }; InitialState::Error(err_page_data) } else { InitialState::Present(Some(state_str)) } } /// Marks a checkpoint in the code and alerts any tests that it's been reached by creating an element that represents it. The preferred /// solution would be emitting a DOM event, but the WebDriver specification currently doesn't support waiting on those (go figure). This /// will only create a custom element if the `__PERSEUS_TESTING` JS global variable is set to `true`. /// /// This adds a `<div id="__perseus_checkpoint-<event-name>" />` to the `<div id="__perseus_checkpoints"></div>` element, creating the /// latter if it doesn't exist. 
Each checkpoint must have a unique name, and if the same checkpoint is executed twice, it'll be added /// with a `-<number>` after it, starting from `0`. In this way, we have a functional checkpoints queue for signalling to test code! /// Note that the checkpoint queue is NOT cleared on subsequent loads. /// /// Note: this is not just for internal usage, it's highly recommended that you use this for your own checkpoints as well! Just make /// sure your tests don't conflict with any internal Perseus checkpoint names (preferably prefix yours with `custom-` or the like, as /// Perseus' checkpoints may change at any time, but won't ever use that namespace). /// /// WARNING: your checkpoint names must not include hyphens! This will result in a `panic!`. pub fn checkpoint(name: &str) { if name.contains('-') { panic!("checkpoint must not contain hyphens, use underscores instead (hyphens are used as an internal delimiter)"); } let val_opt = web_sys::window().unwrap().get("__PERSEUS_TESTING"); let js_obj = match val_opt { Some(js_obj) => js_obj, None => return, }; // The object should only actually contain the string value that was injected let is_testing = match js_obj.as_bool() { Some(cfg_str) => cfg_str, None => return, }; if !is_testing { return; } // If we're here, we're testing // We dispatch a console warning to reduce the likelihood of literal 'testing in prod' crate::web_log!("Perseus is in testing mode. 
If you're an end-user and seeing this message, please report this as a bug to the website owners!"); // Create a custom element that can be waited for by the WebDriver // This will be removed by the next checkpoint let document = web_sys::window().unwrap().document().unwrap(); let container_opt = document.query_selector("#__perseus_checkpoints").unwrap(); let container: Element; if let Some(container_i) = container_opt { container = container_i; } else { // If the container doesn't exist yet, create it container = document.create_element("div").unwrap(); container.set_id("__perseus_checkpoints"); document .query_selector("body") .unwrap() .unwrap() .append_with_node_1(&container) .unwrap(); } // Get the number of checkpoints that already exist with the same ID // We prevent having to worry about checkpoints whose names are subsets of others by using the hyphen as a delimiter let num_checkpoints = document .query_selector_all(&format!("[id^=__perseus_checkpoint-{}-]", name)) .unwrap() .length(); // Append the new checkpoint let checkpoint = document.create_element("div").unwrap(); checkpoint.set_id(&format!( "__perseus_checkpoint-{}-{}", name, num_checkpoints )); container.append_with_node_1(&checkpoint).unwrap(); } /// A representation of whether or not the initial state was present. If it was, it could be `None` (some templates take no state), and /// if not, then this isn't an initial load, and we need to request the page from the server. It could also be an error that the server /// has rendered. pub enum InitialState { /// A non-error initial state has been injected. Present(Option<String>), /// An initial state ahs been injected that indicates an error. Error(ErrorPageData), /// No initial state has been injected (or if it has, it's been deliberately unset). NotPresent, } /// Fetches the information for the given page and renders it. This should be provided the actual path of the page to render (not just the /// broader template). 
Asynchronous Wasm is handled here, because only a few cases need it. // TODO handle exceptions higher up pub async fn app_shell( path: String, (template, was_incremental_match): (Template<DomNode>, bool), locale: String, translations_manager: Rc<RefCell<ClientTranslationsManager>>, error_pages: Rc<ErrorPages<DomNode>>, (initial_container, container_rx_elem): (Element, Element), // The container that the server put initial load content into and the reactive container tht we'll actually use ) { checkpoint("app_shell_entry"); // Check if this was an initial load and we already have the state let initial_state = get_initial_state(); match initial_state { // If we do have an initial state, then we have everything we need for immediate hydration (no double trips) // The state is here, and the HTML has already been injected for us (including head metadata) InitialState::Present(state) => { checkpoint("initial_state_present"); // Unset the initial state variable so we perform subsequent renders correctly // This monstrosity is needed until `web-sys` adds a `.set()` method on `Window` Reflect::set( &JsValue::from(web_sys::window().unwrap()), &JsValue::from("__PERSEUS_INITIAL_STATE"), &JsValue::undefined(), ) .unwrap(); // We need to move the server-rendered content from its current container to the reactive container (otherwise Sycamore can't work with it properly) let initial_html = initial_container.inner_html(); container_rx_elem.set_inner_html(&initial_html); initial_container.set_inner_html(""); // Make the initial container invisible initial_container .set_attribute("style", "display: none;") .unwrap(); checkpoint("page_visible"); // Now that the user can see something, we can get the translator let mut translations_manager_mut = translations_manager.borrow_mut(); // This gets an `Rc<Translator>` that references the translations manager, meaning no cloning of translations let translator = translations_manager_mut .get_translator_for_locale(&locale) .await; let 
translator = match translator { Ok(translator) => translator, Err(err) => { // Directly eliminate the HTML sent in from the server before we render an error page container_rx_elem.set_inner_html(""); match &err { // These errors happen because we couldn't get a translator, so they certainly don't get one ClientError::FetchError(FetchError::NotOk { url, status, .. }) => return error_pages.render_page(url, status, &fmt_err(&err), None, &container_rx_elem), ClientError::FetchError(FetchError::SerFailed { url, .. }) => return error_pages.render_page(url, &500, &fmt_err(&err), None, &container_rx_elem), ClientError::LocaleNotSupported { .. } => return error_pages.render_page(&format!("/{}/...", locale), &404, &fmt_err(&err), None, &container_rx_elem), // No other errors should be returned _ => panic!("expected 'AssetNotOk'/'AssetSerFailed'/'LocaleNotSupported' error, found other unacceptable error") } } }; // Hydrate that static code using the acquired state // BUG (Sycamore): this will double-render if the component is just text (no nodes) sycamore::hydrate_to( // This function provides translator context as needed || template.render_for_template(state, Rc::clone(&translator), false), &container_rx_elem, ); checkpoint("page_interactive"); } // If we have no initial state, we should proceed as usual, fetching the content and state from the server InitialState::NotPresent => { checkpoint("initial_state_not_present"); // If we're getting data about the index page, explicitly set it to that // This can be handled by the Perseus server (and is), but not by static exporting let path = match path.is_empty() { true => "index".to_string(), false => path, }; // Get the static page data let asset_url = format!( "{}/.perseus/page/{}/{}.json?template_name={}&was_incremental_match={}", get_path_prefix_client(), locale, path.to_string(), template.get_path(), was_incremental_match ); // If this doesn't exist, then it's a 404 (we went here by explicit navigation, but it may be an 
unservable ISR page or the like) let page_data_str = fetch(&asset_url).await; match page_data_str { Ok(page_data_str) => match page_data_str { Some(page_data_str) => { // All good, deserialize the page data let page_data = serde_json::from_str::<PageData>(&page_data_str); match page_data { Ok(page_data) => { // We have the page data ready, render everything // Interpolate the HTML directly into the document (we'll hydrate it later) container_rx_elem.set_inner_html(&page_data.content); // Interpolate the metadata directly into the document's `<head>` // Get the current head let head_elem = web_sys::window() .unwrap() .document() .unwrap() .query_selector("head") .unwrap() .unwrap(); let head_html = head_elem.inner_html(); // We'll assume that there's already previously interpolated head in addition to the hardcoded stuff, but it will be separated by the server-injected delimiter comment // Thus, we replace the stuff after that delimiter comment with the new head let head_parts: Vec<&str> = head_html .split("<!--PERSEUS_INTERPOLATED_HEAD_BEGINS-->") .collect(); let new_head = format!( "{}\n<!--PERSEUS_INTERPOLATED_HEAD_BEGINS-->\n{}", head_parts[0], &page_data.head ); head_elem.set_inner_html(&new_head); checkpoint("page_visible"); // Now that the user can see something, we can get the translator let mut translations_manager_mut = translations_manager.borrow_mut(); // This gets an `Rc<Translator>` that references the translations manager, meaning no cloning of translations let translator = translations_manager_mut .get_translator_for_locale(&locale) .await; let translator = match translator { Ok(translator) => translator, Err(err) => match &err { // These errors happen because we couldn't get a translator, so they certainly don't get one ClientError::FetchError(FetchError::NotOk { url, status, .. }) => return error_pages.render_page(url, status, &fmt_err(&err), None, &container_rx_elem), ClientError::FetchError(FetchError::SerFailed { url, .. 
}) => return error_pages.render_page(url, &500, &fmt_err(&err), None, &container_rx_elem), ClientError::LocaleNotSupported { locale } => return error_pages.render_page(&format!("/{}/...", locale), &404, &fmt_err(&err), None, &container_rx_elem), // No other errors should be returned _ => panic!("expected 'AssetNotOk'/'AssetSerFailed'/'LocaleNotSupported' error, found other unacceptable error") } }; // Hydrate that static code using the acquired state // BUG (Sycamore): this will double-render if the component is just text (no nodes) sycamore::hydrate_to( // This function provides translator context as needed || { template.render_for_template( page_data.state, Rc::clone(&translator), false, ) }, &container_rx_elem, ); checkpoint("page_interactive"); } // If the page failed to serialize, an exception has occurred Err(err) => panic!("page data couldn't be serialized: '{}'", err), }; } // No translators ready yet None => error_pages.render_page( &asset_url, &404, "page not found", None, &container_rx_elem, ), }, Err(err) => match &err { // No translators ready yet ClientError::FetchError(FetchError::NotOk { url, status, .. 
}) => error_pages .render_page(url, status, &fmt_err(&err), None, &container_rx_elem), // No other errors should be returned _ => panic!("expected 'AssetNotOk' error, found other unacceptable error"), }, }; } // Nothing should be done if an error was sent down InitialState::Error(ErrorPageData { url, status, err }) => { checkpoint("initial_state_error"); // We need to move the server-rendered content from its current container to the reactive container (otherwise Sycamore can't work with it properly) let initial_html = initial_container.inner_html(); container_rx_elem.set_inner_html(&initial_html); initial_container.set_inner_html(""); // Make the initial container invisible initial_container .set_attribute("style", "display: none;") .unwrap(); // Hydrate the currently static error page // Right now, we don't provide translators to any error pages that have come from the server error_pages.hydrate_page(&url, &status, &err, None, &container_rx_elem); } }; }
get_initial_state
identifier_name
shell.rs
use crate::error_pages::ErrorPageData; use crate::errors::*; use crate::path_prefix::get_path_prefix_client; use crate::serve::PageData; use crate::template::Template; use crate::ClientTranslationsManager; use crate::ErrorPages; use fmterr::fmt_err; use js_sys::Reflect; use std::cell::RefCell; use std::collections::HashMap; use std::rc::Rc; use sycamore::prelude::*; use wasm_bindgen::prelude::*; use wasm_bindgen::JsCast; use wasm_bindgen_futures::JsFuture; use web_sys::{Element, Request, RequestInit, RequestMode, Response}; /// Fetches the given resource. This should NOT be used by end users, but it's required by the CLI. #[doc(hidden)] pub async fn fetch(url: &str) -> Result<Option<String>, ClientError> { let js_err_handler = |err: JsValue| ClientError::Js(format!("{:?}", err)); let mut opts = RequestInit::new(); opts.method("GET").mode(RequestMode::Cors); let request = Request::new_with_str_and_init(url, &opts).map_err(js_err_handler)?; let window = web_sys::window().unwrap(); // Get the response as a future and await it let res_value = JsFuture::from(window.fetch_with_request(&request)) .await .map_err(js_err_handler)?; // Turn that into a proper response object let res: Response = res_value.dyn_into().unwrap(); // If the status is 404, we should return that the request worked but no file existed if res.status() == 404 { return Ok(None); } // Get the body thereof let body_promise = res.text().map_err(js_err_handler)?; let body = JsFuture::from(body_promise).await.map_err(js_err_handler)?; // Convert that into a string (this will be `None` if it wasn't a string in the JS) let body_str = body.as_string(); let body_str = match body_str { Some(body_str) => body_str, None => { return Err(FetchError::NotString { url: url.to_string(), } .into()) } }; // Handle non-200 error codes if res.status() == 200 { Ok(Some(body_str)) } else { Err(FetchError::NotOk { url: url.to_string(), status: res.status(), err: body_str, } .into()) } } /// Gets the render configuration from 
the JS global variable `__PERSEUS_RENDER_CFG`, which should be inlined by the server. This will /// return `None` on any error (not found, serialization failed, etc.), which should reasonably lead to a `panic!` in the caller. pub fn get_render_cfg() -> Option<HashMap<String, String>> { let val_opt = web_sys::window().unwrap().get("__PERSEUS_RENDER_CFG"); let js_obj = match val_opt { Some(js_obj) => js_obj, None => return None, }; // The object should only actually contain the string value that was injected let cfg_str = match js_obj.as_string() { Some(cfg_str) => cfg_str, None => return None, }; let render_cfg = match serde_json::from_str::<HashMap<String, String>>(&cfg_str) { Ok(render_cfg) => render_cfg, Err(_) => return None, }; Some(render_cfg) } /// Gets the initial state injected by the server, if there was any. This is used to differentiate initial loads from subsequent ones, /// which have different log chains to prevent double-trips (a common SPA problem). pub fn get_initial_state() -> InitialState { let val_opt = web_sys::window().unwrap().get("__PERSEUS_INITIAL_STATE"); let js_obj = match val_opt { Some(js_obj) => js_obj, None => return InitialState::NotPresent, }; // The object should only actually contain the string value that was injected let state_str = match js_obj.as_string() { Some(state_str) => state_str, None => return InitialState::NotPresent, }; // On the server-side, we encode a `None` value directly (otherwise it will be some convoluted stringified JSON) if state_str == "None" { InitialState::Present(None) } else if state_str.starts_with("error-") { // We strip the prefix and escape any tab/newline control characters (inserted by `fmterr`) // Any others are user-inserted, and this is documented let err_page_data_str = state_str .strip_prefix("error-") .unwrap() .replace("\n", "\\n") .replace("\t", "\\t"); // There will be error page data encoded after `error-` let err_page_data = match 
serde_json::from_str::<ErrorPageData>(&err_page_data_str) { Ok(render_cfg) => render_cfg, // If there's a serialization error, we'll create a whole new error (500) Err(err) => ErrorPageData { url: "[current]".to_string(), status: 500, err: format!( "couldn't serialize error from server: '{}'", err.to_string() ), }, }; InitialState::Error(err_page_data) } else { InitialState::Present(Some(state_str)) } } /// Marks a checkpoint in the code and alerts any tests that it's been reached by creating an element that represents it. The preferred /// solution would be emitting a DOM event, but the WebDriver specification currently doesn't support waiting on those (go figure). This /// will only create a custom element if the `__PERSEUS_TESTING` JS global variable is set to `true`. /// /// This adds a `<div id="__perseus_checkpoint-<event-name>" />` to the `<div id="__perseus_checkpoints"></div>` element, creating the /// latter if it doesn't exist. Each checkpoint must have a unique name, and if the same checkpoint is executed twice, it'll be added /// with a `-<number>` after it, starting from `0`. In this way, we have a functional checkpoints queue for signalling to test code! /// Note that the checkpoint queue is NOT cleared on subsequent loads. /// /// Note: this is not just for internal usage, it's highly recommended that you use this for your own checkpoints as well! Just make /// sure your tests don't conflict with any internal Perseus checkpoint names (preferably prefix yours with `custom-` or the like, as /// Perseus' checkpoints may change at any time, but won't ever use that namespace). /// /// WARNING: your checkpoint names must not include hyphens! This will result in a `panic!`. 
pub fn checkpoint(name: &str) { if name.contains('-') { panic!("checkpoint must not contain hyphens, use underscores instead (hyphens are used as an internal delimiter)"); } let val_opt = web_sys::window().unwrap().get("__PERSEUS_TESTING"); let js_obj = match val_opt { Some(js_obj) => js_obj, None => return, }; // The object should only actually contain the string value that was injected let is_testing = match js_obj.as_bool() { Some(cfg_str) => cfg_str, None => return, }; if !is_testing { return; } // If we're here, we're testing // We dispatch a console warning to reduce the likelihood of literal 'testing in prod' crate::web_log!("Perseus is in testing mode. If you're an end-user and seeing this message, please report this as a bug to the website owners!"); // Create a custom element that can be waited for by the WebDriver // This will be removed by the next checkpoint let document = web_sys::window().unwrap().document().unwrap(); let container_opt = document.query_selector("#__perseus_checkpoints").unwrap(); let container: Element; if let Some(container_i) = container_opt { container = container_i; } else { // If the container doesn't exist yet, create it container = document.create_element("div").unwrap(); container.set_id("__perseus_checkpoints"); document .query_selector("body") .unwrap() .unwrap() .append_with_node_1(&container) .unwrap(); } // Get the number of checkpoints that already exist with the same ID // We prevent having to worry about checkpoints whose names are subsets of others by using the hyphen as a delimiter let num_checkpoints = document .query_selector_all(&format!("[id^=__perseus_checkpoint-{}-]", name)) .unwrap() .length(); // Append the new checkpoint let checkpoint = document.create_element("div").unwrap(); checkpoint.set_id(&format!( "__perseus_checkpoint-{}-{}", name, num_checkpoints )); container.append_with_node_1(&checkpoint).unwrap(); }
/// A representation of whether or not the initial state was present. If it was, it could be `None` (some templates take no state), and /// if not, then this isn't an initial load, and we need to request the page from the server. It could also be an error that the server /// has rendered. pub enum InitialState { /// A non-error initial state has been injected. Present(Option<String>), /// An initial state ahs been injected that indicates an error. Error(ErrorPageData), /// No initial state has been injected (or if it has, it's been deliberately unset). NotPresent, } /// Fetches the information for the given page and renders it. This should be provided the actual path of the page to render (not just the /// broader template). Asynchronous Wasm is handled here, because only a few cases need it. // TODO handle exceptions higher up pub async fn app_shell( path: String, (template, was_incremental_match): (Template<DomNode>, bool), locale: String, translations_manager: Rc<RefCell<ClientTranslationsManager>>, error_pages: Rc<ErrorPages<DomNode>>, (initial_container, container_rx_elem): (Element, Element), // The container that the server put initial load content into and the reactive container tht we'll actually use ) { checkpoint("app_shell_entry"); // Check if this was an initial load and we already have the state let initial_state = get_initial_state(); match initial_state { // If we do have an initial state, then we have everything we need for immediate hydration (no double trips) // The state is here, and the HTML has already been injected for us (including head metadata) InitialState::Present(state) => { checkpoint("initial_state_present"); // Unset the initial state variable so we perform subsequent renders correctly // This monstrosity is needed until `web-sys` adds a `.set()` method on `Window` Reflect::set( &JsValue::from(web_sys::window().unwrap()), &JsValue::from("__PERSEUS_INITIAL_STATE"), &JsValue::undefined(), ) .unwrap(); // We need to move the 
server-rendered content from its current container to the reactive container (otherwise Sycamore can't work with it properly) let initial_html = initial_container.inner_html(); container_rx_elem.set_inner_html(&initial_html); initial_container.set_inner_html(""); // Make the initial container invisible initial_container .set_attribute("style", "display: none;") .unwrap(); checkpoint("page_visible"); // Now that the user can see something, we can get the translator let mut translations_manager_mut = translations_manager.borrow_mut(); // This gets an `Rc<Translator>` that references the translations manager, meaning no cloning of translations let translator = translations_manager_mut .get_translator_for_locale(&locale) .await; let translator = match translator { Ok(translator) => translator, Err(err) => { // Directly eliminate the HTML sent in from the server before we render an error page container_rx_elem.set_inner_html(""); match &err { // These errors happen because we couldn't get a translator, so they certainly don't get one ClientError::FetchError(FetchError::NotOk { url, status, .. }) => return error_pages.render_page(url, status, &fmt_err(&err), None, &container_rx_elem), ClientError::FetchError(FetchError::SerFailed { url, .. }) => return error_pages.render_page(url, &500, &fmt_err(&err), None, &container_rx_elem), ClientError::LocaleNotSupported { .. 
} => return error_pages.render_page(&format!("/{}/...", locale), &404, &fmt_err(&err), None, &container_rx_elem), // No other errors should be returned _ => panic!("expected 'AssetNotOk'/'AssetSerFailed'/'LocaleNotSupported' error, found other unacceptable error") } } }; // Hydrate that static code using the acquired state // BUG (Sycamore): this will double-render if the component is just text (no nodes) sycamore::hydrate_to( // This function provides translator context as needed || template.render_for_template(state, Rc::clone(&translator), false), &container_rx_elem, ); checkpoint("page_interactive"); } // If we have no initial state, we should proceed as usual, fetching the content and state from the server InitialState::NotPresent => { checkpoint("initial_state_not_present"); // If we're getting data about the index page, explicitly set it to that // This can be handled by the Perseus server (and is), but not by static exporting let path = match path.is_empty() { true => "index".to_string(), false => path, }; // Get the static page data let asset_url = format!( "{}/.perseus/page/{}/{}.json?template_name={}&was_incremental_match={}", get_path_prefix_client(), locale, path.to_string(), template.get_path(), was_incremental_match ); // If this doesn't exist, then it's a 404 (we went here by explicit navigation, but it may be an unservable ISR page or the like) let page_data_str = fetch(&asset_url).await; match page_data_str { Ok(page_data_str) => match page_data_str { Some(page_data_str) => { // All good, deserialize the page data let page_data = serde_json::from_str::<PageData>(&page_data_str); match page_data { Ok(page_data) => { // We have the page data ready, render everything // Interpolate the HTML directly into the document (we'll hydrate it later) container_rx_elem.set_inner_html(&page_data.content); // Interpolate the metadata directly into the document's `<head>` // Get the current head let head_elem = web_sys::window() .unwrap() .document() .unwrap() 
.query_selector("head") .unwrap() .unwrap(); let head_html = head_elem.inner_html(); // We'll assume that there's already previously interpolated head in addition to the hardcoded stuff, but it will be separated by the server-injected delimiter comment // Thus, we replace the stuff after that delimiter comment with the new head let head_parts: Vec<&str> = head_html .split("<!--PERSEUS_INTERPOLATED_HEAD_BEGINS-->") .collect(); let new_head = format!( "{}\n<!--PERSEUS_INTERPOLATED_HEAD_BEGINS-->\n{}", head_parts[0], &page_data.head ); head_elem.set_inner_html(&new_head); checkpoint("page_visible"); // Now that the user can see something, we can get the translator let mut translations_manager_mut = translations_manager.borrow_mut(); // This gets an `Rc<Translator>` that references the translations manager, meaning no cloning of translations let translator = translations_manager_mut .get_translator_for_locale(&locale) .await; let translator = match translator { Ok(translator) => translator, Err(err) => match &err { // These errors happen because we couldn't get a translator, so they certainly don't get one ClientError::FetchError(FetchError::NotOk { url, status, .. }) => return error_pages.render_page(url, status, &fmt_err(&err), None, &container_rx_elem), ClientError::FetchError(FetchError::SerFailed { url, .. 
}) => return error_pages.render_page(url, &500, &fmt_err(&err), None, &container_rx_elem), ClientError::LocaleNotSupported { locale } => return error_pages.render_page(&format!("/{}/...", locale), &404, &fmt_err(&err), None, &container_rx_elem), // No other errors should be returned _ => panic!("expected 'AssetNotOk'/'AssetSerFailed'/'LocaleNotSupported' error, found other unacceptable error") } }; // Hydrate that static code using the acquired state // BUG (Sycamore): this will double-render if the component is just text (no nodes) sycamore::hydrate_to( // This function provides translator context as needed || { template.render_for_template( page_data.state, Rc::clone(&translator), false, ) }, &container_rx_elem, ); checkpoint("page_interactive"); } // If the page failed to serialize, an exception has occurred Err(err) => panic!("page data couldn't be serialized: '{}'", err), }; } // No translators ready yet None => error_pages.render_page( &asset_url, &404, "page not found", None, &container_rx_elem, ), }, Err(err) => match &err { // No translators ready yet ClientError::FetchError(FetchError::NotOk { url, status, .. 
}) => error_pages .render_page(url, status, &fmt_err(&err), None, &container_rx_elem), // No other errors should be returned _ => panic!("expected 'AssetNotOk' error, found other unacceptable error"), }, }; } // Nothing should be done if an error was sent down InitialState::Error(ErrorPageData { url, status, err }) => { checkpoint("initial_state_error"); // We need to move the server-rendered content from its current container to the reactive container (otherwise Sycamore can't work with it properly) let initial_html = initial_container.inner_html(); container_rx_elem.set_inner_html(&initial_html); initial_container.set_inner_html(""); // Make the initial container invisible initial_container .set_attribute("style", "display: none;") .unwrap(); // Hydrate the currently static error page // Right now, we don't provide translators to any error pages that have come from the server error_pages.hydrate_page(&url, &status, &err, None, &container_rx_elem); } }; }
random_line_split
groups-edit.component.ts
import {Component, OnInit, ViewChild} from '@angular/core'; import {Observable} from 'rxjs/Observable'; import {Subscription} from 'rxjs/Subscription'; import {GroupEditEntity, GroupEntity, GroupsHttpService} from '../groups-http.service'; import {GlobalService} from '../../../core/global.service'; import {RegionEntity} from '../../../core/region-http.service'; import {ParkingsHttpService} from '../../parkings/parkings-http.service'; import {ActivatedRoute, Router} from '@angular/router'; import {HttpErrorEntity} from '../../../core/http.service'; import {GroupsDataService, ParkingItem} from '../groups-data.service'; import {GlobalConst} from '../../../share/global-const'; import {CanDeactivateComponent} from '../../../share/interfaces/can-deactivate-component'; import {ParkingsSearchParams} from '../../parkings/parkings.model'; import {CheckboxState} from "../../../share/components/beautify-checkbox/beautify-checkbox.component"; @Component({ selector: 'app-groups-edit', templateUrl: './groups-edit.component.html', styleUrls: ['..//groups.component.css', './groups-edit.component.css'], providers: [ParkingsHttpService] }) export class GroupsEditComponent implements OnInit, CanDeactivateComponent { public groupsInfo: GroupEntity = new GroupEntity(); public updateGroupInfo: GroupEditEntity = new GroupEditEntity(); public groupTypeList: Array<number> = [1, 2, 3, 4, 5, 0]; public regionsList: Array<RegionEntity> = []; public selectedRegion: RegionEntity = new RegionEntity; public isShow = true; public group_id: string; private temSelectedGroupArr: Array<any> = []; private editSuccess = false; // 编辑是否成功 private isInitSearchParkings = true; private groupTypeDirty = false; private parkingDirty = false; private fromPath: 'list' | 'detail' = 'list'; public searchParams: ParkingsSearchParams = new ParkingsSearchParams(); public parkingsList: Array<ParkingItem> = []; public selectedParkingList: Array<ParkingItem> = []; private containerHeight: number; // 容器的高度 private 
loadingFlag = true; private tempRegionParkingList: Array<any> = []; // 临时区域停车场数据列表 private isLinkUrl = true; // 是否存在link private currentScrollElement: any; // 当前滚动条元素 private loadingTimerSubscription: Subscription; @ViewChild('editGroupsForm') public editGroupsForm: any; /** * 获取是否有组类型被选中了(表单校验) * @returns {boolean} */ public get checkedGroupsType(): boolean { if (this.temSelectedGroupArr.length > 0) { return true; } return false; } constructor(private router: Router, private route: ActivatedRoute, private globalService: GlobalService, private parkingsHttpService: ParkingsHttpService, private groupsHttpService: GroupsHttpService, private groupsDataService: GroupsDataService) { this.route.paramMap.subscribe(map => { this.group_id = map.get('parking_group_id'); }); this.route.queryParamMap.subscribe(map => { this.fromPath = map.get('from') === 'detail' ? 'detail' : 'list'; }); this.searchParams.page_num = 1; this.searchParams.page_size = GlobalConst.PageSize; this.searchParams.status = '1'; } public ngOnInit() { this.requestGroupsByIdData(); } private requestGroupsByIdData() { this.groupsHttpService.requestGroupsByIdData(this.group_id).subscribe(data => { this.selectedParkingList = []; this.groupsInfo = data; this.temSelectedGroupArr = this.groupsInfo.parking_group_types; this.selectedParkingList = this.groupsInfo.parkings.map(parking => new ParkingItem(parking)); // 获取省市区数据 this.getRegionsData(); }); } /* 容器div滚动事件 */ public onScrollContainerScroll(event: any) { const target = event.target; this.currentScrollElement = target; this.containerHeight = $('.scroll-container').outerHeight(); if (target.scrollTop + this.containerHeight >= target.scrollHeight / 2) { // 当滚动高度与容器高度之和超过总高度一半就开始加载新数据 this.loadMoreData(); } } private loadMoreData() { if (this.loadingFlag) { return; } this.loadingFlag = true; // 模拟1.5秒获取到数据 this.loadingTimerSubscription && this.loadingTimerSubscription.unsubscribe(); this.loadingTimerSubscription = Observable.timer(1500).subscribe(() => { if 
(this.isLinkUrl) { this.searchParams.page_num++; this.requestParkingsData(); } }); } // 选择组类型 public changeGroupsType(event) { const park_type = this.temSelectedGroupArr; const index = park_type.indexOf(event[1]); this.groupTypeDirty = true; if (index < 0) { if (event[0] === CheckboxState.checked) { this.temSelectedGroupArr.push(event[1]); } } else { this.temSelectedGroupArr.splice(index, 1); } } // 获取省市区数据 public getRegionsData() { this.globalService.regions.subscribe(() => { this.getRegionsByRegionId(GlobalConst.RegionID); }); } // 通过省市区code显示所需省市区数据 public getRegionsByRegionId(region_id: string) { this.globalService.getRegionById(region_id).subscribe(data => { if (data && data.length > 0) { this.regionsList = data[0].cities; this.selectedRegion = this.regionsList[0]; this.searchParams.page_num = 1; this.searchParams.region_id = this.selectedRegion.region_id; this.requestParkingsData(); } }); } /** * 选择区域请求当前区域停车场数据 * 发出请求前判断当前区域停车场数据是否请求过,如果请求过,则不再发出区域停车场数据请求 * 临时区域停车场数据列表中没有当前区域停车场数据时发出请求 */ public requestRegionParkings(region: RegionEntity, num: number) { if (num === 1) { this.isShow = !this.isShow; } else { this.isShow = true; } $(this.currentScrollElement).scrollTop(0); this.selectedRegion = region; if (this.tempRegionParkingList.length > 0) { for (const regionParkingIndex of this.tempRegionParkingList) { if (regionParkingIndex.region_id === region.region_id) { this.parkingsList = []; this.parkingsList = regionParkingIndex.parkingsList; return; } } } this.isInitSearchParkings = true; this.searchParams.page_num = 1; this.searchParams.region_id = region.region_id; this.searchParams.parking_name = ''; this.requestParkingsData(); } // 输入停车场名称查询 public searchParking() { this.searchParams.page_num = 1; this.parkingsList = []; this.requestParkingsData(); } // 选择停车场 public selectedParkings(event: any) { if (event[0] === CheckboxState.checked) { for (const selectedParking of this.selectedParkingList) { if (event[1].source.parking_id === 
selectedParking.source.parking_id) { return; } } event[1].isChecked = true; this.selectedParkingList.push(event[1]); } else if (event[0] === CheckboxState.unchecked) { this.selectedParkingList = this.selectedParkingList.filter(selectedParking => { return event[1].source.parking_id !== selectedParking.source.parking_id; }); event[1].isChecked = false; } this.parkingDirty = true; } // 取消选择 public cancelSelectedParkings(index: number) { const selectedParking = this.selectedParkingList[index]; for (const parking of this.selectedParkingList) { if (parking.source.parking_id === selectedParking.source.parking_id) { parking.isChecked = false; this.selectedParkingList.splice(index, 1); break; } } for (const parkingItem of this.parkingsList) { if (parkingItem.source.parking_id === selectedParking.source.parking_id) { parkingItem.isChecked = false; break; } } } // 提交添加组管理数据 public onAddGroupsFormSubmit() { const parking_ids = []; this.selectedParkingList.forEach(parking => { parking_ids.push(parking.source.parking_id); }); this.updateGroupInfo.parking_group_name = this.groupsInfo.parking_group_name; this.updateGroupInfo.parking_group_types = this.temSelectedGroupArr.join(); this.updateGroupInfo.parking_ids = parking_ids.join(); this.groupsHttpService.requestUpdateGroupsData(this.updateGroupInfo, this.group_id).subscribe(() => { this.globalService.promptBox.open('编辑成功!', () => { // 组数据有改动时更新global.service中组数据 this.globalService.resetGroups(); this.editSuccess = true; this.groupsDataService.clear(); this.navigated(); }); }, err => { if (!this.globalService.httpErrorProcess(err)) { if (err.status === 422) { const error: HttpErrorEntity = HttpErrorEntity.Create(err.json()); for (const content of error.errors) { if (content.field === 'parking_group_name' && content.code === 'missing_field') { this.globalService.promptBox.open('分组名称参数缺失!'); return; } else if (content.field === 'parking_group_name' && content.code === 'invalid') { this.globalService.promptBox.open('分组名称无效或不合法!'); 
return; } else if (content.field === 'parking_group_name' && content.code === 'failed') { this.globalService.promptBox.open('添加失败!'); return; } else if (content.resource === 'parking_group' && content.code === 'already_exist') { this.globalService.promptBox.open('分组名称不能重复,请重新输入!'); return; } } } } }); } // 请求停车场数据 public requestParkingsData() { this.loadingFlag = true; this.loadingTimerSubscription && this.loadingTimerSubscription.unsubscribe(); this.parkingsHttpService.requestParkingsData(this.searchParams, false).subscribe(data => { this.loadingFlag = false; this.isLinkUrl = data.linkUrl ? true : false; if (this.isInitSearchParkings) { this.isInitSearchParkings = false; this.parkingsList = []; this.processParking(data.results); // 初始化请求停车场数据时缓存一份临时停车场数据 this.tempRegionParkingList.push({'region_id': this.searchParams.region_id, 'parkingsList': this.parkingsList}); } else { this.processParking(data.results); } }, err => { this.globalService.httpErrorProcess(err); }); } // 处理停车场数据 private processParking(results: Array<any>) { if (this.selectedParkingList.length > 0) { results.forEach(parking => { const parkingItem = new ParkingItem(parking); this.selectedParkingList.forEach(selectedParking => { if (selectedParking.source.parking_id === parking.parking_id) { parkingItem.isChecked = true; } }); this.parkingsList.push(parkingItem); }); } else { results.forEach(parking => { this.parkingsList.push(new ParkingItem(parking)); }); } } public onCancelBtnClick() { this.navigated(); } private navigated() { if (this.fromPath === 'detail') { this.router.navigate(['../../detail', this.group_id], {relativeTo: this.route}); } else { this.router.navigate(['../'], {relativeTo: this.route}); } } public canDeactivate(): boolean { return this.editSuccess || !this.editGroupsForm || (!this.editGroupsForm.dirty && !this.groupTypeDirty && !this.parkingDirty); } }
identifier_name
groups-edit.component.ts
import {Component, OnInit, ViewChild} from '@angular/core'; import {Observable} from 'rxjs/Observable'; import {Subscription} from 'rxjs/Subscription'; import {GroupEditEntity, GroupEntity, GroupsHttpService} from '../groups-http.service'; import {GlobalService} from '../../../core/global.service'; import {RegionEntity} from '../../../core/region-http.service'; import {ParkingsHttpService} from '../../parkings/parkings-http.service'; import {ActivatedRoute, Router} from '@angular/router'; import {HttpErrorEntity} from '../../../core/http.service'; import {GroupsDataService, ParkingItem} from '../groups-data.service'; import {GlobalConst} from '../../../share/global-const'; import {CanDeactivateComponent} from '../../../share/interfaces/can-deactivate-component'; import {ParkingsSearchParams} from '../../parkings/parkings.model'; import {CheckboxState} from "../../../share/components/beautify-checkbox/beautify-checkbox.component"; @Component({ selector: 'app-groups-edit', templateUrl: './groups-edit.component.html', styleUrls: ['..//groups.component.css', './groups-edit.component.css'],
}) export class GroupsEditComponent implements OnInit, CanDeactivateComponent { public groupsInfo: GroupEntity = new GroupEntity(); public updateGroupInfo: GroupEditEntity = new GroupEditEntity(); public groupTypeList: Array<number> = [1, 2, 3, 4, 5, 0]; public regionsList: Array<RegionEntity> = []; public selectedRegion: RegionEntity = new RegionEntity; public isShow = true; public group_id: string; private temSelectedGroupArr: Array<any> = []; private editSuccess = false; // 编辑是否成功 private isInitSearchParkings = true; private groupTypeDirty = false; private parkingDirty = false; private fromPath: 'list' | 'detail' = 'list'; public searchParams: ParkingsSearchParams = new ParkingsSearchParams(); public parkingsList: Array<ParkingItem> = []; public selectedParkingList: Array<ParkingItem> = []; private containerHeight: number; // 容器的高度 private loadingFlag = true; private tempRegionParkingList: Array<any> = []; // 临时区域停车场数据列表 private isLinkUrl = true; // 是否存在link private currentScrollElement: any; // 当前滚动条元素 private loadingTimerSubscription: Subscription; @ViewChild('editGroupsForm') public editGroupsForm: any; /** * 获取是否有组类型被选中了(表单校验) * @returns {boolean} */ public get checkedGroupsType(): boolean { if (this.temSelectedGroupArr.length > 0) { return true; } return false; } constructor(private router: Router, private route: ActivatedRoute, private globalService: GlobalService, private parkingsHttpService: ParkingsHttpService, private groupsHttpService: GroupsHttpService, private groupsDataService: GroupsDataService) { this.route.paramMap.subscribe(map => { this.group_id = map.get('parking_group_id'); }); this.route.queryParamMap.subscribe(map => { this.fromPath = map.get('from') === 'detail' ? 
'detail' : 'list'; }); this.searchParams.page_num = 1; this.searchParams.page_size = GlobalConst.PageSize; this.searchParams.status = '1'; } public ngOnInit() { this.requestGroupsByIdData(); } private requestGroupsByIdData() { this.groupsHttpService.requestGroupsByIdData(this.group_id).subscribe(data => { this.selectedParkingList = []; this.groupsInfo = data; this.temSelectedGroupArr = this.groupsInfo.parking_group_types; this.selectedParkingList = this.groupsInfo.parkings.map(parking => new ParkingItem(parking)); // 获取省市区数据 this.getRegionsData(); }); } /* 容器div滚动事件 */ public onScrollContainerScroll(event: any) { const target = event.target; this.currentScrollElement = target; this.containerHeight = $('.scroll-container').outerHeight(); if (target.scrollTop + this.containerHeight >= target.scrollHeight / 2) { // 当滚动高度与容器高度之和超过总高度一半就开始加载新数据 this.loadMoreData(); } } private loadMoreData() { if (this.loadingFlag) { return; } this.loadingFlag = true; // 模拟1.5秒获取到数据 this.loadingTimerSubscription && this.loadingTimerSubscription.unsubscribe(); this.loadingTimerSubscription = Observable.timer(1500).subscribe(() => { if (this.isLinkUrl) { this.searchParams.page_num++; this.requestParkingsData(); } }); } // 选择组类型 public changeGroupsType(event) { const park_type = this.temSelectedGroupArr; const index = park_type.indexOf(event[1]); this.groupTypeDirty = true; if (index < 0) { if (event[0] === CheckboxState.checked) { this.temSelectedGroupArr.push(event[1]); } } else { this.temSelectedGroupArr.splice(index, 1); } } // 获取省市区数据 public getRegionsData() { this.globalService.regions.subscribe(() => { this.getRegionsByRegionId(GlobalConst.RegionID); }); } // 通过省市区code显示所需省市区数据 public getRegionsByRegionId(region_id: string) { this.globalService.getRegionById(region_id).subscribe(data => { if (data && data.length > 0) { this.regionsList = data[0].cities; this.selectedRegion = this.regionsList[0]; this.searchParams.page_num = 1; this.searchParams.region_id = 
this.selectedRegion.region_id; this.requestParkingsData(); } }); } /** * 选择区域请求当前区域停车场数据 * 发出请求前判断当前区域停车场数据是否请求过,如果请求过,则不再发出区域停车场数据请求 * 临时区域停车场数据列表中没有当前区域停车场数据时发出请求 */ public requestRegionParkings(region: RegionEntity, num: number) { if (num === 1) { this.isShow = !this.isShow; } else { this.isShow = true; } $(this.currentScrollElement).scrollTop(0); this.selectedRegion = region; if (this.tempRegionParkingList.length > 0) { for (const regionParkingIndex of this.tempRegionParkingList) { if (regionParkingIndex.region_id === region.region_id) { this.parkingsList = []; this.parkingsList = regionParkingIndex.parkingsList; return; } } } this.isInitSearchParkings = true; this.searchParams.page_num = 1; this.searchParams.region_id = region.region_id; this.searchParams.parking_name = ''; this.requestParkingsData(); } // 输入停车场名称查询 public searchParking() { this.searchParams.page_num = 1; this.parkingsList = []; this.requestParkingsData(); } // 选择停车场 public selectedParkings(event: any) { if (event[0] === CheckboxState.checked) { for (const selectedParking of this.selectedParkingList) { if (event[1].source.parking_id === selectedParking.source.parking_id) { return; } } event[1].isChecked = true; this.selectedParkingList.push(event[1]); } else if (event[0] === CheckboxState.unchecked) { this.selectedParkingList = this.selectedParkingList.filter(selectedParking => { return event[1].source.parking_id !== selectedParking.source.parking_id; }); event[1].isChecked = false; } this.parkingDirty = true; } // 取消选择 public cancelSelectedParkings(index: number) { const selectedParking = this.selectedParkingList[index]; for (const parking of this.selectedParkingList) { if (parking.source.parking_id === selectedParking.source.parking_id) { parking.isChecked = false; this.selectedParkingList.splice(index, 1); break; } } for (const parkingItem of this.parkingsList) { if (parkingItem.source.parking_id === selectedParking.source.parking_id) { parkingItem.isChecked = false; break; } } } // 
提交添加组管理数据 public onAddGroupsFormSubmit() { const parking_ids = []; this.selectedParkingList.forEach(parking => { parking_ids.push(parking.source.parking_id); }); this.updateGroupInfo.parking_group_name = this.groupsInfo.parking_group_name; this.updateGroupInfo.parking_group_types = this.temSelectedGroupArr.join(); this.updateGroupInfo.parking_ids = parking_ids.join(); this.groupsHttpService.requestUpdateGroupsData(this.updateGroupInfo, this.group_id).subscribe(() => { this.globalService.promptBox.open('编辑成功!', () => { // 组数据有改动时更新global.service中组数据 this.globalService.resetGroups(); this.editSuccess = true; this.groupsDataService.clear(); this.navigated(); }); }, err => { if (!this.globalService.httpErrorProcess(err)) { if (err.status === 422) { const error: HttpErrorEntity = HttpErrorEntity.Create(err.json()); for (const content of error.errors) { if (content.field === 'parking_group_name' && content.code === 'missing_field') { this.globalService.promptBox.open('分组名称参数缺失!'); return; } else if (content.field === 'parking_group_name' && content.code === 'invalid') { this.globalService.promptBox.open('分组名称无效或不合法!'); return; } else if (content.field === 'parking_group_name' && content.code === 'failed') { this.globalService.promptBox.open('添加失败!'); return; } else if (content.resource === 'parking_group' && content.code === 'already_exist') { this.globalService.promptBox.open('分组名称不能重复,请重新输入!'); return; } } } } }); } // 请求停车场数据 public requestParkingsData() { this.loadingFlag = true; this.loadingTimerSubscription && this.loadingTimerSubscription.unsubscribe(); this.parkingsHttpService.requestParkingsData(this.searchParams, false).subscribe(data => { this.loadingFlag = false; this.isLinkUrl = data.linkUrl ? 
true : false; if (this.isInitSearchParkings) { this.isInitSearchParkings = false; this.parkingsList = []; this.processParking(data.results); // 初始化请求停车场数据时缓存一份临时停车场数据 this.tempRegionParkingList.push({'region_id': this.searchParams.region_id, 'parkingsList': this.parkingsList}); } else { this.processParking(data.results); } }, err => { this.globalService.httpErrorProcess(err); }); } // 处理停车场数据 private processParking(results: Array<any>) { if (this.selectedParkingList.length > 0) { results.forEach(parking => { const parkingItem = new ParkingItem(parking); this.selectedParkingList.forEach(selectedParking => { if (selectedParking.source.parking_id === parking.parking_id) { parkingItem.isChecked = true; } }); this.parkingsList.push(parkingItem); }); } else { results.forEach(parking => { this.parkingsList.push(new ParkingItem(parking)); }); } } public onCancelBtnClick() { this.navigated(); } private navigated() { if (this.fromPath === 'detail') { this.router.navigate(['../../detail', this.group_id], {relativeTo: this.route}); } else { this.router.navigate(['../'], {relativeTo: this.route}); } } public canDeactivate(): boolean { return this.editSuccess || !this.editGroupsForm || (!this.editGroupsForm.dirty && !this.groupTypeDirty && !this.parkingDirty); } }
providers: [ParkingsHttpService]
random_line_split
groups-edit.component.ts
import {Component, OnInit, ViewChild} from '@angular/core'; import {Observable} from 'rxjs/Observable'; import {Subscription} from 'rxjs/Subscription'; import {GroupEditEntity, GroupEntity, GroupsHttpService} from '../groups-http.service'; import {GlobalService} from '../../../core/global.service'; import {RegionEntity} from '../../../core/region-http.service'; import {ParkingsHttpService} from '../../parkings/parkings-http.service'; import {ActivatedRoute, Router} from '@angular/router'; import {HttpErrorEntity} from '../../../core/http.service'; import {GroupsDataService, ParkingItem} from '../groups-data.service'; import {GlobalConst} from '../../../share/global-const'; import {CanDeactivateComponent} from '../../../share/interfaces/can-deactivate-component'; import {ParkingsSearchParams} from '../../parkings/parkings.model'; import {CheckboxState} from "../../../share/components/beautify-checkbox/beautify-checkbox.component"; @Component({ selector: 'app-groups-edit', templateUrl: './groups-edit.component.html', styleUrls: ['..//groups.component.css', './groups-edit.component.css'], providers: [ParkingsHttpService] }) export class GroupsEditComponent implements OnInit, CanDeactivateComponent { public groupsInfo: GroupEntity = new GroupEntity(); public updateGroupInfo: GroupEditEntity = new GroupEditEntity(); public groupTypeList: Array<number> = [1, 2, 3, 4, 5, 0]; public regionsList: Array<RegionEntity> = []; public selectedRegion: RegionEntity = new RegionEntity; public isShow = true; public group_id: string; private temSelectedGroupArr: Array<any> = []; private editSuccess = false; // 编辑是否成功 private isInitSearchParkings = true; private groupTypeDirty = false; private parkingDirty = false; private fromPath: 'list' | 'detail' = 'list'; public searchParams: ParkingsSearchParams = new ParkingsSearchParams(); public parkingsList: Array<ParkingItem> = []; public selectedParkingList: Array<ParkingItem> = []; private containerHeight: number; // 容器的高度 private 
loadingFlag = true; private tempRegionParkingList: Array<any> = []; // 临时区域停车场数据列表 private isLinkUrl = true; // 是否存在link private currentScrollElement: any; // 当前滚动条元素 private loadingTimerSubscription: Subscription; @ViewChild('editGroupsForm') public editGroupsForm: any; /** * 获取是否有组类型被选中了(表单校验) * @returns {boolean} */ public get checkedGroupsType(): boolean { if (this.temSelectedGroupArr.length > 0) { return true; } return false; } constructor(private router: Router, private route: ActivatedRoute, private globalService: GlobalService, private parkingsHttpService: ParkingsHttpService, private groupsHttpService: GroupsHttpService, private groupsDataService: GroupsDataService) { this.route.paramMap.subscribe(map => { this.group_id = map.get('parking_group_id'); }); this.route.queryParamMap.subscribe(map => { this.fromPath = map.get('from') === 'detail' ? 'detail' : 'list'; }); this.searchParams.page_num = 1; this.searchParams.page_size = GlobalConst.PageSize; this.searchParams.status = '1'; } public ngOnInit() { this.requestGroupsByIdData(); } private requestGroupsByIdData() { this.groupsHttpService.requestGroupsByIdData(this.group_id).subscribe(data => { this.selectedParkingList = []; this.groupsInfo = data; this.temSelectedGroupArr = this.groupsInfo.parking_group_types; this.selectedParkingList = this.groupsInfo.parkings.map(parking => new ParkingItem(parking)); // 获取省市区数据 this.getRegionsData(); }); } /* 容器div滚动事件 */ public onScrollContainerScroll(event: any) { const target = event.target; this.currentScrollElement = target; this.containerHeight = $('.scroll-container').outerHeight(); if (target.scrollTop + this.containerHeight >= target.scrollHeight / 2) { // 当滚动高度与容器高度之和超过总高度一半就开始加载新数据 this.loadMoreData(); } } private loadMoreData() { if (this.loadingFlag) { return; } this.loadingFlag = true; // 模拟1.5秒获取到数据 this.loadingTimerSubscription && this.loadingTimerSubscription.unsubscribe(); this.loadingTimerSubscription = Observable.timer(1500).subscribe(() => { if 
(this.isLinkUrl) { this.searchParams.page_num++; this.requestParkingsData(); } }); } // 选择组类型 public changeGroupsType(event) { const park_type = this.temSelectedGroupArr; const index = park_type.indexOf(event[1]); this.groupTypeDirty = true; if (index < 0) { if (event[0] === CheckboxState.checked) { this.temSelectedGroupArr.push(event[1]); } } else { this.temSelectedGroupArr.splice(index, 1); } } // 获取省市区数据 public getRegionsData() { this.globalService.regions.subscribe(() => { this.getRegionsByRegionId(GlobalConst.RegionID); }); } // 通过省市区code显示所需省市区数据 public getRegionsByRegionId(region_id: string) { this.globalService.getRegionById(region_id).subscribe(data => { if (data && data.length > 0) { this.regionsList = data[0].cities; this.selectedRegion = this.regionsList[0]; this.searchParams.page_num = 1; this.searchParams.region_id = this.selectedRegion.region_id; this.requestParkingsData(); } }); } /** * 选择区域请求当前区域停车场数据 * 发出请求前判断当前区域停车场数据是否请求过,如果请求过,则不再发出区域停车场数据请求 * 临时区域停车场数据列表中没有当前区域停车场数据时发出请求 */ public requestRegionParkings(region: RegionEntity, num: number) { if (num === 1) { this.isShow = !this.isShow; } else { this.isShow = true; } $(this.currentScrollElement).scrollTop(0); this.selectedRegion = region; if (this.tempRegionParkingList.length > 0) { for (const regionParkingIndex of this.tempRegionParkingList) { if (regionParkingIndex.region_id === region.region_id) { this.parkingsList = []; this.parkingsList = regionParkingIndex.parkingsList; return; } } } this.isInitSearchParkings = true; this.searchParams.page_num = 1; this.searchParams.region_id = region.region_id; this.searchParams.parking_name = ''; this.requestParkingsData(); } // 输入停车场名称查询 public searchParking() { this.searchParams.page_num = 1; this.parkingsList = []; this.requestParkingsData(); } // 选择停车场 public selectedParkings(event: any) { if (event[0] === CheckboxState.checked) { for (const selectedParking of this.selectedParkingList) { if (event[1].source.parking_id === 
selectedParking.source.parking_id) { return; } } event[1].isChecked = true; this.selectedParkingList.push(event[1]); } else if (event[0] === CheckboxState.unchecked) { this.selectedParkingList = this.selectedParkingList.filter(selectedParking => { return event[1].source.parking_id !== selectedParking.source.parking_id; }); event[1].isChecked = false; } this.parkingDirty = true; } // 取消选择 public cancelSelectedParkings(index: number) { const selectedParking = this.selectedParkingList[index]; for (const parking of this.selectedParkingList) { if (parking.source.parking_id === selectedParking.source.parking_id) { parking.isChecked = false; this.selectedParkingList.splice(index, 1); break; } } for (const parkingItem of this.parkingsList) { if (parkingItem.source.parking_id === selectedParking.source.parking_id) { parkingItem.isChecked = false; break; } } } // 提交添加组管理数据 public onAddGroupsFormSubmit() { const parking_ids = []; this.selectedParkingList.forEach(parking => { parking_ids.push(parking.source.parking_id); }); this.updateGroupInfo.parking_group_name = this.groupsInfo.parking_group_name; this.updateGroupInfo.parking_group_types = this.temSelectedGroupArr.join(); this.updateGroupInfo.parking_ids = parking_ids.join(); this.groupsHttpService.requestUpdateGroupsData(this.updateGroupInfo, this.group_id).subscribe(() => { this.globalService.promptBox.open('编辑成功!', () => { // 组数据有改动时更新global.service中组数据 this.globalService.resetGroups(); this.editSuccess = true; this.groupsDataService.clear(); this.navigated(); }); }, err => { if (!this.globalService.httpErrorProcess(err)) { if (err.status === 422) { const error: HttpErrorEntity = HttpErrorEntity.Create(err.json()); for (const content of error.errors) { if (content.field === 'parking_group_name' && content.code === 'missing_field') { this.globalService.promptBox.open('分组名称参数缺失!'); return; } else if (content.field === 'parking_group_name' && content.code === 'invalid') { this.globalService.promptBox.open('分组名称无效或不合法!'); 
return; } else if (content.field === 'parking_group_name' && content.code === 'failed') { this.globalService.promptBox.open('添加失败!'); return; } else if (content.resource === 'parking_group' && content.code === 'already_exist') { this.globalService.promptBox.open('分组名称不能重复,请重新输入!'); return; } } } } }); } // 请求停车场数据 public requestParkingsData() { this.loadingFlag = true; this.loadingTimerSubscription && this.loadingTimerSubscription.unsubscribe(); this.parkingsHttpService.requestParkingsData(this.searchParams, false).subscribe(data => { this.loadingFlag = false; this.isLinkUrl = data.linkUrl ? true : false; if (this.isInitSearchParkings) { this.isInitSearchParkings = false; this.parkingsList = []; this.processParking(data.results); // 初始化请求停车场数据时缓存一份临时停车场数据 this.tempRegionParkingList.push({'region_id': this.searchParams.region_id, 'parkingsList': this.parkingsList}); } else { this.processParking(data.results); } }, err => { this.globalService.httpErrorProcess(err); }); } // 处理停车场数据 private processParking(results: Array<any>) { if (this.selectedParkingList.length > 0) { results.forEach(parking => { const parkingItem = new ParkingItem(parking); this.selectedParkingList.forEach(selectedParking => { if (selectedParking.source.parking_id === parking.parking_id) { parkingItem.isChecked = true; } }); this.parkingsList.push(parkingItem); }); } else { results.forEach(parking => { this.parkingsList.push(new ParkingItem(parking)); }); } } public onCancelBtnClick() { this.navigated(); } private navigated() { if (this.fromPath === 'detail') { this.router.navigate(['../../detail', this.group_id], {relativeTo: this.route}); } else { this.router.navigate(['../'], {relativeTo: this.route}); } } public canDeactivate(): boolean { return this.editSuccess || !this.editGroupsForm || (!this.editGroupsForm.dirty && !this.groupTypeDirty && !this.parkingDirty); } }
identifier_body
groups-edit.component.ts
import {Component, OnInit, ViewChild} from '@angular/core'; import {Observable} from 'rxjs/Observable'; import {Subscription} from 'rxjs/Subscription'; import {GroupEditEntity, GroupEntity, GroupsHttpService} from '../groups-http.service'; import {GlobalService} from '../../../core/global.service'; import {RegionEntity} from '../../../core/region-http.service'; import {ParkingsHttpService} from '../../parkings/parkings-http.service'; import {ActivatedRoute, Router} from '@angular/router'; import {HttpErrorEntity} from '../../../core/http.service'; import {GroupsDataService, ParkingItem} from '../groups-data.service'; import {GlobalConst} from '../../../share/global-const'; import {CanDeactivateComponent} from '../../../share/interfaces/can-deactivate-component'; import {ParkingsSearchParams} from '../../parkings/parkings.model'; import {CheckboxState} from "../../../share/components/beautify-checkbox/beautify-checkbox.component"; @Component({ selector: 'app-groups-edit', templateUrl: './groups-edit.component.html', styleUrls: ['..//groups.component.css', './groups-edit.component.css'], providers: [ParkingsHttpService] }) export class GroupsEditComponent implements OnInit, CanDeactivateComponent { public groupsInfo: GroupEntity = new GroupEntity(); public updateGroupInfo: GroupEditEntity = new GroupEditEntity(); public groupTypeList: Array<number> = [1, 2, 3, 4, 5, 0]; public regionsList: Array<RegionEntity> = []; public selectedRegion: RegionEntity = new RegionEntity; public isShow = true; public group_id: string; private temSelectedGroupArr: Array<any> = []; private editSuccess = false; // 编辑是否成功 private isInitSearchParkings = true; private groupTypeDirty = false; private parkingDirty = false; private fromPath: 'list' | 'detail' = 'list'; public searchParams: ParkingsSearchParams = new ParkingsSearchParams(); public parkingsList: Array<ParkingItem> = []; public selectedParkingList: Array<ParkingItem> = []; private containerHeight: number; // 容器的高度 private 
loadingFlag = true; private tempRegionParkingList: Array<any> = []; // 临时区域停车场数据列表 private isLinkUrl = true; // 是否存在link private currentScrollElement: any; // 当前滚动条元素 private loadingTimerSubscription: Subscription; @ViewChild('editGroupsForm') public editGroupsForm: any; /** * 获取是否有组类型被选中了(表单校验) * @returns {boolean} */ public get checkedGroupsType(): boolean { if (this.temSelectedGroupArr.length > 0) { return true; } return false; } constructor(private router: Router, private route: ActivatedRoute, private globalService: GlobalService, private parkingsHttpService: ParkingsHttpService, private groupsHttpService: GroupsHttpService, private groupsDataService: GroupsDataService) { this.route.paramMap.subscribe(map => { this.group_id = map.get('parking_group_id'); }); this.route.queryParamMap.subscribe(map => { this.fromPath = map.get('from') === 'detail' ? 'detail' : 'list'; }); this.searchParams.page_num = 1; this.searchParams.page_size = GlobalConst.PageSize; this.searchParams.status = '1'; } public ngOnInit() { this.requestGroupsByIdData(); } private requestGroupsByIdData() { this.groupsHttpService.requestGroupsByIdData(this.group_id).subscribe(data => { this.selectedParkingList = []; this.groupsInfo = data; this.temSelectedGroupArr = this.groupsInfo.parking_group_types; this.selectedParkingList = this.groupsInfo.parkings.map(parking => new ParkingItem(parking)); // 获取省市区数据 this.getRegionsData(); }); } /* 容器div滚动事件 */ public onScrollContainerScroll(event: any) { const target = event.target; this.currentScrollElement = target; this.containerHeight = $('.scroll-container').outerHeight(); if (target.scrollTop + this.containerHeight >= target.scrollHeight / 2) { // 当滚动高度与容器高度之和超过总高度一半就开始加载新数据 this.loadMoreData(); } } private loadMoreData() { if (this.loadingFlag) { return; } this.loadingFlag = true; // 模拟1.5秒获取到数据 this.loadingTimerSubscription && this.loadingTimerSubscription.unsubscribe(); this.loadingTimerSubscription = Observable.timer(1500).subscribe(() => { if 
(this.isLinkUrl) { this.searchParams.page_num++; this.requestParkingsData(); } }); } // 选择组类型 public changeGroupsType(event) { const park_type = this.temSelectedGroupArr; const index = park_type.indexOf(event[1]); this.groupTypeDirty = true; if (index < 0) { if (event[0] === CheckboxState.checked) { this.temSelectedGroupArr.push(event[1]); } } else { this.temSelectedGroupArr.splice(index, 1); } } // 获取省市区数据 public getRegionsData() { this.globalService.regions.subscribe(() => { this.getRegionsByRegionId(GlobalConst.RegionID); }); } // 通过省市区code显示所需省市区数据 public getRegionsByRegionId(region_id: string) { this.globalService.getRegionById(region_id).subscribe(data => { if (data && data.length > 0) { this.regionsList = data[0].cities; this.selectedRegion = this.regionsList[0]; this.searchParams.page_num = 1; this.searchParams.region_id = this.selectedRegion.region_id; this.requestParkingsData(); } }); } /** * 选择区域请求当前区域停车场数据 * 发出请求前判断当前区域停车场数据是否请求过,如果请求过,则不再发出区域停车场数据请求 * 临时区域停车场数据列表中没有当前区域停车场数据时发出请求 */ public requestRegionParkings(region: RegionEntity, num: number) { if (num === 1) { this.isShow = !this.isShow; } else { this.isShow = true; } $(this.currentScrollElement).scrollTop(0); this.selectedRegion = region; if (this.tempRegionParkingList.length > 0) { for (const regionParkingIndex of this.tempRegionParkingList) { if (regionParkingIndex.region_id === region.region_id) { this.parkingsList = []; this.parkingsList = regionParkingIndex.parkingsList; return; } } } this.isInitSearchParkings = true; this.searchParams.page_num = 1; this.searchParams.region_id = region.region_id; this.searchParams.parking_name = ''; this.requestParkingsData(); } // 输入停车场名称查询 public searchParking() { this.searchParams.page_num = 1; this.parkingsList = []; this.requestParkingsData(); } // 选择停车场 public selectedParkings(event: any) { if (event[0] === CheckboxState.checked) { for (const selectedParking of this.selectedParkingList) { if (event[1].source.parking_id === 
selectedParking.source.parking_id) { return; } } event[1].isChecked = true; this.selectedParkingList.push(event[1]); } else if (event[0] === CheckboxState.unchecked) { this.selectedParkingList = this.selectedParkingList.filter(selectedParking => { return event[1].source.parking_id !== selectedParking.source.parking_id; }); event[1].isChecked = false; } this.parkingDirty = true; } // 取消选择 public cancelSelectedParkings(index: number) { const selectedParking = this.selectedParkingList[index]; for (const parking of this.selectedParkingList) { if (parking.source.parking_id === selectedParking.source.parking_id) { parking.isChecked = false; this.selectedParkingList.splice(index, 1); break; } } for (const parkingItem of this.parkingsList) { if (parkingItem.source.parking_id === selectedParking.source.parking_id) { parkingItem.isChecked = false; break; } } } // 提交添加组管理数据 public onAddGroupsFormSubmit() { const parking_ids = []; this.selectedParkingList.forEach(parking => { parking_ids.push(parking.source.parking_id); }); this.updateGroupInfo.parking_group_name = this.groupsInfo.parking_group_name; this.updateGroupInfo.parking_group_types = this.temSelectedGroupArr.join(); this.updateGroupInfo.parking_ids = parking_ids.join(); this.groupsHttpService.requestUpdateGroupsData(this.updateGroupInfo, this.group_id).subscribe(() => { this.globalService.promptBox.open('编辑成功!', () => { // 组数据有改动时更新global.service中组数据 this.globalService.resetGroups(); this.editSuccess = true; this.groupsDataService.clear(); this.navigated(); }); }, err => { if (!this.globalService.httpErrorProcess(err)) { if (err.status === 422) { const error: HttpErrorEntity = HttpErrorEntity.Create(err.json()); for (const content of error.errors) { if (content.field === 'parking_group_name' && content.code === 'missing_field') { this.globalService.promptBox.open('分组名称参数缺失!'); return; } else if (content.field === 'parking_group_name' && content.code === 'invalid') { this.globalService.promptBox.open('分组名称无效或不合法!'); 
return; } else if (content.field === 'parking_group_name' && content.code === 'failed') { this.globalService.promptBox.open('添加失败!'); return; } else if (content.resource === 'parking_group' && content.code === 'already_exist') { this.globalService.promptBox.open('分组名称不能重复,请重新输入!'); return; } } } } }); } // 请求停车场数据 public requestParkingsData() { this.loadingFlag = true; this.loadingTimerSubscription && this.loadingTimerSubscription.unsubscribe(); this.parkingsHttpService.requestParkingsData(this.searchParams, false).subscribe(data => { this.loadingFlag = false; this.isLinkUrl = data.linkUrl ? true : false; if (this.isInitSearchParkings) { this.isInitSearchParkings = false; this.parkingsList = []; this.processParking(data.results); // 初始化请求停车场数据时缓存一份临时停车场数据 this.tempRegionParkingList.push({'region_id': this.searchParams.region_id, 'parkingsList': this.parkingsList}); } else { this.processParking(data.results); } }, err => { this.globalService.httpErrorProcess(err); }); } // 处理停车场数据 private processParking(results: Array<any>) { if (this.selectedParkingList.length > 0) { results.forEach(parking => { const parkingItem = new ParkingItem(parking); this.selectedParkingList.forEach(selectedParking => { if (selectedParking.source.parking_id === parking.parking_id) { parkingItem.isChecked = true; } }); this.parkingsList.push(parkingItem); }); } else { results.forEach(parking => { this
}); } } public onCancelBtnClick() { this.navigated(); } private navigated() { if (this.fromPath === 'detail') { this.router.navigate(['../../detail', this.group_id], {relativeTo: this.route}); } else { this.router.navigate(['../'], {relativeTo: this.route}); } } public canDeactivate(): boolean { return this.editSuccess || !this.editGroupsForm || (!this.editGroupsForm.dirty && !this.groupTypeDirty && !this.parkingDirty); } }
.parkingsList.push(new ParkingItem(parking));
conditional_block
review.go
/* Copyright 2015 Google Inc. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package review contains the data structures used to represent code reviews. package review import ( "bytes" "encoding/json" "fmt" "sort" "github.com/google/git-appraise/repository" "github.com/google/git-appraise/review/analyses" "github.com/google/git-appraise/review/ci" "github.com/google/git-appraise/review/comment" "github.com/google/git-appraise/review/gpg" "github.com/google/git-appraise/review/request" ) const archiveRef = "refs/devtools/archives/reviews" var emptyTree = repository.NewTree(map[string]repository.TreeChild{}) // CommentThread represents the tree-based hierarchy of comments. // // The Resolved field represents the aggregate status of the entire thread. If // it is set to false, then it indicates that there is an unaddressed comment // in the thread. If it is unset, then that means that the root comment is an // FYI only, and that there are no unaddressed comments. If it is set to true, // then that means that there are no unaddressed comments, and that the root // comment has its resolved bit set to true. 
type CommentThread struct { Hash string `json:"hash,omitempty"` Comment comment.Comment `json:"comment"` Original *comment.Comment `json:"original,omitempty"` Edits []*comment.Comment `json:"edits,omitempty"` Children []CommentThread `json:"children,omitempty"` Resolved *bool `json:"resolved,omitempty"` Edited bool `json:"edited,omitempty"` } // Summary represents the high-level state of a code review. // // This high-level state corresponds to the data that can be quickly read // directly from the repo, so other methods that need to operate on a lot // of reviews (such as listing the open reviews) should prefer operating on // the summary rather than the details. // // Review summaries have two status fields which are orthogonal: // 1. Resolved indicates if a reviewer has accepted or rejected the change. // 2. Submitted indicates if the change has been incorporated into the target. type Summary struct { Repo repository.Repo `json:"-"` Revision string `json:"revision"` Request request.Request `json:"request"` AllRequests []request.Request `json:"-"` Comments []CommentThread `json:"comments,omitempty"` Resolved *bool `json:"resolved,omitempty"` Submitted bool `json:"submitted"` } // Review represents the entire state of a code review. // // This extends Summary to also include a list of reports for both the // continuous integration status, and the static analysis runs. Those reports // correspond to either the current commit in the review ref (for pending // reviews), or to the last commented-upon commit (for submitted reviews). 
type Review struct { *Summary Reports []ci.Report `json:"reports,omitempty"` Analyses []analyses.Report `json:"analyses,omitempty"` } type commentsByTimestamp []*comment.Comment // Interface methods for sorting comment threads by timestamp func (cs commentsByTimestamp) Len() int { return len(cs) } func (cs commentsByTimestamp) Swap(i, j int) { cs[i], cs[j] = cs[j], cs[i] } func (cs commentsByTimestamp) Less(i, j int) bool { return cs[i].Timestamp < cs[j].Timestamp } type byTimestamp []CommentThread // Interface methods for sorting comment threads by timestamp func (threads byTimestamp) Len() int { return len(threads) } func (threads byTimestamp) Swap(i, j int) { threads[i], threads[j] = threads[j], threads[i] } func (threads byTimestamp) Less(i, j int) bool { return threads[i].Comment.Timestamp < threads[j].Comment.Timestamp } type requestsByTimestamp []request.Request // Interface methods for sorting review requests by timestamp func (requests requestsByTimestamp) Len() int { return len(requests) } func (requests requestsByTimestamp) Swap(i, j int) { requests[i], requests[j] = requests[j], requests[i] } func (requests requestsByTimestamp) Less(i, j int) bool { return requests[i].Timestamp < requests[j].Timestamp } type summariesWithNewestRequestsFirst []Summary // Interface methods for sorting review summaries in reverse chronological order func (summaries summariesWithNewestRequestsFirst) Len() int { return len(summaries) } func (summaries summariesWithNewestRequestsFirst) Swap(i, j int) { summaries[i], summaries[j] = summaries[j], summaries[i] } func (summaries summariesWithNewestRequestsFirst) Less(i, j int) bool { return summaries[i].Request.Timestamp > summaries[j].Request.Timestamp } // updateThreadsStatus calculates the aggregate status of a sequence of comment threads. // // The aggregate status is the conjunction of all of the non-nil child statuses. // // This has the side-effect of setting the "Resolved" field of all descendant comment threads. 
func updateThreadsStatus(threads []CommentThread) *bool { sort.Stable(byTimestamp(threads)) noUnresolved := true var result *bool for i := range threads { thread := &threads[i] thread.updateResolvedStatus() if thread.Resolved != nil { noUnresolved = noUnresolved && *thread.Resolved result = &noUnresolved } } return result } // updateResolvedStatus calculates the aggregate status of a single comment thread, // and updates the "Resolved" field of that thread accordingly. func (thread *CommentThread) updateResolvedStatus() { resolved := updateThreadsStatus(thread.Children) if resolved == nil { thread.Resolved = thread.Comment.Resolved return } if !*resolved { thread.Resolved = resolved return } if thread.Comment.Resolved == nil || !*thread.Comment.Resolved { thread.Resolved = nil return } thread.Resolved = resolved } // Verify verifies the signature on a comment. func (thread *CommentThread) Verify() error { err := gpg.Verify(&thread.Comment) if err != nil { hash, _ := thread.Comment.Hash() return fmt.Errorf("verification of comment [%s] failed: %s", hash, err) } for _, child := range thread.Children { err = child.Verify() if err != nil { return err } } return nil } // mutableThread is an internal-only data structure used to store partially constructed comment threads. type mutableThread struct { Hash string Comment comment.Comment Edits []*comment.Comment Children []*mutableThread } // fixMutableThread is a helper method to finalize a mutableThread struct // (partially constructed comment thread) as a CommentThread struct // (fully constructed comment thread). 
func fixMutableThread(mutableThread *mutableThread) CommentThread { var children []CommentThread edited := len(mutableThread.Edits) > 0 for _, mutableChild := range mutableThread.Children { child := fixMutableThread(mutableChild) if (!edited) && child.Edited { edited = true } children = append(children, child) } comment := &mutableThread.Comment if len(mutableThread.Edits) > 0 { sort.Stable(commentsByTimestamp(mutableThread.Edits)) comment = mutableThread.Edits[len(mutableThread.Edits)-1] } return CommentThread{ Hash: mutableThread.Hash, Comment: *comment, Original: &mutableThread.Comment, Edits: mutableThread.Edits, Children: children, Edited: edited, } } // This function builds the comment thread tree from the log-based list of comments. // // Since the comments can be processed in any order, this uses an internal mutable // data structure, and then converts it to the proper CommentThread structure at the end. func buildCommentThreads(commentsByHash map[string]comment.Comment) []CommentThread { threadsByHash := make(map[string]*mutableThread) for hash, comment := range commentsByHash { thread, ok := threadsByHash[hash] if !ok { thread = &mutableThread{ Hash: hash, Comment: comment, } threadsByHash[hash] = thread } } var rootHashes []string for hash, thread := range threadsByHash { if thread.Comment.Original != "" { original, ok := threadsByHash[thread.Comment.Original] if ok { original.Edits = append(original.Edits, &thread.Comment) } } else if thread.Comment.Parent == "" { rootHashes = append(rootHashes, hash) } else { parent, ok := threadsByHash[thread.Comment.Parent] if ok { parent.Children = append(parent.Children, thread) } } } var threads []CommentThread for _, hash := range rootHashes { threads = append(threads, fixMutableThread(threadsByHash[hash])) } return threads } // getCommentsFromNotes parses the log-structured sequence of comments for a commit, // and then builds the corresponding tree-structured comment threads. 
func getCommentsFromNotes(repo repository.Repo, revision string, commentNotes []repository.Note) ([]CommentThread, *bool) { commentsByHash := comment.ParseAllValid(commentNotes) comments := buildCommentThreads(commentsByHash) resolved := updateThreadsStatus(comments) return comments, resolved } func getSummaryFromNotes(repo repository.Repo, revision string, requestNotes, commentNotes []repository.Note) (*Summary, error) { requests := request.ParseAllValid(requestNotes) if requests == nil { return nil, fmt.Errorf("Could not find any review requests for %q", revision) } sort.Stable(requestsByTimestamp(requests)) reviewSummary := Summary{ Repo: repo, Revision: revision, Request: requests[len(requests)-1], AllRequests: requests, } comments, resolved := getCommentsFromNotes(repo, revision, commentNotes) reviewSummary.Comments = comments reviewSummary.Resolved = resolved return &reviewSummary, nil } func GetComments(repo repository.Repo, revision string) ([]CommentThread, error) { commentNotes := repo.GetNotes(comment.Ref, revision) c, _ := getCommentsFromNotes(repo, revision, commentNotes) return c, nil } // GetSummary returns the summary of the code review specified by its revision // and the references which contain that reviews summary and comments. // // If no review request exists, the returned review summary is nil. 
func GetSummaryViaRefs(repo repository.Repo, requestRef, commentRef, revision string) (*Summary, error) { if err := repo.VerifyCommit(revision); err != nil { return nil, fmt.Errorf("Could not find a commit named %q", revision) } requestNotes := repo.GetNotes(requestRef, revision) commentNotes := repo.GetNotes(commentRef, revision) summary, err := getSummaryFromNotes(repo, revision, requestNotes, commentNotes) if err != nil { return nil, err } currentCommit := revision if summary.Request.Alias != "" { currentCommit = summary.Request.Alias } if !summary.IsAbandoned() { submitted, err := repo.IsAncestor(currentCommit, summary.Request.TargetRef) if err != nil { return nil, err } summary.Submitted = submitted } return summary, nil } // GetSummary returns the summary of the specified code review. // // If no review request exists, the returned review summary is nil. func GetSummary(repo repository.Repo, revision string) (*Summary, error) { return GetSummaryViaRefs(repo, request.Ref, comment.Ref, revision) } // Details returns the detailed review for the given summary. func (r *Summary) Details() (*Review, error) { review := Review{ Summary: r, } currentCommit, err := review.GetHeadCommit() if err == nil { review.Reports = ci.ParseAllValid(review.Repo.GetNotes(ci.Ref, currentCommit)) review.Analyses = analyses.ParseAllValid(review.Repo.GetNotes(analyses.Ref, currentCommit)) } return &review, nil } // IsAbandoned returns whether or not the given review has been abandoned. func (r *Summary) IsAbandoned() bool { return r.Request.TargetRef == "" } // IsOpen returns whether or not the given review is still open (neither submitted nor abandoned). func (r *Summary) IsOpen() bool { return !r.Submitted && !r.IsAbandoned() } // Verify returns whether or not a summary's comments are a) signed, and b) /// that those signatures are verifiable. 
func (r *Summary) Verify() error { err := gpg.Verify(&r.Request) if err != nil { return fmt.Errorf("couldn't verify request targeting: %q: %s", r.Request.TargetRef, err) } for _, thread := range r.Comments { err := thread.Verify() if err != nil { return err } } return nil } // Get returns the specified code review. // // If no review request exists, the returned review is nil. func Get(repo repository.Repo, revision string) (*Review, error) { summary, err := GetSummary(repo, revision) if err != nil { return nil, err } if summary == nil { return nil, nil } return summary.Details() } func getIsSubmittedCheck(repo repository.Repo) func(ref, commit string) bool { refCommitsMap := make(map[string]map[string]bool) getRefCommitsMap := func(ref string) map[string]bool { commitsMap, ok := refCommitsMap[ref] if ok { return commitsMap } commitsMap = make(map[string]bool) for _, commit := range repo.ListCommits(ref) { commitsMap[commit] = true } refCommitsMap[ref] = commitsMap return commitsMap } return func(ref, commit string) bool { return getRefCommitsMap(ref)[commit] } } func unsortedListAll(repo repository.Repo) []Summary { reviewNotesMap, err := repo.GetAllNotes(request.Ref) if err != nil { return nil } discussNotesMap, err := repo.GetAllNotes(comment.Ref) if err != nil { return nil } isSubmittedCheck := getIsSubmittedCheck(repo) var reviews []Summary for commit, notes := range reviewNotesMap { summary, err := getSummaryFromNotes(repo, commit, notes, discussNotesMap[commit]) if err != nil { continue } if !summary.IsAbandoned() { summary.Submitted = isSubmittedCheck(summary.Request.TargetRef, summary.getStartingCommit()) } reviews = append(reviews, *summary) } return reviews } // ListAll returns all reviews stored in the git-notes. func ListAll(repo repository.Repo) []Summary { reviews := unsortedListAll(repo) sort.Stable(summariesWithNewestRequestsFirst(reviews)) return reviews } // ListOpen returns all reviews that are not yet incorporated into their target refs. 
func ListOpen(repo repository.Repo) []Summary { var openReviews []Summary for _, review := range unsortedListAll(repo) { if review.IsOpen() { openReviews = append(openReviews, review) } } sort.Stable(summariesWithNewestRequestsFirst(openReviews)) return openReviews } // GetCurrent returns the current, open code review. // // If there are multiple matching reviews, then an error is returned. func GetCurrent(repo repository.Repo) (*Review, error) { reviewRef, err := repo.GetHeadRef() if err != nil { return nil, err } var matchingReviews []Summary for _, review := range ListOpen(repo) { if review.Request.ReviewRef == reviewRef { matchingReviews = append(matchingReviews, review) } } if matchingReviews == nil { return nil, nil } if len(matchingReviews) != 1 { return nil, fmt.Errorf("There are %d open reviews for the ref \"%s\"", len(matchingReviews), reviewRef) } return matchingReviews[0].Details() } // GetBuildStatusMessage returns a string of the current build-and-test status // of the review, or "unknown" if the build-and-test status cannot be determined. func (r *Review) GetBuildStatusMessage() string { statusMessage := "unknown" ciReport, err := ci.GetLatestCIReport(r.Reports) if err != nil { return fmt.Sprintf("unknown: %s", err) } if ciReport != nil { statusMessage = fmt.Sprintf("%s (%q)", ciReport.Status, ciReport.URL) } return statusMessage } // GetAnalysesNotes returns all of the notes from the most recent static // analysis run recorded in the git notes. func (r *Review) GetAnalysesNotes() ([]analyses.Note, error) { latestAnalyses, err := analyses.GetLatestAnalysesReport(r.Analyses) if err != nil { return nil, err } if latestAnalyses == nil { return nil, fmt.Errorf("No analyses available") } return latestAnalyses.GetNotes() } // GetAnalysesMessage returns a string summarizing the results of the // most recent static analyses. 
func (r *Review) GetAnalysesMessage() string { latestAnalyses, err := analyses.GetLatestAnalysesReport(r.Analyses) if err != nil { return err.Error() } if latestAnalyses == nil { return "No analyses available" } status := latestAnalyses.Status if status != "" && status != analyses.StatusNeedsMoreWork { return status } analysesNotes, err := latestAnalyses.GetNotes() if err != nil { return err.Error() } if analysesNotes == nil { return "passed" } return fmt.Sprintf("%d warnings\n", len(analysesNotes)) // TODO(ojarjur): Figure out the best place to display the actual notes } func prettyPrintJSON(jsonBytes []byte) (string, error) { var prettyBytes bytes.Buffer err := json.Indent(&prettyBytes, jsonBytes, "", " ") if err != nil { return "", err } return prettyBytes.String(), nil } // GetCommentsJSON returns the pretty printed JSON for a slice of comment threads. func GetCommentsJSON(cs []CommentThread) (string, error) { jsonBytes, err := json.Marshal(cs) if err != nil { return "", err } return prettyPrintJSON(jsonBytes) } // GetJSON returns the pretty printed JSON for a review summary. func (r *Summary) GetJSON() (string, error) { jsonBytes, err := json.Marshal(*r) if err != nil { return "", err } return prettyPrintJSON(jsonBytes) } // GetJSON returns the pretty printed JSON for a review. func (r *Review) GetJSON() (string, error) { jsonBytes, err := json.Marshal(*r) if err != nil { return "", err } return prettyPrintJSON(jsonBytes) } // findLastCommit returns the later (newest) commit from the union of the provided commit // and all of the commits that are referenced in the given comment threads. 
func (r *Review) findLastCommit(startingCommit, latestCommit string, commentThreads []CommentThread) string { isLater := func(commit string) bool { if err := r.Repo.VerifyCommit(commit); err != nil { return false } if t, e := r.Repo.IsAncestor(latestCommit, commit); e == nil && t { return true } if t, e := r.Repo.IsAncestor(startingCommit, commit); e == nil && !t { return false } if t, e := r.Repo.IsAncestor(commit, latestCommit); e == nil && t
ct, err := r.Repo.GetCommitTime(commit) if err != nil { return false } lt, err := r.Repo.GetCommitTime(latestCommit) if err != nil { return true } return ct > lt } updateLatest := func(commit string) { if commit == "" { return } if isLater(commit) { latestCommit = commit } } for _, commentThread := range commentThreads { comment := commentThread.Comment if comment.Location != nil { updateLatest(comment.Location.Commit) } updateLatest(r.findLastCommit(startingCommit, latestCommit, commentThread.Children)) } return latestCommit } func (r *Summary) getStartingCommit() string { if r.Request.Alias != "" { return r.Request.Alias } return r.Revision } // GetHeadCommit returns the latest commit in a review. func (r *Review) GetHeadCommit() (string, error) { currentCommit := r.getStartingCommit() if r.Request.ReviewRef == "" { return currentCommit, nil } if r.Submitted { // The review has already been submitted. // Go through the list of comments and find the last commented upon commit. return r.findLastCommit(currentCommit, currentCommit, r.Comments), nil } // It is possible that the review ref is no longer an ancestor of the starting // commit (e.g. if a rebase left us in a detached head), in which case we have to // find the head commit without using it. useReviewRef, err := r.Repo.IsAncestor(currentCommit, r.Request.ReviewRef) if err != nil { return "", err } if useReviewRef { return r.Repo.ResolveRefCommit(r.Request.ReviewRef) } return r.findLastCommit(currentCommit, currentCommit, r.Comments), nil } // GetBaseCommit returns the commit against which a review should be compared. func (r *Review) GetBaseCommit() (string, error) { if !r.IsOpen() { if r.Request.BaseCommit != "" { return r.Request.BaseCommit, nil } // This means the review has been submitted, but did not specify a base commit. // In this case, we have to treat the last parent commit as the base. 
This is // usually what we want, since merging a target branch into a feature branch // results in the previous commit to the feature branch being the first parent, // and the latest commit to the target branch being the second parent. return r.Repo.GetLastParent(r.Revision) } targetRefHead, err := r.Repo.ResolveRefCommit(r.Request.TargetRef) if err != nil { return "", err } leftHandSide := targetRefHead rightHandSide := r.Revision if r.Request.ReviewRef != "" { if reviewRefHead, err := r.Repo.ResolveRefCommit(r.Request.ReviewRef); err == nil { rightHandSide = reviewRefHead } } return r.Repo.MergeBase(leftHandSide, rightHandSide) } // ListCommits lists the commits included in a review. func (r *Review) ListCommits() ([]string, error) { baseCommit, err := r.GetBaseCommit() if err != nil { return nil, err } headCommit, err := r.GetHeadCommit() if err != nil { return nil, err } return r.Repo.ListCommitsBetween(baseCommit, headCommit) } // GetDiff returns the diff for a review. func (r *Review) GetDiff(diffArgs ...string) (string, error) { var baseCommit, headCommit string baseCommit, err := r.GetBaseCommit() if err == nil { headCommit, err = r.GetHeadCommit() } if err == nil { return r.Repo.Diff(baseCommit, headCommit, diffArgs...) } return "", err } // AddComment adds the given comment to the review. func (r *Review) AddComment(c comment.Comment) error { commentNote, err := c.Write() if err != nil { return err } r.Repo.AppendNote(comment.Ref, r.Revision, commentNote) return nil } // Rebase performs an interactive rebase of the review onto its target ref. // // If the 'archivePrevious' argument is true, then the previous head of the // review will be added to the 'refs/devtools/archives/reviews' ref prior // to being rewritten. That ensures the review history is kept from being // garbage collected. 
func (r *Review) Rebase(archivePrevious bool) error { if archivePrevious { orig, err := r.GetHeadCommit() if err != nil { return err } if err := r.Repo.ArchiveRef(orig, archiveRef); err != nil { return err } } if err := r.Repo.SwitchToRef(r.Request.ReviewRef); err != nil { return err } err := r.Repo.RebaseRef(r.Request.TargetRef) if err != nil { return err } alias, err := r.Repo.GetCommitHash("HEAD") if err != nil { return err } r.Request.Alias = alias newNote, err := r.Request.Write() if err != nil { return err } return r.Repo.AppendNote(request.Ref, r.Revision, newNote) } // RebaseAndSign performs an interactive rebase of the review onto its // target ref. It signs the result of the rebase as well as (re)signs // the review request itself. // // If the 'archivePrevious' argument is true, then the previous head of the // review will be added to the 'refs/devtools/archives/reviews' ref prior // to being rewritten. That ensures the review history is kept from being // garbage collected. func (r *Review) RebaseAndSign(archivePrevious bool) error { if archivePrevious { orig, err := r.GetHeadCommit() if err != nil { return err } if err := r.Repo.ArchiveRef(orig, archiveRef); err != nil { return err } } if err := r.Repo.SwitchToRef(r.Request.ReviewRef); err != nil { return err } err := r.Repo.RebaseAndSignRef(r.Request.TargetRef) if err != nil { return err } alias, err := r.Repo.GetCommitHash("HEAD") if err != nil { return err } r.Request.Alias = alias key, err := r.Repo.GetUserSigningKey() if err != nil { return err } err = gpg.Sign(key, &r.Request) if err != nil { return err } newNote, err := r.Request.Write() if err != nil { return err } return r.Repo.AppendNote(request.Ref, r.Revision, newNote) } func wellKnownCommitForPath(repo repository.Repo, path string, archive bool) (string, error) { commitDetails := &repository.CommitDetails{ Author: "nobody", AuthorEmail: "nobody", AuthorTime: "100000000 +0000", Committer: "nobody", CommitterEmail: "nobody", Time: "100000000 
+0000", Summary: path, } commitHash, err := repo.CreateCommitWithTree(commitDetails, emptyTree) if err != nil { return "", err } if !archive { return commitHash, nil } if err := repo.ArchiveRef(commitHash, archiveRef); err != nil { return "", err } return commitHash, nil } func AddDetachedComment(repo repository.Repo, c *comment.Comment) error { path := c.Location.Path wellKnownCommit, err := wellKnownCommitForPath(repo, path, true) if err != nil { return fmt.Errorf("Failure finding the well-known commit for detached comments on %q: %v", path, err) } commentNote, err := c.Write() if err != nil { return err } return repo.AppendNote(comment.Ref, wellKnownCommit, commentNote) } func GetDetachedComments(repo repository.Repo, path string) ([]CommentThread, error) { wellKnownCommit, err := wellKnownCommitForPath(repo, path, false) if err != nil { return nil, fmt.Errorf("Failure finding the well-known commit for detached comments on %q: %v", path, err) } return GetComments(repo, wellKnownCommit) }
{ return false }
conditional_block
review.go
/* Copyright 2015 Google Inc. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package review contains the data structures used to represent code reviews. package review import ( "bytes" "encoding/json" "fmt" "sort" "github.com/google/git-appraise/repository" "github.com/google/git-appraise/review/analyses" "github.com/google/git-appraise/review/ci" "github.com/google/git-appraise/review/comment" "github.com/google/git-appraise/review/gpg" "github.com/google/git-appraise/review/request" ) const archiveRef = "refs/devtools/archives/reviews" var emptyTree = repository.NewTree(map[string]repository.TreeChild{}) // CommentThread represents the tree-based hierarchy of comments. // // The Resolved field represents the aggregate status of the entire thread. If // it is set to false, then it indicates that there is an unaddressed comment // in the thread. If it is unset, then that means that the root comment is an // FYI only, and that there are no unaddressed comments. If it is set to true, // then that means that there are no unaddressed comments, and that the root // comment has its resolved bit set to true. 
type CommentThread struct { Hash string `json:"hash,omitempty"` Comment comment.Comment `json:"comment"` Original *comment.Comment `json:"original,omitempty"` Edits []*comment.Comment `json:"edits,omitempty"` Children []CommentThread `json:"children,omitempty"` Resolved *bool `json:"resolved,omitempty"` Edited bool `json:"edited,omitempty"` } // Summary represents the high-level state of a code review. // // This high-level state corresponds to the data that can be quickly read // directly from the repo, so other methods that need to operate on a lot // of reviews (such as listing the open reviews) should prefer operating on // the summary rather than the details. // // Review summaries have two status fields which are orthogonal: // 1. Resolved indicates if a reviewer has accepted or rejected the change. // 2. Submitted indicates if the change has been incorporated into the target. type Summary struct { Repo repository.Repo `json:"-"` Revision string `json:"revision"` Request request.Request `json:"request"` AllRequests []request.Request `json:"-"` Comments []CommentThread `json:"comments,omitempty"` Resolved *bool `json:"resolved,omitempty"` Submitted bool `json:"submitted"` } // Review represents the entire state of a code review. // // This extends Summary to also include a list of reports for both the // continuous integration status, and the static analysis runs. Those reports // correspond to either the current commit in the review ref (for pending // reviews), or to the last commented-upon commit (for submitted reviews). 
type Review struct { *Summary Reports []ci.Report `json:"reports,omitempty"` Analyses []analyses.Report `json:"analyses,omitempty"` } type commentsByTimestamp []*comment.Comment // Interface methods for sorting comment threads by timestamp func (cs commentsByTimestamp) Len() int { return len(cs) } func (cs commentsByTimestamp) Swap(i, j int) { cs[i], cs[j] = cs[j], cs[i] } func (cs commentsByTimestamp) Less(i, j int) bool { return cs[i].Timestamp < cs[j].Timestamp } type byTimestamp []CommentThread // Interface methods for sorting comment threads by timestamp func (threads byTimestamp) Len() int { return len(threads) } func (threads byTimestamp) Swap(i, j int) { threads[i], threads[j] = threads[j], threads[i] } func (threads byTimestamp) Less(i, j int) bool { return threads[i].Comment.Timestamp < threads[j].Comment.Timestamp } type requestsByTimestamp []request.Request // Interface methods for sorting review requests by timestamp func (requests requestsByTimestamp) Len() int { return len(requests) } func (requests requestsByTimestamp) Swap(i, j int) { requests[i], requests[j] = requests[j], requests[i] } func (requests requestsByTimestamp) Less(i, j int) bool { return requests[i].Timestamp < requests[j].Timestamp } type summariesWithNewestRequestsFirst []Summary // Interface methods for sorting review summaries in reverse chronological order func (summaries summariesWithNewestRequestsFirst) Len() int { return len(summaries) } func (summaries summariesWithNewestRequestsFirst) Swap(i, j int) { summaries[i], summaries[j] = summaries[j], summaries[i] } func (summaries summariesWithNewestRequestsFirst) Less(i, j int) bool { return summaries[i].Request.Timestamp > summaries[j].Request.Timestamp } // updateThreadsStatus calculates the aggregate status of a sequence of comment threads. // // The aggregate status is the conjunction of all of the non-nil child statuses. // // This has the side-effect of setting the "Resolved" field of all descendant comment threads. 
func updateThreadsStatus(threads []CommentThread) *bool { sort.Stable(byTimestamp(threads)) noUnresolved := true var result *bool for i := range threads { thread := &threads[i] thread.updateResolvedStatus() if thread.Resolved != nil { noUnresolved = noUnresolved && *thread.Resolved result = &noUnresolved } } return result } // updateResolvedStatus calculates the aggregate status of a single comment thread, // and updates the "Resolved" field of that thread accordingly. func (thread *CommentThread) updateResolvedStatus()
// Verify verifies the signature on a comment. func (thread *CommentThread) Verify() error { err := gpg.Verify(&thread.Comment) if err != nil { hash, _ := thread.Comment.Hash() return fmt.Errorf("verification of comment [%s] failed: %s", hash, err) } for _, child := range thread.Children { err = child.Verify() if err != nil { return err } } return nil } // mutableThread is an internal-only data structure used to store partially constructed comment threads. type mutableThread struct { Hash string Comment comment.Comment Edits []*comment.Comment Children []*mutableThread } // fixMutableThread is a helper method to finalize a mutableThread struct // (partially constructed comment thread) as a CommentThread struct // (fully constructed comment thread). func fixMutableThread(mutableThread *mutableThread) CommentThread { var children []CommentThread edited := len(mutableThread.Edits) > 0 for _, mutableChild := range mutableThread.Children { child := fixMutableThread(mutableChild) if (!edited) && child.Edited { edited = true } children = append(children, child) } comment := &mutableThread.Comment if len(mutableThread.Edits) > 0 { sort.Stable(commentsByTimestamp(mutableThread.Edits)) comment = mutableThread.Edits[len(mutableThread.Edits)-1] } return CommentThread{ Hash: mutableThread.Hash, Comment: *comment, Original: &mutableThread.Comment, Edits: mutableThread.Edits, Children: children, Edited: edited, } } // This function builds the comment thread tree from the log-based list of comments. // // Since the comments can be processed in any order, this uses an internal mutable // data structure, and then converts it to the proper CommentThread structure at the end. 
func buildCommentThreads(commentsByHash map[string]comment.Comment) []CommentThread { threadsByHash := make(map[string]*mutableThread) for hash, comment := range commentsByHash { thread, ok := threadsByHash[hash] if !ok { thread = &mutableThread{ Hash: hash, Comment: comment, } threadsByHash[hash] = thread } } var rootHashes []string for hash, thread := range threadsByHash { if thread.Comment.Original != "" { original, ok := threadsByHash[thread.Comment.Original] if ok { original.Edits = append(original.Edits, &thread.Comment) } } else if thread.Comment.Parent == "" { rootHashes = append(rootHashes, hash) } else { parent, ok := threadsByHash[thread.Comment.Parent] if ok { parent.Children = append(parent.Children, thread) } } } var threads []CommentThread for _, hash := range rootHashes { threads = append(threads, fixMutableThread(threadsByHash[hash])) } return threads } // getCommentsFromNotes parses the log-structured sequence of comments for a commit, // and then builds the corresponding tree-structured comment threads. 
func getCommentsFromNotes(repo repository.Repo, revision string, commentNotes []repository.Note) ([]CommentThread, *bool) { commentsByHash := comment.ParseAllValid(commentNotes) comments := buildCommentThreads(commentsByHash) resolved := updateThreadsStatus(comments) return comments, resolved } func getSummaryFromNotes(repo repository.Repo, revision string, requestNotes, commentNotes []repository.Note) (*Summary, error) { requests := request.ParseAllValid(requestNotes) if requests == nil { return nil, fmt.Errorf("Could not find any review requests for %q", revision) } sort.Stable(requestsByTimestamp(requests)) reviewSummary := Summary{ Repo: repo, Revision: revision, Request: requests[len(requests)-1], AllRequests: requests, } comments, resolved := getCommentsFromNotes(repo, revision, commentNotes) reviewSummary.Comments = comments reviewSummary.Resolved = resolved return &reviewSummary, nil } func GetComments(repo repository.Repo, revision string) ([]CommentThread, error) { commentNotes := repo.GetNotes(comment.Ref, revision) c, _ := getCommentsFromNotes(repo, revision, commentNotes) return c, nil } // GetSummary returns the summary of the code review specified by its revision // and the references which contain that reviews summary and comments. // // If no review request exists, the returned review summary is nil. 
func GetSummaryViaRefs(repo repository.Repo, requestRef, commentRef, revision string) (*Summary, error) { if err := repo.VerifyCommit(revision); err != nil { return nil, fmt.Errorf("Could not find a commit named %q", revision) } requestNotes := repo.GetNotes(requestRef, revision) commentNotes := repo.GetNotes(commentRef, revision) summary, err := getSummaryFromNotes(repo, revision, requestNotes, commentNotes) if err != nil { return nil, err } currentCommit := revision if summary.Request.Alias != "" { currentCommit = summary.Request.Alias } if !summary.IsAbandoned() { submitted, err := repo.IsAncestor(currentCommit, summary.Request.TargetRef) if err != nil { return nil, err } summary.Submitted = submitted } return summary, nil } // GetSummary returns the summary of the specified code review. // // If no review request exists, the returned review summary is nil. func GetSummary(repo repository.Repo, revision string) (*Summary, error) { return GetSummaryViaRefs(repo, request.Ref, comment.Ref, revision) } // Details returns the detailed review for the given summary. func (r *Summary) Details() (*Review, error) { review := Review{ Summary: r, } currentCommit, err := review.GetHeadCommit() if err == nil { review.Reports = ci.ParseAllValid(review.Repo.GetNotes(ci.Ref, currentCommit)) review.Analyses = analyses.ParseAllValid(review.Repo.GetNotes(analyses.Ref, currentCommit)) } return &review, nil } // IsAbandoned returns whether or not the given review has been abandoned. func (r *Summary) IsAbandoned() bool { return r.Request.TargetRef == "" } // IsOpen returns whether or not the given review is still open (neither submitted nor abandoned). func (r *Summary) IsOpen() bool { return !r.Submitted && !r.IsAbandoned() } // Verify returns whether or not a summary's comments are a) signed, and b) /// that those signatures are verifiable. 
// Verify checks the GPG signature on the review request and on every comment
// thread, returning the first verification failure encountered (nil on success).
func (r *Summary) Verify() error {
	err := gpg.Verify(&r.Request)
	if err != nil {
		return fmt.Errorf("couldn't verify request targeting: %q: %s", r.Request.TargetRef, err)
	}
	for _, thread := range r.Comments {
		err := thread.Verify()
		if err != nil {
			return err
		}
	}
	return nil
}

// Get returns the specified code review.
//
// If no review request exists, the returned review is nil.
func Get(repo repository.Repo, revision string) (*Review, error) {
	summary, err := GetSummary(repo, revision)
	if err != nil {
		return nil, err
	}
	if summary == nil {
		return nil, nil
	}
	return summary.Details()
}

// getIsSubmittedCheck returns a predicate that reports whether a commit is
// included in the history of a ref, memoizing the commit set per ref so that
// repeated checks against the same ref list its commits only once.
func getIsSubmittedCheck(repo repository.Repo) func(ref, commit string) bool {
	refCommitsMap := make(map[string]map[string]bool)

	// getRefCommitsMap lazily builds (and caches) the commit set for a ref.
	getRefCommitsMap := func(ref string) map[string]bool {
		commitsMap, ok := refCommitsMap[ref]
		if ok {
			return commitsMap
		}
		commitsMap = make(map[string]bool)
		for _, commit := range repo.ListCommits(ref) {
			commitsMap[commit] = true
		}
		refCommitsMap[ref] = commitsMap
		return commitsMap
	}
	return func(ref, commit string) bool {
		return getRefCommitsMap(ref)[commit]
	}
}

// unsortedListAll reads every review stored in the git-notes, in no
// particular order. Revisions whose request notes cannot be parsed are
// silently skipped.
func unsortedListAll(repo repository.Repo) []Summary {
	reviewNotesMap, err := repo.GetAllNotes(request.Ref)
	if err != nil {
		return nil
	}
	discussNotesMap, err := repo.GetAllNotes(comment.Ref)
	if err != nil {
		return nil
	}
	isSubmittedCheck := getIsSubmittedCheck(repo)
	var reviews []Summary
	for commit, notes := range reviewNotesMap {
		summary, err := getSummaryFromNotes(repo, commit, notes, discussNotesMap[commit])
		if err != nil {
			continue
		}
		if !summary.IsAbandoned() {
			summary.Submitted = isSubmittedCheck(summary.Request.TargetRef, summary.getStartingCommit())
		}
		reviews = append(reviews, *summary)
	}
	return reviews
}

// ListAll returns all reviews stored in the git-notes.
func ListAll(repo repository.Repo) []Summary {
	reviews := unsortedListAll(repo)
	sort.Stable(summariesWithNewestRequestsFirst(reviews))
	return reviews
}

// ListOpen returns all reviews that are not yet incorporated into their target refs.
func ListOpen(repo repository.Repo) []Summary {
	var openReviews []Summary
	for _, review := range unsortedListAll(repo) {
		if review.IsOpen() {
			openReviews = append(openReviews, review)
		}
	}
	sort.Stable(summariesWithNewestRequestsFirst(openReviews))
	return openReviews
}

// GetCurrent returns the current, open code review.
//
// If there are multiple matching reviews, then an error is returned.
func GetCurrent(repo repository.Repo) (*Review, error) {
	reviewRef, err := repo.GetHeadRef()
	if err != nil {
		return nil, err
	}
	var matchingReviews []Summary
	for _, review := range ListOpen(repo) {
		if review.Request.ReviewRef == reviewRef {
			matchingReviews = append(matchingReviews, review)
		}
	}
	// No open review for the current ref is not an error: return nil, nil.
	if matchingReviews == nil {
		return nil, nil
	}
	if len(matchingReviews) != 1 {
		return nil, fmt.Errorf("There are %d open reviews for the ref \"%s\"", len(matchingReviews), reviewRef)
	}
	return matchingReviews[0].Details()
}

// GetBuildStatusMessage returns a string of the current build-and-test status
// of the review, or "unknown" if the build-and-test status cannot be determined.
func (r *Review) GetBuildStatusMessage() string {
	statusMessage := "unknown"
	ciReport, err := ci.GetLatestCIReport(r.Reports)
	if err != nil {
		return fmt.Sprintf("unknown: %s", err)
	}
	if ciReport != nil {
		statusMessage = fmt.Sprintf("%s (%q)", ciReport.Status, ciReport.URL)
	}
	return statusMessage
}

// GetAnalysesNotes returns all of the notes from the most recent static
// analysis run recorded in the git notes.
func (r *Review) GetAnalysesNotes() ([]analyses.Note, error) {
	latestAnalyses, err := analyses.GetLatestAnalysesReport(r.Analyses)
	if err != nil {
		return nil, err
	}
	if latestAnalyses == nil {
		return nil, fmt.Errorf("No analyses available")
	}
	return latestAnalyses.GetNotes()
}

// GetAnalysesMessage returns a string summarizing the results of the
// most recent static analyses.
func (r *Review) GetAnalysesMessage() string {
	latestAnalyses, err := analyses.GetLatestAnalysesReport(r.Analyses)
	if err != nil {
		return err.Error()
	}
	if latestAnalyses == nil {
		return "No analyses available"
	}
	// An explicit status other than "needs more work" is reported verbatim.
	status := latestAnalyses.Status
	if status != "" && status != analyses.StatusNeedsMoreWork {
		return status
	}
	analysesNotes, err := latestAnalyses.GetNotes()
	if err != nil {
		return err.Error()
	}
	if analysesNotes == nil {
		return "passed"
	}
	return fmt.Sprintf("%d warnings\n", len(analysesNotes))
	// TODO(ojarjur): Figure out the best place to display the actual notes
}

// prettyPrintJSON re-indents the given JSON bytes for human-readable output.
func prettyPrintJSON(jsonBytes []byte) (string, error) {
	var prettyBytes bytes.Buffer
	err := json.Indent(&prettyBytes, jsonBytes, "", " ")
	if err != nil {
		return "", err
	}
	return prettyBytes.String(), nil
}

// GetCommentsJSON returns the pretty printed JSON for a slice of comment threads.
func GetCommentsJSON(cs []CommentThread) (string, error) {
	jsonBytes, err := json.Marshal(cs)
	if err != nil {
		return "", err
	}
	return prettyPrintJSON(jsonBytes)
}

// GetJSON returns the pretty printed JSON for a review summary.
func (r *Summary) GetJSON() (string, error) {
	jsonBytes, err := json.Marshal(*r)
	if err != nil {
		return "", err
	}
	return prettyPrintJSON(jsonBytes)
}

// GetJSON returns the pretty printed JSON for a review.
func (r *Review) GetJSON() (string, error) {
	jsonBytes, err := json.Marshal(*r)
	if err != nil {
		return "", err
	}
	return prettyPrintJSON(jsonBytes)
}

// findLastCommit returns the later (newest) commit from the union of the provided commit
// and all of the commits that are referenced in the given comment threads.
func (r *Review) findLastCommit(startingCommit, latestCommit string, commentThreads []CommentThread) string {
	// isLater reports whether the candidate commit should replace
	// latestCommit. Ancestry checks take precedence; commit timestamps are
	// only used as a tie-breaker when ancestry is inconclusive.
	isLater := func(commit string) bool {
		if err := r.Repo.VerifyCommit(commit); err != nil {
			return false
		}
		if t, e := r.Repo.IsAncestor(latestCommit, commit); e == nil && t {
			return true
		}
		if t, e := r.Repo.IsAncestor(startingCommit, commit); e == nil && !t {
			// Not part of the review's history at all.
			return false
		}
		if t, e := r.Repo.IsAncestor(commit, latestCommit); e == nil && t {
			return false
		}
		ct, err := r.Repo.GetCommitTime(commit)
		if err != nil {
			return false
		}
		lt, err := r.Repo.GetCommitTime(latestCommit)
		if err != nil {
			return true
		}
		return ct > lt
	}
	updateLatest := func(commit string) {
		if commit == "" {
			return
		}
		if isLater(commit) {
			latestCommit = commit
		}
	}
	for _, commentThread := range commentThreads {
		comment := commentThread.Comment
		if comment.Location != nil {
			updateLatest(comment.Location.Commit)
		}
		// Recurse into replies: child threads may reference newer commits.
		updateLatest(r.findLastCommit(startingCommit, latestCommit, commentThread.Children))
	}
	return latestCommit
}

// getStartingCommit returns the commit a review effectively starts from:
// the rebase alias when one exists, otherwise the reviewed revision itself.
func (r *Summary) getStartingCommit() string {
	if r.Request.Alias != "" {
		return r.Request.Alias
	}
	return r.Revision
}

// GetHeadCommit returns the latest commit in a review.
func (r *Review) GetHeadCommit() (string, error) {
	currentCommit := r.getStartingCommit()
	if r.Request.ReviewRef == "" {
		return currentCommit, nil
	}
	if r.Submitted {
		// The review has already been submitted.
		// Go through the list of comments and find the last commented upon commit.
		return r.findLastCommit(currentCommit, currentCommit, r.Comments), nil
	}
	// It is possible that the review ref is no longer an ancestor of the starting
	// commit (e.g. if a rebase left us in a detached head), in which case we have to
	// find the head commit without using it.
	useReviewRef, err := r.Repo.IsAncestor(currentCommit, r.Request.ReviewRef)
	if err != nil {
		return "", err
	}
	if useReviewRef {
		return r.Repo.ResolveRefCommit(r.Request.ReviewRef)
	}
	return r.findLastCommit(currentCommit, currentCommit, r.Comments), nil
}

// GetBaseCommit returns the commit against which a review should be compared.
func (r *Review) GetBaseCommit() (string, error) {
	if !r.IsOpen() {
		if r.Request.BaseCommit != "" {
			return r.Request.BaseCommit, nil
		}
		// This means the review has been submitted, but did not specify a base commit.
		// In this case, we have to treat the last parent commit as the base. This is
		// usually what we want, since merging a target branch into a feature branch
		// results in the previous commit to the feature branch being the first parent,
		// and the latest commit to the target branch being the second parent.
		return r.Repo.GetLastParent(r.Revision)
	}
	targetRefHead, err := r.Repo.ResolveRefCommit(r.Request.TargetRef)
	if err != nil {
		return "", err
	}
	leftHandSide := targetRefHead
	rightHandSide := r.Revision
	if r.Request.ReviewRef != "" {
		if reviewRefHead, err := r.Repo.ResolveRefCommit(r.Request.ReviewRef); err == nil {
			rightHandSide = reviewRefHead
		}
	}
	return r.Repo.MergeBase(leftHandSide, rightHandSide)
}

// ListCommits lists the commits included in a review.
func (r *Review) ListCommits() ([]string, error) {
	baseCommit, err := r.GetBaseCommit()
	if err != nil {
		return nil, err
	}
	headCommit, err := r.GetHeadCommit()
	if err != nil {
		return nil, err
	}
	return r.Repo.ListCommitsBetween(baseCommit, headCommit)
}

// GetDiff returns the diff for a review.
func (r *Review) GetDiff(diffArgs ...string) (string, error) {
	var baseCommit, headCommit string
	baseCommit, err := r.GetBaseCommit()
	if err == nil {
		headCommit, err = r.GetHeadCommit()
	}
	if err == nil {
		return r.Repo.Diff(baseCommit, headCommit, diffArgs...)
	}
	return "", err
}

// AddComment adds the given comment to the review.
func (r *Review) AddComment(c comment.Comment) error { commentNote, err := c.Write() if err != nil { return err } r.Repo.AppendNote(comment.Ref, r.Revision, commentNote) return nil } // Rebase performs an interactive rebase of the review onto its target ref. // // If the 'archivePrevious' argument is true, then the previous head of the // review will be added to the 'refs/devtools/archives/reviews' ref prior // to being rewritten. That ensures the review history is kept from being // garbage collected. func (r *Review) Rebase(archivePrevious bool) error { if archivePrevious { orig, err := r.GetHeadCommit() if err != nil { return err } if err := r.Repo.ArchiveRef(orig, archiveRef); err != nil { return err } } if err := r.Repo.SwitchToRef(r.Request.ReviewRef); err != nil { return err } err := r.Repo.RebaseRef(r.Request.TargetRef) if err != nil { return err } alias, err := r.Repo.GetCommitHash("HEAD") if err != nil { return err } r.Request.Alias = alias newNote, err := r.Request.Write() if err != nil { return err } return r.Repo.AppendNote(request.Ref, r.Revision, newNote) } // RebaseAndSign performs an interactive rebase of the review onto its // target ref. It signs the result of the rebase as well as (re)signs // the review request itself. // // If the 'archivePrevious' argument is true, then the previous head of the // review will be added to the 'refs/devtools/archives/reviews' ref prior // to being rewritten. That ensures the review history is kept from being // garbage collected. 
func (r *Review) RebaseAndSign(archivePrevious bool) error {
	if archivePrevious {
		orig, err := r.GetHeadCommit()
		if err != nil {
			return err
		}
		if err := r.Repo.ArchiveRef(orig, archiveRef); err != nil {
			return err
		}
	}
	if err := r.Repo.SwitchToRef(r.Request.ReviewRef); err != nil {
		return err
	}
	err := r.Repo.RebaseAndSignRef(r.Request.TargetRef)
	if err != nil {
		return err
	}
	alias, err := r.Repo.GetCommitHash("HEAD")
	if err != nil {
		return err
	}
	r.Request.Alias = alias
	// Re-sign the request itself, since changing the alias invalidated any
	// previous signature over it.
	key, err := r.Repo.GetUserSigningKey()
	if err != nil {
		return err
	}
	err = gpg.Sign(key, &r.Request)
	if err != nil {
		return err
	}
	newNote, err := r.Request.Write()
	if err != nil {
		return err
	}
	return r.Repo.AppendNote(request.Ref, r.Revision, newNote)
}

// wellKnownCommitForPath returns a deterministic, synthetic commit used to
// anchor detached comments for the given path. All commit metadata is fixed,
// so the same path always maps to the same commit hash. When 'archive' is
// true, the commit is also archived to keep it from being garbage collected.
func wellKnownCommitForPath(repo repository.Repo, path string, archive bool) (string, error) {
	commitDetails := &repository.CommitDetails{
		Author:         "nobody",
		AuthorEmail:    "nobody",
		AuthorTime:     "100000000 +0000",
		Committer:      "nobody",
		CommitterEmail: "nobody",
		Time:           "100000000 +0000",
		Summary:        path,
	}
	commitHash, err := repo.CreateCommitWithTree(commitDetails, emptyTree)
	if err != nil {
		return "", err
	}
	if !archive {
		return commitHash, nil
	}
	if err := repo.ArchiveRef(commitHash, archiveRef); err != nil {
		return "", err
	}
	return commitHash, nil
}

// AddDetachedComment adds a comment that is not attached to any real commit,
// anchoring it to the well-known synthetic commit for the comment's path.
func AddDetachedComment(repo repository.Repo, c *comment.Comment) error {
	path := c.Location.Path
	wellKnownCommit, err := wellKnownCommitForPath(repo, path, true)
	if err != nil {
		return fmt.Errorf("Failure finding the well-known commit for detached comments on %q: %v", path, err)
	}
	commentNote, err := c.Write()
	if err != nil {
		return err
	}
	return repo.AppendNote(comment.Ref, wellKnownCommit, commentNote)
}

// GetDetachedComments returns the comment threads anchored to the well-known
// synthetic commit for the given path.
func GetDetachedComments(repo repository.Repo, path string) ([]CommentThread, error) {
	wellKnownCommit, err := wellKnownCommitForPath(repo, path, false)
	if err != nil {
		return nil, fmt.Errorf("Failure finding the well-known commit for detached comments on %q: %v", path, err)
	}
	return GetComments(repo, wellKnownCommit)
}
// NOTE(review): this is a bare block — it is the body of
// (*CommentThread).updateResolvedStatus, which derives the thread's
// aggregate Resolved state from its children and its own comment.
{
	// Aggregate status of all child threads (nil when children are FYI-only).
	resolved := updateThreadsStatus(thread.Children)
	if resolved == nil {
		// No child carries a status: inherit this comment's own resolved bit.
		thread.Resolved = thread.Comment.Resolved
		return
	}
	if !*resolved {
		// At least one child is unaddressed: the whole thread is unresolved.
		thread.Resolved = resolved
		return
	}
	if thread.Comment.Resolved == nil || !*thread.Comment.Resolved {
		// Children are all resolved, but the root comment is not accepted:
		// treat the thread as FYI-only (nil status).
		thread.Resolved = nil
		return
	}
	thread.Resolved = resolved
}
identifier_body
review.go
/*
Copyright 2015 Google Inc. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package review contains the data structures used to represent code reviews.
package review

import (
	"bytes"
	"encoding/json"
	"fmt"
	"sort"

	"github.com/google/git-appraise/repository"
	"github.com/google/git-appraise/review/analyses"
	"github.com/google/git-appraise/review/ci"
	"github.com/google/git-appraise/review/comment"
	"github.com/google/git-appraise/review/gpg"
	"github.com/google/git-appraise/review/request"
)

// archiveRef is the notes ref under which previous review heads are archived
// so they are not garbage collected after a rebase.
const archiveRef = "refs/devtools/archives/reviews"

// emptyTree is the tree used for the synthetic commits that anchor
// detached comments.
var emptyTree = repository.NewTree(map[string]repository.TreeChild{})

// CommentThread represents the tree-based hierarchy of comments.
//
// The Resolved field represents the aggregate status of the entire thread. If
// it is set to false, then it indicates that there is an unaddressed comment
// in the thread. If it is unset, then that means that the root comment is an
// FYI only, and that there are no unaddressed comments. If it is set to true,
// then that means that there are no unaddressed comments, and that the root
// comment has its resolved bit set to true.
type CommentThread struct {
	Hash    string          `json:"hash,omitempty"`
	Comment comment.Comment `json:"comment"`
	// Original is the root comment as first written; Comment holds the
	// latest edit when the thread has been edited.
	Original *comment.Comment   `json:"original,omitempty"`
	Edits    []*comment.Comment `json:"edits,omitempty"`
	Children []CommentThread    `json:"children,omitempty"`
	// Resolved is the aggregate status of the thread (see type comment).
	Resolved *bool `json:"resolved,omitempty"`
	Edited   bool  `json:"edited,omitempty"`
}

// Summary represents the high-level state of a code review.
//
// This high-level state corresponds to the data that can be quickly read
// directly from the repo, so other methods that need to operate on a lot
// of reviews (such as listing the open reviews) should prefer operating on
// the summary rather than the details.
//
// Review summaries have two status fields which are orthogonal:
// 1. Resolved indicates if a reviewer has accepted or rejected the change.
// 2. Submitted indicates if the change has been incorporated into the target.
type Summary struct {
	Repo     repository.Repo `json:"-"`
	Revision string          `json:"revision"`
	// Request is the most recent review request; AllRequests holds every
	// parsed request in timestamp order.
	Request     request.Request   `json:"request"`
	AllRequests []request.Request `json:"-"`
	Comments    []CommentThread   `json:"comments,omitempty"`
	Resolved    *bool             `json:"resolved,omitempty"`
	Submitted   bool              `json:"submitted"`
}

// Review represents the entire state of a code review.
//
// This extends Summary to also include a list of reports for both the
// continuous integration status, and the static analysis runs. Those reports
// correspond to either the current commit in the review ref (for pending
// reviews), or to the last commented-upon commit (for submitted reviews).
type Review struct {
	*Summary
	Reports  []ci.Report       `json:"reports,omitempty"`
	Analyses []analyses.Report `json:"analyses,omitempty"`
}

// commentsByTimestamp sorts comments in ascending timestamp order.
type commentsByTimestamp []*comment.Comment

// Interface methods for sorting comment threads by timestamp
func (cs commentsByTimestamp) Len() int {
	return len(cs)
}
func (cs commentsByTimestamp) Swap(i, j int) {
	cs[i], cs[j] = cs[j], cs[i]
}
func (cs commentsByTimestamp) Less(i, j int) bool {
	return cs[i].Timestamp < cs[j].Timestamp
}

// byTimestamp sorts comment threads by their root comment's timestamp.
type byTimestamp []CommentThread

// Interface methods for sorting comment threads by timestamp
func (threads byTimestamp) Len() int {
	return len(threads)
}
func (threads byTimestamp) Swap(i, j int) {
	threads[i], threads[j] = threads[j], threads[i]
}
func (threads byTimestamp) Less(i, j int) bool {
	return threads[i].Comment.Timestamp < threads[j].Comment.Timestamp
}

// requestsByTimestamp sorts review requests in ascending timestamp order.
type requestsByTimestamp []request.Request

// Interface methods for sorting review requests by timestamp
func (requests requestsByTimestamp) Len() int {
	return len(requests)
}
func (requests requestsByTimestamp) Swap(i, j int) {
	requests[i], requests[j] = requests[j], requests[i]
}
func (requests requestsByTimestamp) Less(i, j int) bool {
	return requests[i].Timestamp < requests[j].Timestamp
}

// summariesWithNewestRequestsFirst sorts summaries newest-first.
type summariesWithNewestRequestsFirst []Summary

// Interface methods for sorting review summaries in reverse chronological order
func (summaries summariesWithNewestRequestsFirst) Len() int {
	return len(summaries)
}
func (summaries summariesWithNewestRequestsFirst) Swap(i, j int) {
	summaries[i], summaries[j] = summaries[j], summaries[i]
}
func (summaries summariesWithNewestRequestsFirst) Less(i, j int) bool {
	return summaries[i].Request.Timestamp > summaries[j].Request.Timestamp
}

// updateThreadsStatus calculates the aggregate status of a sequence of comment threads.
//
// The aggregate status is the conjunction of all of the non-nil child statuses.
//
// This has the side-effect of setting the "Resolved" field of all descendant comment threads.
func updateThreadsStatus(threads []CommentThread) *bool {
	sort.Stable(byTimestamp(threads))
	noUnresolved := true
	// result stays nil until at least one thread carries a non-nil status.
	var result *bool
	for i := range threads {
		thread := &threads[i]
		thread.updateResolvedStatus()
		if thread.Resolved != nil {
			noUnresolved = noUnresolved && *thread.Resolved
			result = &noUnresolved
		}
	}
	return result
}

// updateResolvedStatus calculates the aggregate status of a single comment thread,
// and updates the "Resolved" field of that thread accordingly.
// NOTE(review): this chunk of the file is truncated here — the remainder of
// this method's body is not visible at this point in the source.
func (thread *CommentThread) updateResolvedStatus() {
	resolved := updateThreadsStatus(thread.Children)
	if resolved == nil {
		// No child has a status; inherit this comment's own resolved bit.
		thread.Resolved = thread.Comment.Resolved
		return
	}
		// (continuation of updateResolvedStatus — the opening of this branch
		// is outside the visible chunk)
		return
	}
	if thread.Comment.Resolved == nil || !*thread.Comment.Resolved {
		// Children resolved but root comment not accepted: FYI-only (nil).
		thread.Resolved = nil
		return
	}
	thread.Resolved = resolved
}

// Verify verifies the signature on a comment.
func (thread *CommentThread) Verify() error {
	err := gpg.Verify(&thread.Comment)
	if err != nil {
		hash, _ := thread.Comment.Hash()
		return fmt.Errorf("verification of comment [%s] failed: %s", hash, err)
	}
	for _, child := range thread.Children {
		err = child.Verify()
		if err != nil {
			return err
		}
	}
	return nil
}

// mutableThread is an internal-only data structure used to store partially constructed comment threads.
type mutableThread struct {
	Hash     string
	Comment  comment.Comment
	Edits    []*comment.Comment
	Children []*mutableThread
}

// fixMutableThread is a helper method to finalize a mutableThread struct
// (partially constructed comment thread) as a CommentThread struct
// (fully constructed comment thread).
func fixMutableThread(mutableThread *mutableThread) CommentThread {
	var children []CommentThread
	edited := len(mutableThread.Edits) > 0
	for _, mutableChild := range mutableThread.Children {
		child := fixMutableThread(mutableChild)
		// A thread counts as edited if any descendant was edited.
		if (!edited) && child.Edited {
			edited = true
		}
		children = append(children, child)
	}
	// When edits exist, the newest edit replaces the root comment's content.
	comment := &mutableThread.Comment
	if len(mutableThread.Edits) > 0 {
		sort.Stable(commentsByTimestamp(mutableThread.Edits))
		comment = mutableThread.Edits[len(mutableThread.Edits)-1]
	}
	return CommentThread{
		Hash:     mutableThread.Hash,
		Comment:  *comment,
		Original: &mutableThread.Comment,
		Edits:    mutableThread.Edits,
		Children: children,
		Edited:   edited,
	}
}

// This function builds the comment thread tree from the log-based list of comments.
//
// Since the comments can be processed in any order, this uses an internal mutable
// data structure, and then converts it to the proper CommentThread structure at the end.
func buildCommentThreads(commentsByHash map[string]comment.Comment) []CommentThread {
	// First pass: wrap every comment in a mutable thread node, keyed by hash.
	threadsByHash := make(map[string]*mutableThread)
	for hash, comment := range commentsByHash {
		thread, ok := threadsByHash[hash]
		if !ok {
			thread = &mutableThread{
				Hash:    hash,
				Comment: comment,
			}
			threadsByHash[hash] = thread
		}
	}
	// Second pass: link edits to their originals, replies to their parents,
	// and collect the hashes of root comments. Dangling references (missing
	// original/parent) are silently dropped.
	var rootHashes []string
	for hash, thread := range threadsByHash {
		if thread.Comment.Original != "" {
			original, ok := threadsByHash[thread.Comment.Original]
			if ok {
				original.Edits = append(original.Edits, &thread.Comment)
			}
		} else if thread.Comment.Parent == "" {
			rootHashes = append(rootHashes, hash)
		} else {
			parent, ok := threadsByHash[thread.Comment.Parent]
			if ok {
				parent.Children = append(parent.Children, thread)
			}
		}
	}
	// Final pass: freeze each root's mutable tree into a CommentThread.
	var threads []CommentThread
	for _, hash := range rootHashes {
		threads = append(threads, fixMutableThread(threadsByHash[hash]))
	}
	return threads
}

// getCommentsFromNotes parses the log-structured sequence of comments for a commit,
// and then builds the corresponding tree-structured comment threads.
// Builds tree-structured comment threads from the raw comment notes; the
// *bool is the aggregate resolved status (nil when all threads are FYI-only).
// NOTE(review): repo and revision are unused — presumably kept for symmetry
// with getSummaryFromNotes; confirm.
func getCommentsFromNotes(repo repository.Repo, revision string, commentNotes []repository.Note) ([]CommentThread, *bool) {
	commentsByHash := comment.ParseAllValid(commentNotes)
	comments := buildCommentThreads(commentsByHash)
	resolved := updateThreadsStatus(comments)
	return comments, resolved
}

// Builds a Summary from raw request and comment notes; the most recent
// request (by timestamp) becomes the Summary's primary Request.
func getSummaryFromNotes(repo repository.Repo, revision string, requestNotes, commentNotes []repository.Note) (*Summary, error) {
	requests := request.ParseAllValid(requestNotes)
	if requests == nil {
		return nil, fmt.Errorf("Could not find any review requests for %q", revision)
	}
	sort.Stable(requestsByTimestamp(requests))
	reviewSummary := Summary{
		Repo:        repo,
		Revision:    revision,
		Request:     requests[len(requests)-1],
		AllRequests: requests,
	}
	comments, resolved := getCommentsFromNotes(repo, revision, commentNotes)
	reviewSummary.Comments = comments
	reviewSummary.Resolved = resolved
	return &reviewSummary, nil
}

// GetComments returns the comment threads attached to the given revision.
func GetComments(repo repository.Repo, revision string) ([]CommentThread, error) {
	commentNotes := repo.GetNotes(comment.Ref, revision)
	c, _ := getCommentsFromNotes(repo, revision, commentNotes)
	return c, nil
}

// GetSummaryViaRefs returns the summary of the code review specified by its revision
// and the references which contain that review's summary and comments.
//
// If no review request exists, the returned review summary is nil.
func GetSummaryViaRefs(repo repository.Repo, requestRef, commentRef, revision string) (*Summary, error) {
	if err := repo.VerifyCommit(revision); err != nil {
		return nil, fmt.Errorf("Could not find a commit named %q", revision)
	}
	requestNotes := repo.GetNotes(requestRef, revision)
	commentNotes := repo.GetNotes(commentRef, revision)
	summary, err := getSummaryFromNotes(repo, revision, requestNotes, commentNotes)
	if err != nil {
		return nil, err
	}
	// A rebased review's alias names the rewritten head commit.
	currentCommit := revision
	if summary.Request.Alias != "" {
		currentCommit = summary.Request.Alias
	}
	if !summary.IsAbandoned() {
		submitted, err := repo.IsAncestor(currentCommit, summary.Request.TargetRef)
		if err != nil {
			return nil, err
		}
		summary.Submitted = submitted
	}
	return summary, nil
}

// GetSummary returns the summary of the specified code review.
//
// If no review request exists, the returned review summary is nil.
func GetSummary(repo repository.Repo, revision string) (*Summary, error) {
	return GetSummaryViaRefs(repo, request.Ref, comment.Ref, revision)
}

// Details returns the detailed review for the given summary. CI and analysis
// reports are attached only when the head commit can be determined.
func (r *Summary) Details() (*Review, error) {
	review := Review{
		Summary: r,
	}
	currentCommit, err := review.GetHeadCommit()
	if err == nil {
		review.Reports = ci.ParseAllValid(review.Repo.GetNotes(ci.Ref, currentCommit))
		review.Analyses = analyses.ParseAllValid(review.Repo.GetNotes(analyses.Ref, currentCommit))
	}
	return &review, nil
}

// IsAbandoned returns whether or not the given review has been abandoned.
func (r *Summary) IsAbandoned() bool {
	return r.Request.TargetRef == ""
}

// IsOpen returns whether or not the given review is still open (neither submitted nor abandoned).
func (r *Summary) IsOpen() bool {
	return !r.Submitted && !r.IsAbandoned()
}

// Verify checks that a summary's request and comments are signed, and that
// those signatures are verifiable.
func (r *Summary) Verify() error {
	err := gpg.Verify(&r.Request)
	if err != nil {
		return fmt.Errorf("couldn't verify request targeting: %q: %s", r.Request.TargetRef, err)
	}
	for _, thread := range r.Comments {
		err := thread.Verify()
		if err != nil {
			return err
		}
	}
	return nil
}

// Get returns the specified code review.
//
// If no review request exists, the returned review is nil.
func Get(repo repository.Repo, revision string) (*Review, error) {
	summary, err := GetSummary(repo, revision)
	if err != nil {
		return nil, err
	}
	if summary == nil {
		return nil, nil
	}
	return summary.Details()
}

// Returns a predicate reporting whether a commit is in a ref's history,
// memoizing each ref's commit set.
func getIsSubmittedCheck(repo repository.Repo) func(ref, commit string) bool {
	refCommitsMap := make(map[string]map[string]bool)
	getRefCommitsMap := func(ref string) map[string]bool {
		commitsMap, ok := refCommitsMap[ref]
		if ok {
			return commitsMap
		}
		commitsMap = make(map[string]bool)
		for _, commit := range repo.ListCommits(ref) {
			commitsMap[commit] = true
		}
		refCommitsMap[ref] = commitsMap
		return commitsMap
	}
	return func(ref, commit string) bool {
		return getRefCommitsMap(ref)[commit]
	}
}

// Reads every stored review in no particular order; unparseable request
// notes are skipped.
func unsortedListAll(repo repository.Repo) []Summary {
	reviewNotesMap, err := repo.GetAllNotes(request.Ref)
	if err != nil {
		return nil
	}
	discussNotesMap, err := repo.GetAllNotes(comment.Ref)
	if err != nil {
		return nil
	}
	isSubmittedCheck := getIsSubmittedCheck(repo)
	var reviews []Summary
	for commit, notes := range reviewNotesMap {
		summary, err := getSummaryFromNotes(repo, commit, notes, discussNotesMap[commit])
		if err != nil {
			continue
		}
		if !summary.IsAbandoned() {
			summary.Submitted = isSubmittedCheck(summary.Request.TargetRef, summary.getStartingCommit())
		}
		reviews = append(reviews, *summary)
	}
	return reviews
}

// ListAll returns all reviews stored in the git-notes.
func ListAll(repo repository.Repo) []Summary {
	reviews := unsortedListAll(repo)
	sort.Stable(summariesWithNewestRequestsFirst(reviews))
	return reviews
}

// ListOpen returns all reviews that are not yet incorporated into their target refs.
func ListOpen(repo repository.Repo) []Summary {
	var openReviews []Summary
	for _, review := range unsortedListAll(repo) {
		if review.IsOpen() {
			openReviews = append(openReviews, review)
		}
	}
	sort.Stable(summariesWithNewestRequestsFirst(openReviews))
	return openReviews
}

// GetCurrent returns the current, open code review.
//
// If there are multiple matching reviews, then an error is returned.
func GetCurrent(repo repository.Repo) (*Review, error) {
	reviewRef, err := repo.GetHeadRef()
	if err != nil {
		return nil, err
	}
	var matchingReviews []Summary
	for _, review := range ListOpen(repo) {
		if review.Request.ReviewRef == reviewRef {
			matchingReviews = append(matchingReviews, review)
		}
	}
	// No open review for the current ref is not an error.
	if matchingReviews == nil {
		return nil, nil
	}
	if len(matchingReviews) != 1 {
		return nil, fmt.Errorf("There are %d open reviews for the ref \"%s\"", len(matchingReviews), reviewRef)
	}
	return matchingReviews[0].Details()
}

// GetBuildStatusMessage returns a string of the current build-and-test status
// of the review, or "unknown" if the build-and-test status cannot be determined.
func (r *Review) GetBuildStatusMessage() string {
	statusMessage := "unknown"
	ciReport, err := ci.GetLatestCIReport(r.Reports)
	if err != nil {
		return fmt.Sprintf("unknown: %s", err)
	}
	if ciReport != nil {
		statusMessage = fmt.Sprintf("%s (%q)", ciReport.Status, ciReport.URL)
	}
	return statusMessage
}

// GetAnalysesNotes returns all of the notes from the most recent static
// analysis run recorded in the git notes.
func (r *Review) GetAnalysesNotes() ([]analyses.Note, error) {
	latestAnalyses, err := analyses.GetLatestAnalysesReport(r.Analyses)
	if err != nil {
		return nil, err
	}
	if latestAnalyses == nil {
		return nil, fmt.Errorf("No analyses available")
	}
	return latestAnalyses.GetNotes()
}

// GetAnalysesMessage returns a string summarizing the results of the
// most recent static analyses.
func (r *Review) GetAnalysesMessage() string {
	latestAnalyses, err := analyses.GetLatestAnalysesReport(r.Analyses)
	if err != nil {
		return err.Error()
	}
	if latestAnalyses == nil {
		return "No analyses available"
	}
	status := latestAnalyses.Status
	if status != "" && status != analyses.StatusNeedsMoreWork {
		return status
	}
	analysesNotes, err := latestAnalyses.GetNotes()
	if err != nil {
		return err.Error()
	}
	if analysesNotes == nil {
		return "passed"
	}
	return fmt.Sprintf("%d warnings\n", len(analysesNotes))
	// TODO(ojarjur): Figure out the best place to display the actual notes
}

// prettyPrintJSON re-indents the given JSON bytes for human-readable output.
func prettyPrintJSON(jsonBytes []byte) (string, error) {
	var prettyBytes bytes.Buffer
	err := json.Indent(&prettyBytes, jsonBytes, "", " ")
	if err != nil {
		return "", err
	}
	return prettyBytes.String(), nil
}

// GetCommentsJSON returns the pretty printed JSON for a slice of comment threads.
func GetCommentsJSON(cs []CommentThread) (string, error) {
	jsonBytes, err := json.Marshal(cs)
	if err != nil {
		return "", err
	}
	return prettyPrintJSON(jsonBytes)
}

// GetJSON returns the pretty printed JSON for a review summary.
func (r *Summary) GetJSON() (string, error) {
	jsonBytes, err := json.Marshal(*r)
	if err != nil {
		return "", err
	}
	return prettyPrintJSON(jsonBytes)
}

// GetJSON returns the pretty printed JSON for a review.
func (r *Review) GetJSON() (string, error) {
	jsonBytes, err := json.Marshal(*r)
	if err != nil {
		return "", err
	}
	return prettyPrintJSON(jsonBytes)
}

// findLastCommit returns the later (newest) commit from the union of the provided commit
// and all of the commits that are referenced in the given comment threads.
func (r *Review) findLastCommit(startingCommit, latestCommit string, commentThreads []CommentThread) string {
	// Ancestry checks take precedence; timestamps only break ties.
	isLater := func(commit string) bool {
		if err := r.Repo.VerifyCommit(commit); err != nil {
			return false
		}
		if t, e := r.Repo.IsAncestor(latestCommit, commit); e == nil && t {
			return true
		}
		if t, e := r.Repo.IsAncestor(startingCommit, commit); e == nil && !t {
			return false
		}
		if t, e := r.Repo.IsAncestor(commit, latestCommit); e == nil && t {
			return false
		}
		ct, err := r.Repo.GetCommitTime(commit)
		if err != nil {
			return false
		}
		lt, err := r.Repo.GetCommitTime(latestCommit)
		if err != nil {
			return true
		}
		return ct > lt
	}
	updateLatest := func(commit string) {
		if commit == "" {
			return
		}
		if isLater(commit) {
			latestCommit = commit
		}
	}
	for _, commentThread := range commentThreads {
		comment := commentThread.Comment
		if comment.Location != nil {
			updateLatest(comment.Location.Commit)
		}
		updateLatest(r.findLastCommit(startingCommit, latestCommit, commentThread.Children))
	}
	return latestCommit
}

// Returns the rebase alias when present, otherwise the reviewed revision.
func (r *Summary) getStartingCommit() string {
	if r.Request.Alias != "" {
		return r.Request.Alias
	}
	return r.Revision
}

// GetHeadCommit returns the latest commit in a review.
func (r *Review) GetHeadCommit() (string, error) {
	currentCommit := r.getStartingCommit()
	if r.Request.ReviewRef == "" {
		return currentCommit, nil
	}
	if r.Submitted {
		// The review has already been submitted.
		// Go through the list of comments and find the last commented upon commit.
		return r.findLastCommit(currentCommit, currentCommit, r.Comments), nil
	}
	// It is possible that the review ref is no longer an ancestor of the starting
	// commit (e.g. if a rebase left us in a detached head), in which case we have to
	// find the head commit without using it.
	useReviewRef, err := r.Repo.IsAncestor(currentCommit, r.Request.ReviewRef)
	if err != nil {
		return "", err
	}
	if useReviewRef {
		return r.Repo.ResolveRefCommit(r.Request.ReviewRef)
	}
	return r.findLastCommit(currentCommit, currentCommit, r.Comments), nil
}

// GetBaseCommit returns the commit against which a review should be compared.
func (r *Review) GetBaseCommit() (string, error) {
	if !r.IsOpen() {
		if r.Request.BaseCommit != "" {
			return r.Request.BaseCommit, nil
		}
		// This means the review has been submitted, but did not specify a base commit.
		// In this case, we have to treat the last parent commit as the base. This is
		// usually what we want, since merging a target branch into a feature branch
		// results in the previous commit to the feature branch being the first parent,
		// and the latest commit to the target branch being the second parent.
		return r.Repo.GetLastParent(r.Revision)
	}
	targetRefHead, err := r.Repo.ResolveRefCommit(r.Request.TargetRef)
	if err != nil {
		return "", err
	}
	leftHandSide := targetRefHead
	rightHandSide := r.Revision
	if r.Request.ReviewRef != "" {
		if reviewRefHead, err := r.Repo.ResolveRefCommit(r.Request.ReviewRef); err == nil {
			rightHandSide = reviewRefHead
		}
	}
	return r.Repo.MergeBase(leftHandSide, rightHandSide)
}

// ListCommits lists the commits included in a review.
func (r *Review) ListCommits() ([]string, error) {
	baseCommit, err := r.GetBaseCommit()
	if err != nil {
		return nil, err
	}
	headCommit, err := r.GetHeadCommit()
	if err != nil {
		return nil, err
	}
	return r.Repo.ListCommitsBetween(baseCommit, headCommit)
}

// GetDiff returns the diff for a review.
func (r *Review) GetDiff(diffArgs ...string) (string, error) {
	var baseCommit, headCommit string
	baseCommit, err := r.GetBaseCommit()
	if err == nil {
		headCommit, err = r.GetHeadCommit()
	}
	if err == nil {
		return r.Repo.Diff(baseCommit, headCommit, diffArgs...)
	}
	return "", err
}

// AddComment adds the given comment to the review.
func (r *Review) AddComment(c comment.Comment) error { commentNote, err := c.Write() if err != nil { return err } r.Repo.AppendNote(comment.Ref, r.Revision, commentNote) return nil } // Rebase performs an interactive rebase of the review onto its target ref. // // If the 'archivePrevious' argument is true, then the previous head of the // review will be added to the 'refs/devtools/archives/reviews' ref prior // to being rewritten. That ensures the review history is kept from being // garbage collected. func (r *Review) Rebase(archivePrevious bool) error { if archivePrevious { orig, err := r.GetHeadCommit() if err != nil { return err } if err := r.Repo.ArchiveRef(orig, archiveRef); err != nil { return err } } if err := r.Repo.SwitchToRef(r.Request.ReviewRef); err != nil { return err } err := r.Repo.RebaseRef(r.Request.TargetRef) if err != nil { return err } alias, err := r.Repo.GetCommitHash("HEAD") if err != nil { return err } r.Request.Alias = alias newNote, err := r.Request.Write() if err != nil { return err } return r.Repo.AppendNote(request.Ref, r.Revision, newNote) } // RebaseAndSign performs an interactive rebase of the review onto its // target ref. It signs the result of the rebase as well as (re)signs // the review request itself. // // If the 'archivePrevious' argument is true, then the previous head of the // review will be added to the 'refs/devtools/archives/reviews' ref prior // to being rewritten. That ensures the review history is kept from being // garbage collected. 
func (r *Review) RebaseAndSign(archivePrevious bool) error { if archivePrevious { orig, err := r.GetHeadCommit() if err != nil { return err } if err := r.Repo.ArchiveRef(orig, archiveRef); err != nil { return err } } if err := r.Repo.SwitchToRef(r.Request.ReviewRef); err != nil { return err } err := r.Repo.RebaseAndSignRef(r.Request.TargetRef) if err != nil { return err } alias, err := r.Repo.GetCommitHash("HEAD") if err != nil { return err } r.Request.Alias = alias key, err := r.Repo.GetUserSigningKey() if err != nil { return err } err = gpg.Sign(key, &r.Request) if err != nil { return err } newNote, err := r.Request.Write() if err != nil { return err } return r.Repo.AppendNote(request.Ref, r.Revision, newNote) } func wellKnownCommitForPath(repo repository.Repo, path string, archive bool) (string, error) { commitDetails := &repository.CommitDetails{ Author: "nobody", AuthorEmail: "nobody", AuthorTime: "100000000 +0000", Committer: "nobody", CommitterEmail: "nobody", Time: "100000000 +0000", Summary: path, } commitHash, err := repo.CreateCommitWithTree(commitDetails, emptyTree) if err != nil { return "", err } if !archive { return commitHash, nil } if err := repo.ArchiveRef(commitHash, archiveRef); err != nil { return "", err } return commitHash, nil } func AddDetachedComment(repo repository.Repo, c *comment.Comment) error { path := c.Location.Path wellKnownCommit, err := wellKnownCommitForPath(repo, path, true) if err != nil { return fmt.Errorf("Failure finding the well-known commit for detached comments on %q: %v", path, err) } commentNote, err := c.Write() if err != nil { return err } return repo.AppendNote(comment.Ref, wellKnownCommit, commentNote) } func GetDetachedComments(repo repository.Repo, path string) ([]CommentThread, error) { wellKnownCommit, err := wellKnownCommitForPath(repo, path, false) if err != nil { return nil, fmt.Errorf("Failure finding the well-known commit for detached comments on %q: %v", path, err) } return GetComments(repo, 
wellKnownCommit) }
if !*resolved { thread.Resolved = resolved
random_line_split
review.go
/* Copyright 2015 Google Inc. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package review contains the data structures used to represent code reviews. package review import ( "bytes" "encoding/json" "fmt" "sort" "github.com/google/git-appraise/repository" "github.com/google/git-appraise/review/analyses" "github.com/google/git-appraise/review/ci" "github.com/google/git-appraise/review/comment" "github.com/google/git-appraise/review/gpg" "github.com/google/git-appraise/review/request" ) const archiveRef = "refs/devtools/archives/reviews" var emptyTree = repository.NewTree(map[string]repository.TreeChild{}) // CommentThread represents the tree-based hierarchy of comments. // // The Resolved field represents the aggregate status of the entire thread. If // it is set to false, then it indicates that there is an unaddressed comment // in the thread. If it is unset, then that means that the root comment is an // FYI only, and that there are no unaddressed comments. If it is set to true, // then that means that there are no unaddressed comments, and that the root // comment has its resolved bit set to true. 
type CommentThread struct { Hash string `json:"hash,omitempty"` Comment comment.Comment `json:"comment"` Original *comment.Comment `json:"original,omitempty"` Edits []*comment.Comment `json:"edits,omitempty"` Children []CommentThread `json:"children,omitempty"` Resolved *bool `json:"resolved,omitempty"` Edited bool `json:"edited,omitempty"` } // Summary represents the high-level state of a code review. // // This high-level state corresponds to the data that can be quickly read // directly from the repo, so other methods that need to operate on a lot // of reviews (such as listing the open reviews) should prefer operating on // the summary rather than the details. // // Review summaries have two status fields which are orthogonal: // 1. Resolved indicates if a reviewer has accepted or rejected the change. // 2. Submitted indicates if the change has been incorporated into the target. type Summary struct { Repo repository.Repo `json:"-"` Revision string `json:"revision"` Request request.Request `json:"request"` AllRequests []request.Request `json:"-"` Comments []CommentThread `json:"comments,omitempty"` Resolved *bool `json:"resolved,omitempty"` Submitted bool `json:"submitted"` } // Review represents the entire state of a code review. // // This extends Summary to also include a list of reports for both the // continuous integration status, and the static analysis runs. Those reports // correspond to either the current commit in the review ref (for pending // reviews), or to the last commented-upon commit (for submitted reviews). 
type Review struct { *Summary Reports []ci.Report `json:"reports,omitempty"` Analyses []analyses.Report `json:"analyses,omitempty"` } type commentsByTimestamp []*comment.Comment // Interface methods for sorting comment threads by timestamp func (cs commentsByTimestamp) Len() int { return len(cs) } func (cs commentsByTimestamp) Swap(i, j int) { cs[i], cs[j] = cs[j], cs[i] } func (cs commentsByTimestamp) Less(i, j int) bool { return cs[i].Timestamp < cs[j].Timestamp } type byTimestamp []CommentThread // Interface methods for sorting comment threads by timestamp func (threads byTimestamp) Len() int { return len(threads) } func (threads byTimestamp) Swap(i, j int) { threads[i], threads[j] = threads[j], threads[i] } func (threads byTimestamp) Less(i, j int) bool { return threads[i].Comment.Timestamp < threads[j].Comment.Timestamp } type requestsByTimestamp []request.Request // Interface methods for sorting review requests by timestamp func (requests requestsByTimestamp) Len() int { return len(requests) } func (requests requestsByTimestamp) Swap(i, j int) { requests[i], requests[j] = requests[j], requests[i] } func (requests requestsByTimestamp) Less(i, j int) bool { return requests[i].Timestamp < requests[j].Timestamp } type summariesWithNewestRequestsFirst []Summary // Interface methods for sorting review summaries in reverse chronological order func (summaries summariesWithNewestRequestsFirst) Len() int { return len(summaries) } func (summaries summariesWithNewestRequestsFirst) Swap(i, j int) { summaries[i], summaries[j] = summaries[j], summaries[i] } func (summaries summariesWithNewestRequestsFirst) Less(i, j int) bool { return summaries[i].Request.Timestamp > summaries[j].Request.Timestamp } // updateThreadsStatus calculates the aggregate status of a sequence of comment threads. // // The aggregate status is the conjunction of all of the non-nil child statuses. // // This has the side-effect of setting the "Resolved" field of all descendant comment threads. 
func updateThreadsStatus(threads []CommentThread) *bool { sort.Stable(byTimestamp(threads)) noUnresolved := true var result *bool for i := range threads { thread := &threads[i] thread.updateResolvedStatus() if thread.Resolved != nil { noUnresolved = noUnresolved && *thread.Resolved result = &noUnresolved } } return result } // updateResolvedStatus calculates the aggregate status of a single comment thread, // and updates the "Resolved" field of that thread accordingly. func (thread *CommentThread) updateResolvedStatus() { resolved := updateThreadsStatus(thread.Children) if resolved == nil { thread.Resolved = thread.Comment.Resolved return } if !*resolved { thread.Resolved = resolved return } if thread.Comment.Resolved == nil || !*thread.Comment.Resolved { thread.Resolved = nil return } thread.Resolved = resolved } // Verify verifies the signature on a comment. func (thread *CommentThread) Verify() error { err := gpg.Verify(&thread.Comment) if err != nil { hash, _ := thread.Comment.Hash() return fmt.Errorf("verification of comment [%s] failed: %s", hash, err) } for _, child := range thread.Children { err = child.Verify() if err != nil { return err } } return nil } // mutableThread is an internal-only data structure used to store partially constructed comment threads. type mutableThread struct { Hash string Comment comment.Comment Edits []*comment.Comment Children []*mutableThread } // fixMutableThread is a helper method to finalize a mutableThread struct // (partially constructed comment thread) as a CommentThread struct // (fully constructed comment thread). 
func fixMutableThread(mutableThread *mutableThread) CommentThread { var children []CommentThread edited := len(mutableThread.Edits) > 0 for _, mutableChild := range mutableThread.Children { child := fixMutableThread(mutableChild) if (!edited) && child.Edited { edited = true } children = append(children, child) } comment := &mutableThread.Comment if len(mutableThread.Edits) > 0 { sort.Stable(commentsByTimestamp(mutableThread.Edits)) comment = mutableThread.Edits[len(mutableThread.Edits)-1] } return CommentThread{ Hash: mutableThread.Hash, Comment: *comment, Original: &mutableThread.Comment, Edits: mutableThread.Edits, Children: children, Edited: edited, } } // This function builds the comment thread tree from the log-based list of comments. // // Since the comments can be processed in any order, this uses an internal mutable // data structure, and then converts it to the proper CommentThread structure at the end. func buildCommentThreads(commentsByHash map[string]comment.Comment) []CommentThread { threadsByHash := make(map[string]*mutableThread) for hash, comment := range commentsByHash { thread, ok := threadsByHash[hash] if !ok { thread = &mutableThread{ Hash: hash, Comment: comment, } threadsByHash[hash] = thread } } var rootHashes []string for hash, thread := range threadsByHash { if thread.Comment.Original != "" { original, ok := threadsByHash[thread.Comment.Original] if ok { original.Edits = append(original.Edits, &thread.Comment) } } else if thread.Comment.Parent == "" { rootHashes = append(rootHashes, hash) } else { parent, ok := threadsByHash[thread.Comment.Parent] if ok { parent.Children = append(parent.Children, thread) } } } var threads []CommentThread for _, hash := range rootHashes { threads = append(threads, fixMutableThread(threadsByHash[hash])) } return threads } // getCommentsFromNotes parses the log-structured sequence of comments for a commit, // and then builds the corresponding tree-structured comment threads. 
func getCommentsFromNotes(repo repository.Repo, revision string, commentNotes []repository.Note) ([]CommentThread, *bool) { commentsByHash := comment.ParseAllValid(commentNotes) comments := buildCommentThreads(commentsByHash) resolved := updateThreadsStatus(comments) return comments, resolved } func getSummaryFromNotes(repo repository.Repo, revision string, requestNotes, commentNotes []repository.Note) (*Summary, error) { requests := request.ParseAllValid(requestNotes) if requests == nil { return nil, fmt.Errorf("Could not find any review requests for %q", revision) } sort.Stable(requestsByTimestamp(requests)) reviewSummary := Summary{ Repo: repo, Revision: revision, Request: requests[len(requests)-1], AllRequests: requests, } comments, resolved := getCommentsFromNotes(repo, revision, commentNotes) reviewSummary.Comments = comments reviewSummary.Resolved = resolved return &reviewSummary, nil } func GetComments(repo repository.Repo, revision string) ([]CommentThread, error) { commentNotes := repo.GetNotes(comment.Ref, revision) c, _ := getCommentsFromNotes(repo, revision, commentNotes) return c, nil } // GetSummary returns the summary of the code review specified by its revision // and the references which contain that reviews summary and comments. // // If no review request exists, the returned review summary is nil. 
func GetSummaryViaRefs(repo repository.Repo, requestRef, commentRef, revision string) (*Summary, error) { if err := repo.VerifyCommit(revision); err != nil { return nil, fmt.Errorf("Could not find a commit named %q", revision) } requestNotes := repo.GetNotes(requestRef, revision) commentNotes := repo.GetNotes(commentRef, revision) summary, err := getSummaryFromNotes(repo, revision, requestNotes, commentNotes) if err != nil { return nil, err } currentCommit := revision if summary.Request.Alias != "" { currentCommit = summary.Request.Alias } if !summary.IsAbandoned() { submitted, err := repo.IsAncestor(currentCommit, summary.Request.TargetRef) if err != nil { return nil, err } summary.Submitted = submitted } return summary, nil } // GetSummary returns the summary of the specified code review. // // If no review request exists, the returned review summary is nil. func GetSummary(repo repository.Repo, revision string) (*Summary, error) { return GetSummaryViaRefs(repo, request.Ref, comment.Ref, revision) } // Details returns the detailed review for the given summary. func (r *Summary) Details() (*Review, error) { review := Review{ Summary: r, } currentCommit, err := review.GetHeadCommit() if err == nil { review.Reports = ci.ParseAllValid(review.Repo.GetNotes(ci.Ref, currentCommit)) review.Analyses = analyses.ParseAllValid(review.Repo.GetNotes(analyses.Ref, currentCommit)) } return &review, nil } // IsAbandoned returns whether or not the given review has been abandoned. func (r *Summary) IsAbandoned() bool { return r.Request.TargetRef == "" } // IsOpen returns whether or not the given review is still open (neither submitted nor abandoned). func (r *Summary) IsOpen() bool { return !r.Submitted && !r.IsAbandoned() } // Verify returns whether or not a summary's comments are a) signed, and b) /// that those signatures are verifiable. 
func (r *Summary) Verify() error { err := gpg.Verify(&r.Request) if err != nil { return fmt.Errorf("couldn't verify request targeting: %q: %s", r.Request.TargetRef, err) } for _, thread := range r.Comments { err := thread.Verify() if err != nil { return err } } return nil } // Get returns the specified code review. // // If no review request exists, the returned review is nil. func Get(repo repository.Repo, revision string) (*Review, error) { summary, err := GetSummary(repo, revision) if err != nil { return nil, err } if summary == nil { return nil, nil } return summary.Details() } func getIsSubmittedCheck(repo repository.Repo) func(ref, commit string) bool { refCommitsMap := make(map[string]map[string]bool) getRefCommitsMap := func(ref string) map[string]bool { commitsMap, ok := refCommitsMap[ref] if ok { return commitsMap } commitsMap = make(map[string]bool) for _, commit := range repo.ListCommits(ref) { commitsMap[commit] = true } refCommitsMap[ref] = commitsMap return commitsMap } return func(ref, commit string) bool { return getRefCommitsMap(ref)[commit] } } func unsortedListAll(repo repository.Repo) []Summary { reviewNotesMap, err := repo.GetAllNotes(request.Ref) if err != nil { return nil } discussNotesMap, err := repo.GetAllNotes(comment.Ref) if err != nil { return nil } isSubmittedCheck := getIsSubmittedCheck(repo) var reviews []Summary for commit, notes := range reviewNotesMap { summary, err := getSummaryFromNotes(repo, commit, notes, discussNotesMap[commit]) if err != nil { continue } if !summary.IsAbandoned() { summary.Submitted = isSubmittedCheck(summary.Request.TargetRef, summary.getStartingCommit()) } reviews = append(reviews, *summary) } return reviews } // ListAll returns all reviews stored in the git-notes. func ListAll(repo repository.Repo) []Summary { reviews := unsortedListAll(repo) sort.Stable(summariesWithNewestRequestsFirst(reviews)) return reviews } // ListOpen returns all reviews that are not yet incorporated into their target refs. 
func ListOpen(repo repository.Repo) []Summary { var openReviews []Summary for _, review := range unsortedListAll(repo) { if review.IsOpen() { openReviews = append(openReviews, review) } } sort.Stable(summariesWithNewestRequestsFirst(openReviews)) return openReviews } // GetCurrent returns the current, open code review. // // If there are multiple matching reviews, then an error is returned. func GetCurrent(repo repository.Repo) (*Review, error) { reviewRef, err := repo.GetHeadRef() if err != nil { return nil, err } var matchingReviews []Summary for _, review := range ListOpen(repo) { if review.Request.ReviewRef == reviewRef { matchingReviews = append(matchingReviews, review) } } if matchingReviews == nil { return nil, nil } if len(matchingReviews) != 1 { return nil, fmt.Errorf("There are %d open reviews for the ref \"%s\"", len(matchingReviews), reviewRef) } return matchingReviews[0].Details() } // GetBuildStatusMessage returns a string of the current build-and-test status // of the review, or "unknown" if the build-and-test status cannot be determined. func (r *Review) GetBuildStatusMessage() string { statusMessage := "unknown" ciReport, err := ci.GetLatestCIReport(r.Reports) if err != nil { return fmt.Sprintf("unknown: %s", err) } if ciReport != nil { statusMessage = fmt.Sprintf("%s (%q)", ciReport.Status, ciReport.URL) } return statusMessage } // GetAnalysesNotes returns all of the notes from the most recent static // analysis run recorded in the git notes. func (r *Review) GetAnalysesNotes() ([]analyses.Note, error) { latestAnalyses, err := analyses.GetLatestAnalysesReport(r.Analyses) if err != nil { return nil, err } if latestAnalyses == nil { return nil, fmt.Errorf("No analyses available") } return latestAnalyses.GetNotes() } // GetAnalysesMessage returns a string summarizing the results of the // most recent static analyses. 
func (r *Review) GetAnalysesMessage() string { latestAnalyses, err := analyses.GetLatestAnalysesReport(r.Analyses) if err != nil { return err.Error() } if latestAnalyses == nil { return "No analyses available" } status := latestAnalyses.Status if status != "" && status != analyses.StatusNeedsMoreWork { return status } analysesNotes, err := latestAnalyses.GetNotes() if err != nil { return err.Error() } if analysesNotes == nil { return "passed" } return fmt.Sprintf("%d warnings\n", len(analysesNotes)) // TODO(ojarjur): Figure out the best place to display the actual notes } func prettyPrintJSON(jsonBytes []byte) (string, error) { var prettyBytes bytes.Buffer err := json.Indent(&prettyBytes, jsonBytes, "", " ") if err != nil { return "", err } return prettyBytes.String(), nil } // GetCommentsJSON returns the pretty printed JSON for a slice of comment threads. func GetCommentsJSON(cs []CommentThread) (string, error) { jsonBytes, err := json.Marshal(cs) if err != nil { return "", err } return prettyPrintJSON(jsonBytes) } // GetJSON returns the pretty printed JSON for a review summary. func (r *Summary) GetJSON() (string, error) { jsonBytes, err := json.Marshal(*r) if err != nil { return "", err } return prettyPrintJSON(jsonBytes) } // GetJSON returns the pretty printed JSON for a review. func (r *Review) GetJSON() (string, error) { jsonBytes, err := json.Marshal(*r) if err != nil { return "", err } return prettyPrintJSON(jsonBytes) } // findLastCommit returns the later (newest) commit from the union of the provided commit // and all of the commits that are referenced in the given comment threads. func (r *Review)
(startingCommit, latestCommit string, commentThreads []CommentThread) string { isLater := func(commit string) bool { if err := r.Repo.VerifyCommit(commit); err != nil { return false } if t, e := r.Repo.IsAncestor(latestCommit, commit); e == nil && t { return true } if t, e := r.Repo.IsAncestor(startingCommit, commit); e == nil && !t { return false } if t, e := r.Repo.IsAncestor(commit, latestCommit); e == nil && t { return false } ct, err := r.Repo.GetCommitTime(commit) if err != nil { return false } lt, err := r.Repo.GetCommitTime(latestCommit) if err != nil { return true } return ct > lt } updateLatest := func(commit string) { if commit == "" { return } if isLater(commit) { latestCommit = commit } } for _, commentThread := range commentThreads { comment := commentThread.Comment if comment.Location != nil { updateLatest(comment.Location.Commit) } updateLatest(r.findLastCommit(startingCommit, latestCommit, commentThread.Children)) } return latestCommit } func (r *Summary) getStartingCommit() string { if r.Request.Alias != "" { return r.Request.Alias } return r.Revision } // GetHeadCommit returns the latest commit in a review. func (r *Review) GetHeadCommit() (string, error) { currentCommit := r.getStartingCommit() if r.Request.ReviewRef == "" { return currentCommit, nil } if r.Submitted { // The review has already been submitted. // Go through the list of comments and find the last commented upon commit. return r.findLastCommit(currentCommit, currentCommit, r.Comments), nil } // It is possible that the review ref is no longer an ancestor of the starting // commit (e.g. if a rebase left us in a detached head), in which case we have to // find the head commit without using it. 
useReviewRef, err := r.Repo.IsAncestor(currentCommit, r.Request.ReviewRef) if err != nil { return "", err } if useReviewRef { return r.Repo.ResolveRefCommit(r.Request.ReviewRef) } return r.findLastCommit(currentCommit, currentCommit, r.Comments), nil } // GetBaseCommit returns the commit against which a review should be compared. func (r *Review) GetBaseCommit() (string, error) { if !r.IsOpen() { if r.Request.BaseCommit != "" { return r.Request.BaseCommit, nil } // This means the review has been submitted, but did not specify a base commit. // In this case, we have to treat the last parent commit as the base. This is // usually what we want, since merging a target branch into a feature branch // results in the previous commit to the feature branch being the first parent, // and the latest commit to the target branch being the second parent. return r.Repo.GetLastParent(r.Revision) } targetRefHead, err := r.Repo.ResolveRefCommit(r.Request.TargetRef) if err != nil { return "", err } leftHandSide := targetRefHead rightHandSide := r.Revision if r.Request.ReviewRef != "" { if reviewRefHead, err := r.Repo.ResolveRefCommit(r.Request.ReviewRef); err == nil { rightHandSide = reviewRefHead } } return r.Repo.MergeBase(leftHandSide, rightHandSide) } // ListCommits lists the commits included in a review. func (r *Review) ListCommits() ([]string, error) { baseCommit, err := r.GetBaseCommit() if err != nil { return nil, err } headCommit, err := r.GetHeadCommit() if err != nil { return nil, err } return r.Repo.ListCommitsBetween(baseCommit, headCommit) } // GetDiff returns the diff for a review. func (r *Review) GetDiff(diffArgs ...string) (string, error) { var baseCommit, headCommit string baseCommit, err := r.GetBaseCommit() if err == nil { headCommit, err = r.GetHeadCommit() } if err == nil { return r.Repo.Diff(baseCommit, headCommit, diffArgs...) } return "", err } // AddComment adds the given comment to the review. 
func (r *Review) AddComment(c comment.Comment) error { commentNote, err := c.Write() if err != nil { return err } r.Repo.AppendNote(comment.Ref, r.Revision, commentNote) return nil } // Rebase performs an interactive rebase of the review onto its target ref. // // If the 'archivePrevious' argument is true, then the previous head of the // review will be added to the 'refs/devtools/archives/reviews' ref prior // to being rewritten. That ensures the review history is kept from being // garbage collected. func (r *Review) Rebase(archivePrevious bool) error { if archivePrevious { orig, err := r.GetHeadCommit() if err != nil { return err } if err := r.Repo.ArchiveRef(orig, archiveRef); err != nil { return err } } if err := r.Repo.SwitchToRef(r.Request.ReviewRef); err != nil { return err } err := r.Repo.RebaseRef(r.Request.TargetRef) if err != nil { return err } alias, err := r.Repo.GetCommitHash("HEAD") if err != nil { return err } r.Request.Alias = alias newNote, err := r.Request.Write() if err != nil { return err } return r.Repo.AppendNote(request.Ref, r.Revision, newNote) } // RebaseAndSign performs an interactive rebase of the review onto its // target ref. It signs the result of the rebase as well as (re)signs // the review request itself. // // If the 'archivePrevious' argument is true, then the previous head of the // review will be added to the 'refs/devtools/archives/reviews' ref prior // to being rewritten. That ensures the review history is kept from being // garbage collected. 
func (r *Review) RebaseAndSign(archivePrevious bool) error { if archivePrevious { orig, err := r.GetHeadCommit() if err != nil { return err } if err := r.Repo.ArchiveRef(orig, archiveRef); err != nil { return err } } if err := r.Repo.SwitchToRef(r.Request.ReviewRef); err != nil { return err } err := r.Repo.RebaseAndSignRef(r.Request.TargetRef) if err != nil { return err } alias, err := r.Repo.GetCommitHash("HEAD") if err != nil { return err } r.Request.Alias = alias key, err := r.Repo.GetUserSigningKey() if err != nil { return err } err = gpg.Sign(key, &r.Request) if err != nil { return err } newNote, err := r.Request.Write() if err != nil { return err } return r.Repo.AppendNote(request.Ref, r.Revision, newNote) } func wellKnownCommitForPath(repo repository.Repo, path string, archive bool) (string, error) { commitDetails := &repository.CommitDetails{ Author: "nobody", AuthorEmail: "nobody", AuthorTime: "100000000 +0000", Committer: "nobody", CommitterEmail: "nobody", Time: "100000000 +0000", Summary: path, } commitHash, err := repo.CreateCommitWithTree(commitDetails, emptyTree) if err != nil { return "", err } if !archive { return commitHash, nil } if err := repo.ArchiveRef(commitHash, archiveRef); err != nil { return "", err } return commitHash, nil } func AddDetachedComment(repo repository.Repo, c *comment.Comment) error { path := c.Location.Path wellKnownCommit, err := wellKnownCommitForPath(repo, path, true) if err != nil { return fmt.Errorf("Failure finding the well-known commit for detached comments on %q: %v", path, err) } commentNote, err := c.Write() if err != nil { return err } return repo.AppendNote(comment.Ref, wellKnownCommit, commentNote) } func GetDetachedComments(repo repository.Repo, path string) ([]CommentThread, error) { wellKnownCommit, err := wellKnownCommitForPath(repo, path, false) if err != nil { return nil, fmt.Errorf("Failure finding the well-known commit for detached comments on %q: %v", path, err) } return GetComments(repo, 
wellKnownCommit) }
findLastCommit
identifier_name
lib.rs
//! An Entity Component System for game development. //! //! Currently used for personal use (for a roguelike game), this library is highly unstable, and a WIP. #![allow(dead_code)] #![feature(append,drain)] use std::iter; use std::collections::HashMap; use std::ops::{Index, IndexMut}; use std::collections::hash_map::Entry::{Occupied, Vacant}; pub mod component_presence; pub mod family; pub mod builder; pub mod event; pub mod behavior; use family::{FamilyDataHolder, FamilyMap}; use event::{EventDataHolder}; pub use event::EventManager; pub use behavior::BehaviorManager; pub use behavior::Behavior; pub use component_presence::ComponentPresence; /// Type Entity is simply an ID used as indexes. pub type Entity = u32; /// The components macro defines all the structs and traits that manage /// the component part of the ECS. #[macro_export] macro_rules! components { ($data:ident: $([$access:ident, $ty:ty]),+ ) => { use $crate::component_presence::ComponentPresence; use $crate::component_presence::ComponentPresence::*; use $crate::{EntityDataHolder, Component, Entity, ComponentData}; use $crate::family::{FamilyMap}; use std::fmt; pub struct $data { pub components: Vec<&'static str>, pub families: Vec<&'static str>, $( pub $access: ComponentPresence<$ty>, )+ } impl $data { pub fn new_empty() -> $data { $data { components: Vec::new(), families: Vec::new(), $( $access: Lacks, )+ } } } impl fmt::Debug for $data { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { let mut b = fmt.debug_struct("EntityData"); b.field("has components", &self.components); b.field("belongs to families", &self.families); /*$( if self.$access.has_it() { b.field(stringify!($access), &self.$access); } )+*/ b.finish() } } impl EntityDataHolder for $data { fn new() -> Self { $data::new_empty() } fn match_families(&self, families: &FamilyMap) -> Vec<&'static str> { let mut v: Vec<&str> = vec!(); // Tuple has the requirements/forbidden vectors for (family, tuple) in families { if 
$crate::family::matcher(tuple, &self.components) { v.push(family) } } v } fn set_families(&mut self, families: Vec<&'static str>) { self.families = families; } fn belongs_to_family(&self, family: &str) -> bool { self.families.contains(&family) } fn families(&self) -> Vec<&'static str> { self.families.clone() } } $( impl Component<$data> for $ty { fn add_to(self, ent: Entity, data: &mut ComponentData<$data>) { let ent_data: &mut $data = data.components.get_mut(&ent).expect("no entity"); ent_data.components.push(stringify!($access)); ent_data.$access = Has(self); } } )+ } } /// This is a marker trait to be used by the `components!` macro. /// /// This trait is implemented by `EntityData` which is a struct generated /// by the `components!` macro. /// /// `EntityData` will be of the form: /// /// ``` /// struct EntityData { /// component1: ComponentPresence<Component1>, /// component2: ComponentPresence<Component2>, /// //etc... /// } /// ``` /// /// So it will have one field per component defined in the call to `components!` /// You'll access these fields directly when indexing the `data` field of the `EntityManager` pub trait EntityDataHolder { fn new() -> Self; /// Takes a map of all the defined families, /// and returns the families that match this entity. fn match_families(&self, &FamilyMap) -> Vec<&'static str>; /// Sets the families this entity belongs to to `families` fn set_families(&mut self, Vec<&'static str>); fn belongs_to_family(&self, &'static str) -> bool; /// Gets the known families this ent belongs to. fn families(&self) -> Vec<&'static str>; } /// ComponentData knows which entities have which components. pub struct ComponentData<D: EntityDataHolder> { /// components holds the components owned by a certain entity. pub components: HashMap<Entity, D>, /// Family to list of entities. pub families: HashMap<&'static str, Vec<Entity>>, } /// This trait marks a struct as a component. 
(Automatically handled by macro `components!`) /// /// It should implement the `add_to` function, which is automatically generated /// by the `components!` macro. pub trait Component<D: EntityDataHolder> { /// Adds self to the specified entity. Called by the `EntityManager` fn add_to(self, ent: Entity, data: &mut ComponentData<D>); } impl<D: EntityDataHolder> ComponentData<D> { pub fn new() -> ComponentData<D> { ComponentData { components: HashMap::new(), families: HashMap::new(), } } pub fn get(&self, ent: &Entity) -> Option<&D> { self.components.get(ent) } pub fn get_mut(&mut self, ent: &Entity) -> Option<&mut D> { self.components.get_mut(ent) } pub fn create_component_data_for(&mut self, ent: Entity) { self.components.insert(ent, D::new()); } pub fn clear_family_data_for(&mut self, ent: Entity) { for family in self[ent].families() { self.remove_from_family(family, ent); debug_assert!(!self.families[family].contains(&ent)) } } pub fn
(&mut self, ent: Entity) { self.clear_family_data_for(ent); self.components.remove(&ent); } fn remove_from_family(&mut self, family: &str, ent: Entity) { let mut idx: Option<usize> = None; { let vec = self.families.get_mut(family).expect("No such family"); let op = vec.iter().enumerate().find(|&(_,e)| *e == ent); idx = Some(op.expect("Entity not found in this family").0); } if let Some(idx) = idx { self.families.get_mut(family).unwrap().swap_remove(idx); } else { panic!("Entity not found for family"); } } pub fn set_family_relation(&mut self, family: &'static str, ent: Entity) { match self.families.entry(family) { Vacant(entry) => {entry.insert(vec!(ent));}, Occupied(entry) => { let v = entry.into_mut(); if v.contains(&ent) { return; } v.push(ent); }, } } pub fn members_of(&self, family: &'static str) -> Vec<Entity> { match self.families.get(family) { Some(vec) => vec.clone(), None => vec!(), } } pub fn any_member_of(&self, family: &'static str) -> bool { !self.families.get(family).expect("no such family").is_empty() } } impl<D: EntityDataHolder> Index<Entity> for ComponentData<D> { type Output = D; fn index(&self, index: Entity) -> &D { &self.components.get(&index).expect(&format!("no entity {:?}", index)) } } impl<D: EntityDataHolder> IndexMut<Entity> for ComponentData<D> { fn index_mut(&mut self, index: Entity) -> &mut D { self.components.get_mut(&index).expect("no entity") } } /// The `EntityManager` type manages all the entities. /// /// It is in charge of creating and destroying entities. /// It also takes care of adding or removing components, through the `ComponentData` it contains. /// /// # Examples /// /// Creating a new manager, and adding some (predefined) components to a new entity. 
/// /// ``` /// let mut manager = EntityManager::new(); /// let ent = manager.new_entity(); /// manager.add_component_to(ent, Position{x: 1, y: 2}); /// ``` pub struct EntityManager<D: EntityDataHolder, F: FamilyDataHolder> { next_idx: usize, reusable_idxs: Vec<usize>, active: Vec<bool>, pub data: ComponentData<D>, /// Contains a list of all defined families, along with its requirements. families: F, } impl<D: EntityDataHolder, F: FamilyDataHolder> EntityManager<D, F> { /// Creates a new EntityManager pub fn new() -> EntityManager<D, F> { EntityManager{ next_idx: 0, reusable_idxs: vec!(), active: vec!(), data: ComponentData::new(), families: F::new(), } } /// Creates a new entity, assigning it an unused ID, returning that ID for further use. pub fn new_entity(&mut self) -> Entity { let idx = match self.reusable_idxs.pop() { None => { let idx = self.next_idx; self.next_idx += 1; idx } Some(idx) => idx, }; // Extend the vec if the idx is bigger. if self.active.len() <= idx { let padding = idx + 1 - self.active.len(); self.active.extend(iter::repeat(false).take(padding)); debug_assert!(self.active.len() == idx+1); } debug_assert!(!self.active[idx]); self.active[idx] = true; let ent = idx as Entity; self.data.create_component_data_for(ent); ent } /// Deletes the entity, removes all data related to it. /// /// Returns a list of events that were related to it, in case you need to do some clean up with them. 
pub fn delete_entity<Event>(&mut self, ent: Entity, events: &mut EventManager<Event>) -> Vec<Event> where Event: event::EventDataHolder { self.delete_entity_ignore_events(ent); events.clear_events_for(ent) } pub fn delete_entity_ignore_events(&mut self, ent: Entity) { let idx = ent as usize; assert!(self.active[idx]); self.reusable_idxs.push(idx); self.active[idx] = false; self.data.delete_component_data_for(ent); } pub fn build_ent<'a, A,B: EventDataHolder>(&'a mut self, ent: Entity, processor: &'a mut BehaviorManager<A,B>) -> EntityBuilder<D,A,B> { EntityBuilder::new(ent, processor, &mut self.data, self.families.all_families()) } /// Adds the specified component to the entity. pub fn add_component_to<A,B:EventDataHolder,C: Component<D>>(&mut self, e: Entity, c: C, processor: &mut BehaviorManager<A,B>) { //c.add_to(e, &mut self.data); self.build_ent(e, processor).add_component(c).finalize(); } } /// Used by `EntityManager` to add components to an Entity. /// /// An object of this type is obtained by calling `add_component` from an EntityManager pub struct EntityBuilder<'a, EntData: 'a + EntityDataHolder, T: 'a, Ev: event::EventDataHolder + 'a> { data: &'a mut ComponentData<EntData>, families: &'a FamilyMap, processor: &'a mut BehaviorManager<T,Ev>, ent: Entity, } impl<'a, EntData: 'a + EntityDataHolder, T, Ev: event::EventDataHolder> EntityBuilder<'a, EntData, T, Ev> { pub fn new(ent: Entity, processor: &'a mut BehaviorManager<T,Ev>, data: &'a mut ComponentData<EntData>, families: &'a FamilyMap) -> EntityBuilder<'a, EntData, T, Ev> { EntityBuilder { data: data, families: families, processor: processor, ent: ent, } } pub fn add_component<Comp: Component<EntData>>(self, comp: Comp) -> EntityBuilder<'a, EntData, T, Ev> { comp.add_to(self.ent, self.data); self } pub fn finalize(mut self) -> Entity { self.add_all_related_data(); self.ent } pub fn add_all_related_data(&mut self) { let mut families: Vec<&str> = self.data[self.ent].match_families(self.families); 
families.sort(); families.dedup(); // Clean up current component data, self.data.clear_family_data_for(self.ent); // Give the ComponentDataHolder information about this entities families. for family in families.iter() { self.data.set_family_relation(family, self.ent); } if !self.processor.valid_behaviors_for(families.clone()).is_empty() { self.processor.add_processable(self.ent); } // Give this EntityDataHolder a list of which families this entity has. self.data[self.ent].set_families(families); } } /* fn main() { println!("Hello, world!"); let mut manager = EntityManager::new(); let ent = manager.new_entity(); manager.add_component_to(ent, Position{x:1, y:2}); println!("pos: {:?}", manager.data[ent].position.x); manager.data[ent].position.x += 5; println!("pos: {:?}", manager.data[ent].position.x); println!("has glyph? {:?}", manager.data[ent].glyph.has_it()); } */
delete_component_data_for
identifier_name
lib.rs
//! An Entity Component System for game development. //! //! Currently used for personal use (for a roguelike game), this library is highly unstable, and a WIP. #![allow(dead_code)] #![feature(append,drain)] use std::iter; use std::collections::HashMap; use std::ops::{Index, IndexMut}; use std::collections::hash_map::Entry::{Occupied, Vacant}; pub mod component_presence; pub mod family; pub mod builder; pub mod event; pub mod behavior; use family::{FamilyDataHolder, FamilyMap}; use event::{EventDataHolder}; pub use event::EventManager; pub use behavior::BehaviorManager; pub use behavior::Behavior; pub use component_presence::ComponentPresence; /// Type Entity is simply an ID used as indexes. pub type Entity = u32; /// The components macro defines all the structs and traits that manage /// the component part of the ECS. #[macro_export] macro_rules! components { ($data:ident: $([$access:ident, $ty:ty]),+ ) => { use $crate::component_presence::ComponentPresence; use $crate::component_presence::ComponentPresence::*; use $crate::{EntityDataHolder, Component, Entity, ComponentData}; use $crate::family::{FamilyMap}; use std::fmt; pub struct $data { pub components: Vec<&'static str>, pub families: Vec<&'static str>, $( pub $access: ComponentPresence<$ty>, )+ } impl $data { pub fn new_empty() -> $data { $data { components: Vec::new(), families: Vec::new(), $( $access: Lacks, )+ } } } impl fmt::Debug for $data { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { let mut b = fmt.debug_struct("EntityData"); b.field("has components", &self.components); b.field("belongs to families", &self.families); /*$( if self.$access.has_it() { b.field(stringify!($access), &self.$access); } )+*/ b.finish() } } impl EntityDataHolder for $data { fn new() -> Self { $data::new_empty() } fn match_families(&self, families: &FamilyMap) -> Vec<&'static str> { let mut v: Vec<&str> = vec!(); // Tuple has the requirements/forbidden vectors for (family, tuple) in families { if 
$crate::family::matcher(tuple, &self.components) { v.push(family) } } v } fn set_families(&mut self, families: Vec<&'static str>) { self.families = families; } fn belongs_to_family(&self, family: &str) -> bool { self.families.contains(&family) } fn families(&self) -> Vec<&'static str> { self.families.clone() } } $( impl Component<$data> for $ty { fn add_to(self, ent: Entity, data: &mut ComponentData<$data>) { let ent_data: &mut $data = data.components.get_mut(&ent).expect("no entity"); ent_data.components.push(stringify!($access)); ent_data.$access = Has(self); } } )+ } } /// This is a marker trait to be used by the `components!` macro. /// /// This trait is implemented by `EntityData` which is a struct generated /// by the `components!` macro. /// /// `EntityData` will be of the form: /// /// ``` /// struct EntityData { /// component1: ComponentPresence<Component1>, /// component2: ComponentPresence<Component2>, /// //etc... /// } /// ``` /// /// So it will have one field per component defined in the call to `components!` /// You'll access these fields directly when indexing the `data` field of the `EntityManager` pub trait EntityDataHolder { fn new() -> Self; /// Takes a map of all the defined families, /// and returns the families that match this entity. fn match_families(&self, &FamilyMap) -> Vec<&'static str>; /// Sets the families this entity belongs to to `families` fn set_families(&mut self, Vec<&'static str>); fn belongs_to_family(&self, &'static str) -> bool; /// Gets the known families this ent belongs to. fn families(&self) -> Vec<&'static str>; } /// ComponentData knows which entities have which components. pub struct ComponentData<D: EntityDataHolder> { /// components holds the components owned by a certain entity. pub components: HashMap<Entity, D>, /// Family to list of entities. pub families: HashMap<&'static str, Vec<Entity>>, } /// This trait marks a struct as a component. 
(Automatically handled by macro `components!`) /// /// It should implement the `add_to` function, which is automatically generated /// by the `components!` macro. pub trait Component<D: EntityDataHolder> { /// Adds self to the specified entity. Called by the `EntityManager` fn add_to(self, ent: Entity, data: &mut ComponentData<D>); } impl<D: EntityDataHolder> ComponentData<D> { pub fn new() -> ComponentData<D> { ComponentData { components: HashMap::new(), families: HashMap::new(), } } pub fn get(&self, ent: &Entity) -> Option<&D> { self.components.get(ent) } pub fn get_mut(&mut self, ent: &Entity) -> Option<&mut D> { self.components.get_mut(ent) } pub fn create_component_data_for(&mut self, ent: Entity) { self.components.insert(ent, D::new()); } pub fn clear_family_data_for(&mut self, ent: Entity) { for family in self[ent].families() { self.remove_from_family(family, ent); debug_assert!(!self.families[family].contains(&ent)) } } pub fn delete_component_data_for(&mut self, ent: Entity) { self.clear_family_data_for(ent); self.components.remove(&ent); } fn remove_from_family(&mut self, family: &str, ent: Entity) { let mut idx: Option<usize> = None; { let vec = self.families.get_mut(family).expect("No such family"); let op = vec.iter().enumerate().find(|&(_,e)| *e == ent); idx = Some(op.expect("Entity not found in this family").0); } if let Some(idx) = idx { self.families.get_mut(family).unwrap().swap_remove(idx); } else { panic!("Entity not found for family"); } } pub fn set_family_relation(&mut self, family: &'static str, ent: Entity) { match self.families.entry(family) { Vacant(entry) => {entry.insert(vec!(ent));}, Occupied(entry) => { let v = entry.into_mut(); if v.contains(&ent) { return; } v.push(ent); }, } } pub fn members_of(&self, family: &'static str) -> Vec<Entity> { match self.families.get(family) { Some(vec) => vec.clone(), None => vec!(), } } pub fn any_member_of(&self, family: &'static str) -> bool { !self.families.get(family).expect("no such 
family").is_empty() } } impl<D: EntityDataHolder> Index<Entity> for ComponentData<D> { type Output = D; fn index(&self, index: Entity) -> &D { &self.components.get(&index).expect(&format!("no entity {:?}", index)) } } impl<D: EntityDataHolder> IndexMut<Entity> for ComponentData<D> { fn index_mut(&mut self, index: Entity) -> &mut D { self.components.get_mut(&index).expect("no entity") } } /// The `EntityManager` type manages all the entities. /// /// It is in charge of creating and destroying entities. /// It also takes care of adding or removing components, through the `ComponentData` it contains. /// /// # Examples /// /// Creating a new manager, and adding some (predefined) components to a new entity. /// /// ``` /// let mut manager = EntityManager::new(); /// let ent = manager.new_entity(); /// manager.add_component_to(ent, Position{x: 1, y: 2}); /// ``` pub struct EntityManager<D: EntityDataHolder, F: FamilyDataHolder> { next_idx: usize, reusable_idxs: Vec<usize>, active: Vec<bool>, pub data: ComponentData<D>, /// Contains a list of all defined families, along with its requirements. families: F, } impl<D: EntityDataHolder, F: FamilyDataHolder> EntityManager<D, F> { /// Creates a new EntityManager pub fn new() -> EntityManager<D, F> { EntityManager{ next_idx: 0, reusable_idxs: vec!(), active: vec!(), data: ComponentData::new(), families: F::new(), } } /// Creates a new entity, assigning it an unused ID, returning that ID for further use. pub fn new_entity(&mut self) -> Entity { let idx = match self.reusable_idxs.pop() { None => { let idx = self.next_idx; self.next_idx += 1; idx } Some(idx) => idx, }; // Extend the vec if the idx is bigger. 
if self.active.len() <= idx { let padding = idx + 1 - self.active.len(); self.active.extend(iter::repeat(false).take(padding)); debug_assert!(self.active.len() == idx+1); } debug_assert!(!self.active[idx]); self.active[idx] = true; let ent = idx as Entity; self.data.create_component_data_for(ent); ent } /// Deletes the entity, removes all data related to it. /// /// Returns a list of events that were related to it, in case you need to do some clean up with them. pub fn delete_entity<Event>(&mut self, ent: Entity, events: &mut EventManager<Event>) -> Vec<Event> where Event: event::EventDataHolder { self.delete_entity_ignore_events(ent); events.clear_events_for(ent) } pub fn delete_entity_ignore_events(&mut self, ent: Entity) { let idx = ent as usize; assert!(self.active[idx]); self.reusable_idxs.push(idx); self.active[idx] = false; self.data.delete_component_data_for(ent); } pub fn build_ent<'a, A,B: EventDataHolder>(&'a mut self, ent: Entity, processor: &'a mut BehaviorManager<A,B>) -> EntityBuilder<D,A,B> { EntityBuilder::new(ent, processor, &mut self.data, self.families.all_families()) } /// Adds the specified component to the entity. pub fn add_component_to<A,B:EventDataHolder,C: Component<D>>(&mut self, e: Entity, c: C, processor: &mut BehaviorManager<A,B>) { //c.add_to(e, &mut self.data); self.build_ent(e, processor).add_component(c).finalize(); } } /// Used by `EntityManager` to add components to an Entity. 
/// /// An object of this type is obtained by calling `add_component` from an EntityManager pub struct EntityBuilder<'a, EntData: 'a + EntityDataHolder, T: 'a, Ev: event::EventDataHolder + 'a> { data: &'a mut ComponentData<EntData>, families: &'a FamilyMap, processor: &'a mut BehaviorManager<T,Ev>, ent: Entity, } impl<'a, EntData: 'a + EntityDataHolder, T, Ev: event::EventDataHolder> EntityBuilder<'a, EntData, T, Ev> { pub fn new(ent: Entity, processor: &'a mut BehaviorManager<T,Ev>, data: &'a mut ComponentData<EntData>, families: &'a FamilyMap) -> EntityBuilder<'a, EntData, T, Ev> { EntityBuilder { data: data, families: families, processor: processor, ent: ent, } } pub fn add_component<Comp: Component<EntData>>(self, comp: Comp) -> EntityBuilder<'a, EntData, T, Ev> { comp.add_to(self.ent, self.data); self } pub fn finalize(mut self) -> Entity { self.add_all_related_data(); self.ent } pub fn add_all_related_data(&mut self) { let mut families: Vec<&str> = self.data[self.ent].match_families(self.families); families.sort(); families.dedup(); // Clean up current component data, self.data.clear_family_data_for(self.ent); // Give the ComponentDataHolder information about this entities families. for family in families.iter() { self.data.set_family_relation(family, self.ent); } if !self.processor.valid_behaviors_for(families.clone()).is_empty()
// Give this EntityDataHolder a list of which families this entity has. self.data[self.ent].set_families(families); } } /* fn main() { println!("Hello, world!"); let mut manager = EntityManager::new(); let ent = manager.new_entity(); manager.add_component_to(ent, Position{x:1, y:2}); println!("pos: {:?}", manager.data[ent].position.x); manager.data[ent].position.x += 5; println!("pos: {:?}", manager.data[ent].position.x); println!("has glyph? {:?}", manager.data[ent].glyph.has_it()); } */
{ self.processor.add_processable(self.ent); }
conditional_block
lib.rs
//! An Entity Component System for game development. //! //! Currently used for personal use (for a roguelike game), this library is highly unstable, and a WIP. #![allow(dead_code)] #![feature(append,drain)] use std::iter; use std::collections::HashMap; use std::ops::{Index, IndexMut}; use std::collections::hash_map::Entry::{Occupied, Vacant}; pub mod component_presence; pub mod family; pub mod builder; pub mod event; pub mod behavior; use family::{FamilyDataHolder, FamilyMap}; use event::{EventDataHolder}; pub use event::EventManager; pub use behavior::BehaviorManager; pub use behavior::Behavior; pub use component_presence::ComponentPresence; /// Type Entity is simply an ID used as indexes. pub type Entity = u32; /// The components macro defines all the structs and traits that manage /// the component part of the ECS. #[macro_export] macro_rules! components { ($data:ident: $([$access:ident, $ty:ty]),+ ) => { use $crate::component_presence::ComponentPresence; use $crate::component_presence::ComponentPresence::*; use $crate::{EntityDataHolder, Component, Entity, ComponentData}; use $crate::family::{FamilyMap}; use std::fmt; pub struct $data { pub components: Vec<&'static str>, pub families: Vec<&'static str>, $( pub $access: ComponentPresence<$ty>, )+ } impl $data { pub fn new_empty() -> $data { $data { components: Vec::new(), families: Vec::new(), $( $access: Lacks, )+ } } } impl fmt::Debug for $data { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { let mut b = fmt.debug_struct("EntityData"); b.field("has components", &self.components); b.field("belongs to families", &self.families);
if self.$access.has_it() { b.field(stringify!($access), &self.$access); } )+*/ b.finish() } } impl EntityDataHolder for $data { fn new() -> Self { $data::new_empty() } fn match_families(&self, families: &FamilyMap) -> Vec<&'static str> { let mut v: Vec<&str> = vec!(); // Tuple has the requirements/forbidden vectors for (family, tuple) in families { if $crate::family::matcher(tuple, &self.components) { v.push(family) } } v } fn set_families(&mut self, families: Vec<&'static str>) { self.families = families; } fn belongs_to_family(&self, family: &str) -> bool { self.families.contains(&family) } fn families(&self) -> Vec<&'static str> { self.families.clone() } } $( impl Component<$data> for $ty { fn add_to(self, ent: Entity, data: &mut ComponentData<$data>) { let ent_data: &mut $data = data.components.get_mut(&ent).expect("no entity"); ent_data.components.push(stringify!($access)); ent_data.$access = Has(self); } } )+ } } /// This is a marker trait to be used by the `components!` macro. /// /// This trait is implemented by `EntityData` which is a struct generated /// by the `components!` macro. /// /// `EntityData` will be of the form: /// /// ``` /// struct EntityData { /// component1: ComponentPresence<Component1>, /// component2: ComponentPresence<Component2>, /// //etc... /// } /// ``` /// /// So it will have one field per component defined in the call to `components!` /// You'll access these fields directly when indexing the `data` field of the `EntityManager` pub trait EntityDataHolder { fn new() -> Self; /// Takes a map of all the defined families, /// and returns the families that match this entity. fn match_families(&self, &FamilyMap) -> Vec<&'static str>; /// Sets the families this entity belongs to to `families` fn set_families(&mut self, Vec<&'static str>); fn belongs_to_family(&self, &'static str) -> bool; /// Gets the known families this ent belongs to. 
fn families(&self) -> Vec<&'static str>; } /// ComponentData knows which entities have which components. pub struct ComponentData<D: EntityDataHolder> { /// components holds the components owned by a certain entity. pub components: HashMap<Entity, D>, /// Family to list of entities. pub families: HashMap<&'static str, Vec<Entity>>, } /// This trait marks a struct as a component. (Automatically handled by macro `components!`) /// /// It should implement the `add_to` function, which is automatically generated /// by the `components!` macro. pub trait Component<D: EntityDataHolder> { /// Adds self to the specified entity. Called by the `EntityManager` fn add_to(self, ent: Entity, data: &mut ComponentData<D>); } impl<D: EntityDataHolder> ComponentData<D> { pub fn new() -> ComponentData<D> { ComponentData { components: HashMap::new(), families: HashMap::new(), } } pub fn get(&self, ent: &Entity) -> Option<&D> { self.components.get(ent) } pub fn get_mut(&mut self, ent: &Entity) -> Option<&mut D> { self.components.get_mut(ent) } pub fn create_component_data_for(&mut self, ent: Entity) { self.components.insert(ent, D::new()); } pub fn clear_family_data_for(&mut self, ent: Entity) { for family in self[ent].families() { self.remove_from_family(family, ent); debug_assert!(!self.families[family].contains(&ent)) } } pub fn delete_component_data_for(&mut self, ent: Entity) { self.clear_family_data_for(ent); self.components.remove(&ent); } fn remove_from_family(&mut self, family: &str, ent: Entity) { let mut idx: Option<usize> = None; { let vec = self.families.get_mut(family).expect("No such family"); let op = vec.iter().enumerate().find(|&(_,e)| *e == ent); idx = Some(op.expect("Entity not found in this family").0); } if let Some(idx) = idx { self.families.get_mut(family).unwrap().swap_remove(idx); } else { panic!("Entity not found for family"); } } pub fn set_family_relation(&mut self, family: &'static str, ent: Entity) { match self.families.entry(family) { Vacant(entry) => 
{entry.insert(vec!(ent));}, Occupied(entry) => { let v = entry.into_mut(); if v.contains(&ent) { return; } v.push(ent); }, } } pub fn members_of(&self, family: &'static str) -> Vec<Entity> { match self.families.get(family) { Some(vec) => vec.clone(), None => vec!(), } } pub fn any_member_of(&self, family: &'static str) -> bool { !self.families.get(family).expect("no such family").is_empty() } } impl<D: EntityDataHolder> Index<Entity> for ComponentData<D> { type Output = D; fn index(&self, index: Entity) -> &D { &self.components.get(&index).expect(&format!("no entity {:?}", index)) } } impl<D: EntityDataHolder> IndexMut<Entity> for ComponentData<D> { fn index_mut(&mut self, index: Entity) -> &mut D { self.components.get_mut(&index).expect("no entity") } } /// The `EntityManager` type manages all the entities. /// /// It is in charge of creating and destroying entities. /// It also takes care of adding or removing components, through the `ComponentData` it contains. /// /// # Examples /// /// Creating a new manager, and adding some (predefined) components to a new entity. /// /// ``` /// let mut manager = EntityManager::new(); /// let ent = manager.new_entity(); /// manager.add_component_to(ent, Position{x: 1, y: 2}); /// ``` pub struct EntityManager<D: EntityDataHolder, F: FamilyDataHolder> { next_idx: usize, reusable_idxs: Vec<usize>, active: Vec<bool>, pub data: ComponentData<D>, /// Contains a list of all defined families, along with its requirements. families: F, } impl<D: EntityDataHolder, F: FamilyDataHolder> EntityManager<D, F> { /// Creates a new EntityManager pub fn new() -> EntityManager<D, F> { EntityManager{ next_idx: 0, reusable_idxs: vec!(), active: vec!(), data: ComponentData::new(), families: F::new(), } } /// Creates a new entity, assigning it an unused ID, returning that ID for further use. 
pub fn new_entity(&mut self) -> Entity { let idx = match self.reusable_idxs.pop() { None => { let idx = self.next_idx; self.next_idx += 1; idx } Some(idx) => idx, }; // Extend the vec if the idx is bigger. if self.active.len() <= idx { let padding = idx + 1 - self.active.len(); self.active.extend(iter::repeat(false).take(padding)); debug_assert!(self.active.len() == idx+1); } debug_assert!(!self.active[idx]); self.active[idx] = true; let ent = idx as Entity; self.data.create_component_data_for(ent); ent } /// Deletes the entity, removes all data related to it. /// /// Returns a list of events that were related to it, in case you need to do some clean up with them. pub fn delete_entity<Event>(&mut self, ent: Entity, events: &mut EventManager<Event>) -> Vec<Event> where Event: event::EventDataHolder { self.delete_entity_ignore_events(ent); events.clear_events_for(ent) } pub fn delete_entity_ignore_events(&mut self, ent: Entity) { let idx = ent as usize; assert!(self.active[idx]); self.reusable_idxs.push(idx); self.active[idx] = false; self.data.delete_component_data_for(ent); } pub fn build_ent<'a, A,B: EventDataHolder>(&'a mut self, ent: Entity, processor: &'a mut BehaviorManager<A,B>) -> EntityBuilder<D,A,B> { EntityBuilder::new(ent, processor, &mut self.data, self.families.all_families()) } /// Adds the specified component to the entity. pub fn add_component_to<A,B:EventDataHolder,C: Component<D>>(&mut self, e: Entity, c: C, processor: &mut BehaviorManager<A,B>) { //c.add_to(e, &mut self.data); self.build_ent(e, processor).add_component(c).finalize(); } } /// Used by `EntityManager` to add components to an Entity. 
/// /// An object of this type is obtained by calling `add_component` from an EntityManager pub struct EntityBuilder<'a, EntData: 'a + EntityDataHolder, T: 'a, Ev: event::EventDataHolder + 'a> { data: &'a mut ComponentData<EntData>, families: &'a FamilyMap, processor: &'a mut BehaviorManager<T,Ev>, ent: Entity, } impl<'a, EntData: 'a + EntityDataHolder, T, Ev: event::EventDataHolder> EntityBuilder<'a, EntData, T, Ev> { pub fn new(ent: Entity, processor: &'a mut BehaviorManager<T,Ev>, data: &'a mut ComponentData<EntData>, families: &'a FamilyMap) -> EntityBuilder<'a, EntData, T, Ev> { EntityBuilder { data: data, families: families, processor: processor, ent: ent, } } pub fn add_component<Comp: Component<EntData>>(self, comp: Comp) -> EntityBuilder<'a, EntData, T, Ev> { comp.add_to(self.ent, self.data); self } pub fn finalize(mut self) -> Entity { self.add_all_related_data(); self.ent } pub fn add_all_related_data(&mut self) { let mut families: Vec<&str> = self.data[self.ent].match_families(self.families); families.sort(); families.dedup(); // Clean up current component data, self.data.clear_family_data_for(self.ent); // Give the ComponentDataHolder information about this entities families. for family in families.iter() { self.data.set_family_relation(family, self.ent); } if !self.processor.valid_behaviors_for(families.clone()).is_empty() { self.processor.add_processable(self.ent); } // Give this EntityDataHolder a list of which families this entity has. self.data[self.ent].set_families(families); } } /* fn main() { println!("Hello, world!"); let mut manager = EntityManager::new(); let ent = manager.new_entity(); manager.add_component_to(ent, Position{x:1, y:2}); println!("pos: {:?}", manager.data[ent].position.x); manager.data[ent].position.x += 5; println!("pos: {:?}", manager.data[ent].position.x); println!("has glyph? {:?}", manager.data[ent].glyph.has_it()); } */
/*$(
random_line_split